"bigquery_options")
@bigquery_options.setter
def bigquery_options(self, value: Optional[pulumi.Input['OrganizationSinkBigqueryOptionsArgs']]):
pulumi.set(self, "bigquery_options", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of this sink.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
"""
The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket. Examples:
```python
import pulumi
```
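For illustration only (the bracketed placeholders are hypothetical), destination strings typically take forms such as:
```python
destination = "storage.googleapis.com/[GCS_BUCKET]"
destination = "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
destination = "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
```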
The writer associated with the sink must have access to write to the above resource.
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
If set to True, then this sink is disabled and it does not export any log entries.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def exclusions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OrganizationSinkExclusionArgs']]]]:
"""
Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both the sink's filter and one of the exclusion filters, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is documented below.
"""
return pulumi.get(self, "exclusions")
@exclusions.setter
def exclusions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OrganizationSinkExclusionArgs']]]]):
pulumi.set(self, "exclusions", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input[str]]:
"""
The filter to apply when exporting logs. Only log entries that match the filter are exported. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter(name="includeChildren")
def include_children(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not to include child organizations in the sink export. If true, logs
associated with child projects are also exported; otherwise only logs relating to the provided organization are included.
"""
return pulumi.get(self, "include_children")
@include_children.setter
def include_children(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_children", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A client-assigned identifier, such as `load-balancer-exclusion`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="orgId")
def org_id(self) -> Optional[pulumi.Input[str]]:
"""
The numeric ID of the organization whose logs are exported by the sink.
"""
return pulumi.get(self, "org_id")
@org_id.setter
def org_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "org_id", value)
@property
@pulumi.getter(name="writerIdentity")
def writer_identity(self) -> Optional[pulumi.Input[str]]:
"""
The identity associated with this sink. This identity must be granted write access to the
configured `destination`.
"""
return pulumi.get(self, "writer_identity")
@writer_identity.setter
def writer_identity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "writer_identity", value)
class OrganizationSink(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bigquery_options: Optional[pulumi.Input[pulumi.InputType['OrganizationSinkBigqueryOptionsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrganizationSinkExclusionArgs']]]]] = None,
filter: Optional[pulumi.Input[str]] = None,
include_children: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an organization-level logging sink. For more information, see:
* [API documentation](https://cloud.google.com/logging/docs/reference/v2/rest/v2/organizations.sinks)
* How-to Guides
* [Exporting Logs](https://cloud.google.com/logging/docs/export)
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
log_bucket = gcp.storage.Bucket("log-bucket")
my_sink = gcp.logging.OrganizationSink("my-sink",
description="some explanation on what this is",
org_id="123456789",
destination=log_bucket.name.apply(lambda name: f"storage.googleapis.com/{name}"),
filter="resource.type = gce_instance AND severity >= WARNING")
log_writer = gcp.projects.IAMMember("log-writer",
role="roles/storage.objectCreator",
member=my_sink.writer_identity)
```
## Import
Organization-level logging sinks can be imported using this format:
```sh
$ pulumi import gcp:logging/organizationSink:OrganizationSink my_sink organizations/{{organization_id}}/sinks/{{sink_id}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['OrganizationSinkBigqueryOptionsArgs']] bigquery_options: Options that affect sinks exporting data to BigQuery. Structure documented below.
:param pulumi.Input[str] description: A description of this sink.
:param pulumi.Input[str] destination: The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket. Examples:
```python
import pulumi
```
The writer associated with the sink must have access to write to the above resource.
:param pulumi.Input[bool] disabled: If set to True, then this sink is disabled and it does not export any log entries.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrganizationSinkExclusionArgs']]]] exclusions: Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both the sink's filter and one of the exclusion filters, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is documented below.
:param pulumi.Input[str] filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
:param pulumi.Input[bool] include_children: Whether or not to include child organizations in the sink export. If true, logs
associated with child projects are also exported; otherwise only logs relating to the provided organization are included.
:param pulumi.Input[str] name: A client-assigned identifier, such as `load-balancer-exclusion`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.
:param pulumi.Input[str] org_id: The numeric ID of the organization whose logs are exported by the sink.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OrganizationSinkArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an organization-level logging sink. For more information, see:
* [API documentation](https://cloud.google.com/logging/docs/reference/v2/rest/v2/organizations.sinks)
* How-to Guides
* [Exporting Logs](https://cloud.google.com/logging/docs/export)
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
log_bucket = gcp.storage.Bucket("log-bucket")
my_sink = gcp.logging.OrganizationSink("my-sink",
description="some explanation on what this is",
org_id="123456789",
destination=log_bucket.name.apply(lambda name: f"storage.googleapis.com/{name}"),
filter="resource.type = gce_instance AND severity >= WARNING")
log_writer = gcp.projects.IAMMember("log-writer",
role="roles/storage.objectCreator",
member=my_sink.writer_identity)
```
## Import
Organization-level logging sinks can be imported using this format:
```sh
$ pulumi import gcp:logging/organizationSink:OrganizationSink my_sink organizations/{{organization_id}}/sinks/{{sink_id}}
```
:param str resource_name: The name of the resource.
:param OrganizationSinkArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OrganizationSinkArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bigquery_options: Optional[pulumi.Input[pulumi.InputType['OrganizationSinkBigqueryOptionsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrganizationSinkExclusionArgs']]]]] = None,
filter: Optional[pulumi.Input[str]] = None,
include_children: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OrganizationSinkArgs.__new__(OrganizationSinkArgs)
__props__.__dict__["bigquery_options"] = bigquery_options
__props__.__dict__["description"] = description
if destination is None and not opts.urn:
raise TypeError("Missing required property 'destination'")
__props__.__dict__["destination"] = destination
__props__.__dict__["disabled"] = disabled
__props__.__dict__["exclusions"] = exclusions
__props__.__dict__["filter"] = filter
__props__.__dict__["include_children"] = include_children
__props__.__dict__["name"] = name
if org_id is None and not opts.urn:
raise TypeError("Missing required property 'org_id'")
__props__.__dict__["org_id"] = org_id
__props__.__dict__["writer_identity"] = None
super(OrganizationSink, __self__).__init__(
'gcp:logging/organizationSink:OrganizationSink',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bigquery_options: Optional[pulumi.Input[pulumi.InputType['OrganizationSinkBigqueryOptionsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OrganizationSinkExclusionArgs']]]]] = None,
filter: Optional[pulumi.Input[str]] = None,
include_children: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
writer_identity: Optional[pulumi.Input[str]] = None) -> 'OrganizationSink':
"""
Get an existing OrganizationSink resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['OrganizationSinkBigqueryOptionsArgs']] bigquery_options: Options that affect sinks exporting data to BigQuery. Structure documented below.
:param pulumi.Input[str] description: A description of this sink.
:param pulumi.Input[str] destination: The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
def transition_retry(self, pipeline_key, retry_message):
"""Marks the given pipeline as requiring another retry.
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to retry pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to retry pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
params = pipeline_record.params
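# Exponential backoff: delay = backoff_seconds * backoff_factor ** current_attempt.
# Illustrative example (values are hypothetical): backoff_seconds=15,
# backoff_factor=2, current_attempt=2 gives 15 * 2**2 = 60 seconds.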
offset_seconds = (
params['backoff_seconds'] *
(params['backoff_factor'] ** pipeline_record.current_attempt))
pipeline_record.next_retry_time = (
self._gettime() + datetime.timedelta(seconds=offset_seconds))
pipeline_record.current_attempt += 1
pipeline_record.retry_message = retry_message
pipeline_record.status = _PipelineRecord.WAITING
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
root_pipeline_key = (
_PipelineRecord.root_pipeline.get_value_for_datastore(
pipeline_record))
logging.warning(
'Giving up on pipeline ID "%s" after %d attempt(s); causing abort '
'all the way to the root pipeline ID "%s"', pipeline_key.name(),
pipeline_record.current_attempt, root_pipeline_key.name())
# NOTE: We do *not* set the status to aborted here to ensure that
# this pipeline will be finalized before it has been marked as aborted.
pipeline_record.abort_message = (
'Aborting after %d attempts' % pipeline_record.current_attempt)
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
else:
task = taskqueue.Task(
url=self.pipeline_handler_path,
eta=pipeline_record.next_retry_time,
params=dict(pipeline_key=pipeline_key,
purpose=_BarrierRecord.START,
attempt=pipeline_record.current_attempt),
headers={'X-Ae-Pipeline-Key': pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
db.run_in_transaction(txn)
def transition_aborted(self, pipeline_key):
"""Makes the given pipeline as having aborted.
Does nothing if the pipeline is in a bad state.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be aborted.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to abort pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.ABORTED
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
################################################################################
class _BarrierHandler(webapp.RequestHandler):
"""Request handler for triggering barriers."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.notify_barriers(
self.request.get('slot_key'),
self.request.get('cursor'),
use_barrier_indexes=self.request.get('use_barrier_indexes') == 'True')
class _PipelineHandler(webapp.RequestHandler):
"""Request handler for running pipelines."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.evaluate(self.request.get('pipeline_key'),
purpose=self.request.get('purpose'),
attempt=int(self.request.get('attempt', '0')))
class _FanoutAbortHandler(webapp.RequestHandler):
"""Request handler for fanning out abort notifications."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.continue_abort(
self.request.get('root_pipeline_key'),
self.request.get('cursor'))
class _FanoutHandler(webapp.RequestHandler):
"""Request handler for fanning out pipeline children."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
# Set of stringified db.Keys of children to run.
all_pipeline_keys = set()
# For backwards compatibility with the old style of fan-out requests.
all_pipeline_keys.update(self.request.get_all('pipeline_key'))
# Fetch the child pipelines from the parent. This works around the 10KB
# task payload limit. This get() is consistent-on-read and the fan-out
# task is enqueued in the transaction that updates the parent, so the
# fanned_out property is consistent here.
parent_key = self.request.get('parent_key')
child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
if parent_key:
parent_key = db.Key(parent_key)
parent = db.get(parent_key)
for index in child_indexes:
all_pipeline_keys.add(str(parent.fanned_out[index]))
all_tasks = []
for pipeline_key in all_pipeline_keys:
all_tasks.append(taskqueue.Task(
url=context.pipeline_handler_path,
params=dict(pipeline_key=pipeline_key),
headers={'X-Ae-Pipeline-Key': pipeline_key},
name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name()))
batch_size = 100 # Limit of taskqueue API bulk add.
for i in xrange(0, len(all_tasks), batch_size):
batch = all_tasks[i:i+batch_size]
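# Named tasks are deduplicated by the task queue; TombstonedTaskError and
# TaskAlreadyExistsError indicate the child task was already enqueued (or its
# name was recently used), so both are safe to ignore below.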
try:
taskqueue.Queue(context.queue_name).add(batch)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
class _CleanupHandler(webapp.RequestHandler):
"""Request handler for cleaning up a Pipeline."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
root_pipeline_key = db.Key(self.request.get('root_pipeline_key'))
logging.debug('Cleaning up root_pipeline_key=%r', root_pipeline_key)
# TODO(user): Accumulate all BlobKeys from _PipelineRecord and
# _SlotRecord entities and delete them.
pipeline_keys = (
_PipelineRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(pipeline_keys)
slot_keys = (
_SlotRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(slot_keys)
barrier_keys = (
_BarrierRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(barrier_keys)
status_keys = (
_StatusRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(status_keys)
barrier_index_keys = (
_BarrierIndex.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(barrier_index_keys)
class _CallbackHandler(webapp.RequestHandler):
"""Receives asynchronous callback requests from humans or tasks."""
def post(self):
self.get()
def get(self):
try:
self.run_callback()
except _CallbackTaskError, e:
logging.error(str(e))
if 'HTTP_X_APPENGINE_TASKRETRYCOUNT' in self.request.environ:
# Silently give up on tasks that have retried many times. This
# probably means that the target pipeline has been deleted, so there's
# no reason to keep trying this task forever.
retry_count = int(
self.request.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))
if retry_count > _MAX_CALLBACK_TASK_RETRIES:
logging.error('Giving up on task after %d retries',
_MAX_CALLBACK_TASK_RETRIES)
return
# NOTE: The undescriptive error code 400 is present to address the security
# risks of giving external users access to cause PipelineRecord lookups
# and execution.
self.response.set_status(400)
def run_callback(self):
"""Runs the callback for the pipeline specified in the request.
Raises:
_CallbackTaskError if something was wrong with the request parameters.
"""
pipeline_id = self.request.get('pipeline_id')
if not pipeline_id:
raise _CallbackTaskError('"pipeline_id" parameter missing.')
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
raise _CallbackTaskError(
'Pipeline ID "%s" for callback does not exist.' % pipeline_id)
params = pipeline_record.params
real_class_path = params['class_path']
try:
pipeline_func_class = mr_util.for_name(real_class_path)
except ImportError, e:
raise _CallbackTaskError(
'Cannot load class named "%s" for pipeline ID "%s".'
% (real_class_path, pipeline_id))
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
if pipeline_func_class.public_callbacks:
pass
elif pipeline_func_class.admin_callbacks:
if not users.is_current_user_admin():
raise _CallbackTaskError(
'Unauthorized callback for admin-only pipeline ID "%s"'
% pipeline_id)
else:
raise _CallbackTaskError(
'External callback for internal-only pipeline ID "%s"'
% pipeline_id)
kwargs = {}
for key in self.request.arguments():
if key != 'pipeline_id':
kwargs[str(key)] = self.request.get(key)
def perform_callback():
stage = pipeline_func_class.from_id(pipeline_id)
if stage is None:
raise _CallbackTaskError(
'Pipeline ID "%s" deleted during callback' % pipeline_id)
return stage._callback_internal(kwargs)
# callback_xg_transaction is a 3-valued setting (None=no trans,
# False=1-eg-trans, True=xg-trans)
if pipeline_func_class._callback_xg_transaction is not None:
transaction_options = db.create_transaction_options(
xg=pipeline_func_class._callback_xg_transaction)
callback_result = db.run_in_transaction_options(transaction_options,
perform_callback)
else:
callback_result = perform_callback()
if callback_result is not None:
status_code, content_type, content = callback_result
self.response.set_status(status_code)
self.response.headers['Content-Type'] = content_type
self.response.out.write(content)
################################################################################
def _get_timestamp_ms(when):
"""Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
"""
if when is None:
return None
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
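# utctimetuple() has whole-second resolution, so the sub-second part is added
# back from when.microsecond (as fractional milliseconds) before truncation.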
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch)
def _get_internal_status(pipeline_key=None,
pipeline_dict=None,
slot_dict=None,
barrier_dict=None,
status_dict=None):
"""Gets the UI dictionary of a pipeline from a set of status dictionaries.
Args:
pipeline_key: The key of the pipeline to lookup.
pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord.
Default is an empty dictionary.
slot_dict: Dictionary mapping slot db.Key to _SlotRecord.
Default is an empty dictionary.
barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord.
Default is an empty dictionary.
status_dict: Dictionary mapping status record db.Key to _StatusRecord.
Default is an empty dictionary.
Returns:
Dictionary with the keys:
classPath: The pipeline function being run.
args: List of positional argument slot dictionaries.
kwargs: Dictionary of keyword argument slot dictionaries.
outputs: Dictionary of output slot dictionaries.
children: List of child pipeline IDs.
queueName: Queue on which this pipeline is running.
afterSlotKeys: List of Slot Ids after which this pipeline runs.
currentAttempt: Number of the current attempt, starting at 1.
maxAttempts: Maximum number of attempts before aborting.
backoffSeconds: Constant factor for backoff before retrying.
backoffFactor: Exponential factor for backoff before retrying.
status: Current status of the pipeline.
startTimeMs: When this pipeline ran or will run due to retries, if present.
endTimeMs: When this pipeline finalized, if present.
lastRetryMessage: Why the pipeline failed during the last retry, if there
was a failure; may be empty.
abortMessage: For root pipelines, why the pipeline was aborted if it was
aborted; may be empty.
Dictionary will contain these keys if explicit status is set:
statusTimeMs: When the status was set as milliseconds since the epoch.
statusMessage: Status message, if present.
statusConsoleUrl: The relative URL for the console of this pipeline.
statusLinks: Dictionary mapping human-readable names to relative URLs
for related URLs to this pipeline.
Raises:
PipelineStatusError if any input is bad.
"""
if pipeline_dict is None:
pipeline_dict = {}
if slot_dict is None:
slot_dict = {}
if barrier_dict is None:
barrier_dict = {}
if status_dict is None:
status_dict = {}
pipeline_record = pipeline_dict.get(pipeline_key)
if pipeline_record is None:
raise PipelineStatusError(
'Could not find pipeline ID "%s"' %
"""
A console script wrapper for multiple openmdao functions.
"""
import sys
import os
import argparse
from openmdao import __version__ as version
try:
import pkg_resources
except ImportError:
pkg_resources = None
from itertools import chain
import openmdao.utils.hooks as hooks
from openmdao.visualization.n2_viewer.n2_viewer import n2
from openmdao.visualization.connection_viewer.viewconns import view_connections
try:
import bokeh
from openmdao.visualization.meta_model_viewer.meta_model_visualization import view_metamodel
except ImportError:
bokeh = None
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.components.meta_model_structured_comp import MetaModelStructuredComp
from openmdao.devtools.debug import config_summary, tree
from openmdao.devtools.itrace import _itrace_exec, _itrace_setup_parser
from openmdao.devtools.iprofile_app.iprofile_app import _iprof_exec, _iprof_setup_parser
from openmdao.devtools.iprofile import _iprof_totals_exec, _iprof_totals_setup_parser
from openmdao.devtools.iprof_mem import _mem_prof_exec, _mem_prof_setup_parser, \
_mempost_exec, _mempost_setup_parser
from openmdao.devtools.iprof_utils import _Options
from openmdao.error_checking.check_config import _check_config_cmd, _check_config_setup_parser
from openmdao.utils.mpi import MPI
from openmdao.utils.find_cite import print_citations
from openmdao.utils.code_utils import _calltree_setup_parser, _calltree_exec
from openmdao.utils.coloring import _total_coloring_setup_parser, _total_coloring_cmd, \
_partial_coloring_setup_parser, _partial_coloring_cmd, \
_view_coloring_setup_parser, _view_coloring_exec
from openmdao.utils.scaffold import _scaffold_setup_parser, _scaffold_exec
from openmdao.utils.file_utils import _load_and_exec, _to_filename
from openmdao.utils.entry_points import _list_installed_setup_parser, _list_installed_cmd, \
split_ep, _compute_entry_points_setup_parser, _compute_entry_points_exec, \
_find_plugins_setup_parser, _find_plugins_exec
from openmdao.core.component import Component
def _n2_setup_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao n2' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python script or recording containing the model.')
parser.add_argument('-o', default='n2.html', action='store', dest='outfile',
help='html output file.')
parser.add_argument('--no_browser', action='store_true', dest='no_browser',
help="don't display in a browser.")
parser.add_argument('--embed', action='store_true', dest='embeddable',
help="create embeddable version.")
parser.add_argument('--title', default=None,
action='store', dest='title', help='diagram title.')
parser.add_argument('--use_declare_partial_info', action='store_true',
dest='use_declare_partial_info',
help="use declare partial info for internal connectivity.")
def _n2_cmd(options, user_args):
"""
Process command line args and call n2 on the specified file.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Command line options after '--' (if any). Passed to user script.
"""
filename = _to_filename(options.file[0])
if filename.endswith('.py'):
# the file is a python script, run as a post_setup hook
def _noraise(prob):
prob.model._raise_connection_errors = False
def _viewmod(prob):
n2(prob, outfile=options.outfile, show_browser=not options.no_browser,
title=options.title, embeddable=options.embeddable,
use_declare_partial_info=options.use_declare_partial_info)
exit() # could make this command line selectable later
hooks._register_hook('setup', 'Problem', pre=_noraise)
hooks._register_hook('final_setup', 'Problem', post=_viewmod)
_load_and_exec(options.file[0], user_args)
else:
# assume the file is a recording, run standalone
n2(filename, outfile=options.outfile, title=options.title,
show_browser=not options.no_browser, embeddable=options.embeddable,
use_declare_partial_info=options.use_declare_partial_info)
def _view_connections_setup_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao view_connections' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python file containing the model.')
parser.add_argument('-o', default='connections.html', action='store', dest='outfile',
help='html output file.')
parser.add_argument('-t', '--title', action='store', dest='title',
help='title of web page.')
parser.add_argument('--no_browser', action='store_true', dest='no_browser',
help="don't display in a browser.")
parser.add_argument('-v', '--show_values', action='store_true', dest='show_values',
help="Display values.")
parser.add_argument('-p', '--problem', action='store', dest='problem', help='Problem name')
def _view_connections_cmd(options, user_args):
"""
Return the post_setup hook function for 'openmdao view_connections'.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
def _viewconns(prob):
if options.title:
title = options.title
else:
title = "Connections for %s" % os.path.basename(options.file[0])
view_connections(prob, outfile=options.outfile, show_browser=not options.no_browser,
show_values=options.show_values, title=title)
exit()
# register the hook
if options.show_values:
funcname = 'final_setup'
else:
funcname = 'setup'
hooks._register_hook(funcname, class_name='Problem', inst_id=options.problem, post=_viewconns)
_load_and_exec(options.file[0], user_args)
def _meta_model_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao meta_model' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python file containing the model.')
parser.add_argument('-m', '--metamodel_pathname', action='store', dest='pathname',
help='pathname of the metamodel component.')
parser.add_argument('-r', '--resolution', default=50, type=int,
action='store', dest='resolution',
help='Number of points to create contour grid')
parser.add_argument('-p', '--port_number', default=5007, action='store', dest='port_number',
help='Port number to open viewer')
parser.add_argument('--no_browser', action='store_false', dest='browser',
help='Start the Bokeh server without opening a browser')
def _meta_model_cmd(options, user_args):
"""
Return the post_setup hook function for 'openmdao meta_model'.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
def _view_metamodel(prob):
if bokeh is None:
print("bokeh must be installed to view a MetaModel. Use the command:\n",
" pip install bokeh")
exit()
hooks._unregister_hook('final_setup', 'Problem')
mm_types = (MetaModelStructuredComp, MetaModelUnStructuredComp)
pathname = options.pathname
port_number = options.port_number
resolution = options.resolution
browser = options.browser
if pathname:
comp = prob.model._get_subsystem(pathname)
if comp and isinstance(comp, mm_types):
view_metamodel(comp, resolution, port_number, browser)
exit()
else:
comp = None
metamodels = {mm.pathname: mm for
mm in prob.model.system_iter(include_self=True, typ=mm_types)}
mm_names = list(metamodels.keys())
mm_count = len(mm_names)
if mm_count == 0:
print("No Metamodel components found in model.")
elif mm_count == 1 and not pathname:
comp = metamodels[mm_names[0]]
view_metamodel(comp, resolution, port_number, browser)
else:
try_str = "Try one of the following: {}.".format(mm_names)
if not pathname:
print("\nMetamodel not specified. {}".format(try_str))
elif not comp:
print("\nMetamodel '{}' not found.\n {}".format(pathname, try_str))
else:
print("\n'{}' is not a Metamodel.\n {}".format(pathname, try_str))
exit()
hooks._register_hook('final_setup', 'Problem', post=_view_metamodel)
_load_and_exec(options.file[0], user_args)
def _config_summary_setup_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao summary' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python file containing the model.')
def _config_summary_cmd(options, user_args):
"""
Return the post_setup hook function for 'openmdao summary'.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
def summary(prob):
config_summary(prob)
sys.exit(0)
hooks._register_hook('final_setup', 'Problem', post=summary)
_load_and_exec(options.file[0], user_args)
def _tree_setup_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao tree' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python file containing the model.')
parser.add_argument('-o', default=None, action='store', dest='outfile',
help='Output file name. By default, output goes to stdout.')
parser.add_argument('-c', '--colors', action='store_true', dest='show_colors',
help="Display colors if the terminal supports it. Requires 'colorama' "
"python package. Use 'pip install colorama' to install it.")
parser.add_argument('-d', '--depth', action='store', type=int, dest='depth',
default=0, help="Max depth of tree to display.")
parser.add_argument('-a', '--attr', action='append', default=[], dest='attrs',
help='Add an attribute to search for in tree systems.')
parser.add_argument('-v', '--var', action='append', default=[], dest='vecvars',
help='Add a variable to search for in vectors of tree components. '
'Use component relative names.')
parser.add_argument('-r', '--rank', action='store', type=int, dest='rank',
default=0, help="Display the tree on this rank (if MPI is active).")
parser.add_argument('-p', '--problem', action='store', dest='problem', help='Problem name')
parser.add_argument('-s', '--sizes', action='store_true', dest='show_sizes',
help="Display input and output sizes.")
parser.add_argument('--approx', action='store_true', dest='show_approx',
help="Show which components compute approximations.")
def _get_tree_filter(attrs, vecvars):
"""
Pull attributes and input/output vector variables out of a tree System.
Parameters
----------
attrs : list of str
Names of attributes (may contain dots).
vecvars : list of str
Names of variables contained in the input or output vectors. Use component relative
names.
Returns
-------
function
A function that takes a System and returns a list of name value pairs.
"""
def _finder(system):
found = []
for attr in attrs:
parts = attr.split('.') # allow attrs with dots
try:
obj = system
for p in parts:
obj = getattr(obj, p)
found.append((attr, obj))
except AttributeError:
pass
if isinstance(system, Component):
for var in vecvars:
if var in system._var_rel2meta:
if var in system._outputs:
found.append((var, system._outputs[var]))
elif var in system._inputs:
found.append((var, system._inputs[var]))
return found
return _finder
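# Hypothetical usage sketch (attribute and variable names below are illustrative):
#   filt = _get_tree_filter(['linear_solver'], ['x'])
#   name_value_pairs = filt(some_system)  # list of (name, value) pairs found on some_system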
def _tree_cmd(options, user_args):
"""
Return the post_setup hook function for 'openmdao tree'.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
if options.outfile is None:
out = sys.stdout
else:
out = open(options.outfile, 'w')
if options.attrs or options.vecvars:
filt = _get_tree_filter(options.attrs, options.vecvars)
else:
filt = None
def _tree(prob):
tree(prob, show_colors=options.show_colors, show_sizes=options.show_sizes,
show_approx=options.show_approx, filter=filt, max_depth=options.depth,
rank=options.rank, stream=out)
exit()
# register the hook
if options.vecvars or options.show_sizes or options.show_approx:
funcname = 'final_setup'
else:
funcname = 'setup'
hooks._register_hook(funcname, class_name='Problem', inst_id=options.problem, post=_tree)
_load_and_exec(options.file[0], user_args)
def _cite_setup_parser(parser):
"""
Set up the openmdao subparser for the 'openmdao cite' command.
Parameters
----------
parser : argparse subparser
The parser we're adding options to.
"""
parser.add_argument('file', nargs=1, help='Python file containing the model.')
parser.add_argument('-o', default=None, action='store', dest='outfile',
help='Name of output file. By default, output goes to stdout.')
parser.add_argument('-c', '--class', action='append', default=[], dest='classes',
help='Find citation for this class.')
def _cite_cmd(options, user_args):
"""
Run the `openmdao cite` command.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
if options.outfile is None:
out = sys.stdout
else:
out = open(options.outfile, 'w')
if not options.classes:
options.classes = None
def _cite(prob):
if not MPI or MPI.COMM_WORLD.rank == 0:
print_citations(prob, classes=options.classes, out_stream=out)
exit()
hooks._register_hook('setup', 'Problem', post=_cite)
_load_and_exec(options.file[0], user_args)
# this dict should contain names mapped to tuples of the form:
# (setup_parser_func, executor, description)
_command_map = {
'call_tree': (_calltree_setup_parser, _calltree_exec,
"Display the call tree for the specified | |
import abc
import collections
import copy
import inspect
import itertools
import json
import re
import warnings
from datetime import datetime
import elasticsearch_dsl as dsl
from django.conf import settings
from django.contrib import messages
from django.forms.forms import Form
from django.http import Http404, JsonResponse, QueryDict, StreamingHttpResponse
from django.http.response import HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect, render
from django.template import Context, RequestContext, TemplateDoesNotExist, loader
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.html import escape, format_html
from django.utils.http import urlencode
from django.views.generic import View
from django.views.generic.edit import CreateView, FormView
from elasticsearch_dsl import Q
from elasticsearch_dsl.utils import AttrList
from .facets import TermsFacet, RangeFilter, TextFacet
from .mapping import DEFAULT_ANALYZER
from .signals import advanced_search_performed, search_complete
from .templatetags.seeker import seeker_format
from seeker.utils import update_timestamp_index
seekerview_field_templates = {}
class Column(object):
"""
"""
view = None
visible = False
def __init__(self, field, label=None, sort=None, value_format=None, template=None, header=None, export=True, highlight=None, field_definition=None):
self.field = field
self.label = label if label is not None else field.replace('_', ' ').replace('.raw', '').capitalize()
self.sort = sort
self.template = template
self.value_format = value_format
self.header_html = escape(self.label) if header is None else header
self.export = export
self.highlight = highlight
self.field_definition = field_definition
def __str__(self):
return self.label
def __repr__(self):
return 'Column(%s)' % self.field
def bind(self, view, visible):
self.view = view
self.visible = visible
if self.visible:
if self.template:
self.template_obj = loader.get_template(self.template)
else:
self.template_obj = self.view.get_field_template(self.field)
# Set the model_lower variable on Column to the lowercased name of the model on the mapping once view is set above
try:
self.model_lower = self.view.document._model
except AttributeError:
document = self.view.document
if hasattr(document, 'model'):
self.model_lower = document.model.__name__.lower()
elif hasattr(document, 'queryset'):
self.model_lower = document.queryset().model.__name__.lower()
else:
self.model_lower = ''
self.view.document._model = self.model_lower
return self
def header(self):
cls = '%s_%s' % (self.view.document._doc_type.name, self.field.replace('.', '_'))
cls += ' %s_%s' % (self.model_lower, self.field.replace('.', '_'))
if not self.sort:
return format_html('<th class="{}">{}</th>', cls, self.header_html)
q = self.view.request.GET.copy()
field = q.get('s', '')
sort = None
cls += ' sort'
if field.lstrip('-') == self.field:
# If the current sort field is this field, add a direction class and flip the sort direction for the link.
sort = 'Descending' if field.startswith('-') else 'Ascending'
cls += ' desc' if field.startswith('-') else ' asc'
d = '' if field.startswith('-') else '-'
q['s'] = '%s%s' % (d, self.field)
else:
q['s'] = self.field
next_sort = 'descending' if sort == 'Ascending' else 'ascending'
sr_label = format_html(' <span class="sr-only">({})</span>', sort) if sort else ''
if self.field_definition:
span = format_html('<span title="{}" class ="fa fa-question-circle"></span>', self.field_definition)
else:
span = ''
html = format_html(
'<th class="{}"><a href="?{}" title="Click to sort {}" data-sort="{}">{}{} {}</a></th>',
cls,
q.urlencode(),
next_sort,
q['s'],
self.header_html,
sr_label,
span
)
return html
def context(self, result, **kwargs):
return kwargs
def render(self, result, **kwargs):
value = getattr(result, self.field, None)
try:
if '*' in self.highlight:
# If highlighting was requested for multiple fields, grab any matching fields as a dictionary.
r = self.highlight.replace('*', r'\w+').replace('.', r'\.')
highlight = {f.replace('.', '_'): result.meta.highlight[f] for f in result.meta.highlight if re.match(r, f)}
else:
highlight = result.meta.highlight[self.highlight]
except Exception:
highlight = []
# If the value is a list (AttrList is DSL's custom list) then highlight won't work properly
# The "meta.highlight" will only contain the matched item, not the others
if highlight and isinstance(value, AttrList):
# We are going to modify this copy with the appropriate highlights
modified_values = copy.deepcopy(value)
for highlighted_value in highlight:
# Remove the <em> tags elasticsearch added
stripped_value = highlighted_value.replace('<em>', '').replace('</em>', '')
index_to_replace = None
# Iterate over all of the values and try to find the item that caused the "hit"
for index, individual_value in enumerate(value):
if stripped_value == individual_value:
index_to_replace = index
break
# Specifically check against None because "0" is falsy (but a valid index)
if index_to_replace is not None:
modified_values[index_to_replace] = highlighted_value
highlight = modified_values
if self.value_format:
value = self.value_format(value)
if highlight:
highlight = self.value_format(highlight)
params = {
'result': result,
'field': self.field,
'value': value,
'highlight': highlight,
'model_lower': self.model_lower,
'doc_class_name': result.__class__.__name__.lower(),
'view': self.view,
'user': self.view.request.user,
'query': self.view.get_keywords(self.view.request.GET),
}
params.update(self.context(result, **kwargs))
return self.template_obj.render(params)
def export_value(self, result):
export_field = self.field if self.export is True else self.export
if export_field:
value = getattr(result, export_field, '')
if isinstance(value, datetime) and timezone.is_aware(value):
value = timezone.localtime(value)
export_val = ', '.join(force_text(v.to_dict() if hasattr(v, 'to_dict') else v) for v in value) if isinstance(value, AttrList) else seeker_format(value)
else:
export_val = ''
return export_val
class SeekerView(View):
document = None
"""
A :class:`elasticsearch_dsl.DocType` class to present a view for.
"""
using = None
"""
The ES connection alias to use.
"""
index = None
"""
The ES index to use. Will use the index set on the mapping if this is not set.
"""
template_name = 'seeker/seeker.html'
"""
The overall seeker template to render.
"""
search_form_template = 'seeker/form.html'
"""
The template to render seeker form
"""
header_template = 'seeker/header.html'
"""
The template used to render the search results header.
"""
results_template = 'seeker/results.html'
"""
The template used to render the search results.
"""
footer_template = 'seeker/footer.html'
"""
The template used to render the search results footer.
"""
columns = None
"""
A list of Column objects, or strings representing mapping field names. If None, all mapping fields will be available.
"""
exclude = None
"""
A list of field names to exclude when generating columns.
"""
login_required_columns = []
"""
A list of field names that will automatically be added to the exclude when generating columns if the user is not authenticated.
"""
display = None
"""
A list of field/column names to display by default.
"""
post_filter_facets = False
"""
If True, facets and results are refreshed dynamically after a change to the form.
The seeker template must include the supporting JavaScript and AJAX for this feature to work.
"""
required_display = []
"""
A list of tuples, ex. ('field name', 0), representing field/column names that will always be displayed (cannot be hidden by the user).
The second value is the index/position of the field (used as the index in list.insert(index, 'field name')).
"""
@property
def required_display_fields(self):
return [t[0] for t in self.required_display]
sort = None
"""
A list of field/column names to sort by default, or None for no default sort order. For reverse order prefix the field with '-'.
"""
search = None
"""
A list of field names to search. By default, will included all fields defined on the document mapping.
"""
highlight = True
"""
A list of field names to highlight, or True/False to enable/disable highlighting for all fields.
"""
highlight_encoder = 'html'
"""
An 'encoder' parameter is used when highlighting to define how highlighted text will be encoded. It can be either
'default' (no encoding) or 'html' (will escape html, if you use html highlighting tags).
"""
number_of_fragments = 0
"""
The number of fragments returned by highlighted search, set to 0 by default (which gives all results)
"""
facets = []
"""
A list of :class:`seeker.Facet` objects that are available to facet the results by.
"""
initial_facets = {}
"""
A dictionary of initial facets, mapping fields to lists of initial values.
"""
page_size = 10
"""
The number of results to show per page.
"""
available_page_sizes = []
"""
If set, the user may choose from these page-size options (the list must include the default page_size).
"""
page_spread = 7
"""
The number of pages (not including first and last) to show in the paginator widget.
"""
can_save = True
"""
Whether searches for this view can be saved.
"""
export_name = 'seeker'
"""
The filename (without extension, which will be .csv) to use when exporting data from this view.
"""
export_timestamp = False
"""
Whether or not to append a timestamp of the current time to the export filename when exporting data from this view.
"""
show_rank = True
"""
Whether or not to show a Rank column when performing keyword searches.
"""
field_columns = {}
"""
A dictionary of field column overrides.
"""
field_labels = {}
"""
A dictionary of field label overrides.
"""
field_definitions = {}
"""
A dictionary of field definitions. These appear in the header of a column.
''
activity_header.paragraphs[0].add_run("Purpose").bold = True
activity_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
role_header = server_table.cell(0, 2)
role_header.text = ''
role_header.paragraphs[0].add_run("Role").bold = True
role_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Loop through the domains to create rows
counter = 1
for server in report_json['infrastructure']['servers']['static'].values():
server_table.add_row()
name_cell = server_table.cell(counter, 0)
name_cell.text = "{}".format(server['ip_address'])
name_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
activity_cell = server_table.cell(counter, 1)
activity_cell.text = "{}".format(server['activity'])
activity_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
role_cell = server_table.cell(counter, 2)
role_cell.text = "{}".format(server['role'])
role_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Increase counter for the next row
counter += 1
for server in report_json['infrastructure']['servers']['cloud'].values():
server_table.add_row()
name_cell = server_table.cell(counter, 0)
name_cell.text = "{}".format(server['ip_address'])
name_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
activity_cell = server_table.cell(counter, 1)
activity_cell.text = "{}".format(server['activity'])
activity_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
role_cell = server_table.cell(counter, 2)
role_cell.text = "{}".format(server['role'])
role_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Increase counter for the next row
counter += 1
self.create_newline()
# If the style needs to be updated, update it in template.docx
connection_table = self.spenny_doc.add_table(
rows=1,
cols=3,
style='Ghostwriter Table')
connection_table.allow_autofit = True
connection_table.autofit = True
server_header = connection_table.cell(0, 0)
server_header.text = ""
server_header.paragraphs[0].add_run("Domain").bold = True
server_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
domain_header = connection_table.cell(0, 1)
domain_header.text = ""
domain_header.paragraphs[0].add_run("Server").bold = True
domain_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
domain_header = connection_table.cell(0, 2)
domain_header.text = ""
domain_header.paragraphs[0].add_run("CDN Endpoint").bold = True
domain_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Loop through the domains to create rows
counter = 1
for connection in report_json[
'infrastructure']['domains_and_servers'].values():
connection_table.add_row()
server_cell = connection_table.cell(counter, 0)
server_cell.text = "{}".format(connection['domain'])
server_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
domain_cell = connection_table.cell(counter, 1)
domain_cell.text = "{}".format(connection['servers'])
domain_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
domain_cell = connection_table.cell(counter, 2)
domain_cell.text = "{}".format(connection['cdn_endpoint'])
domain_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Increase counter for the next row
counter += 1
self.spenny_doc.add_page_break()
#####################################
# Create the Findings Summary Table #
#####################################
# If the style needs to be updated, update it in template.docx
finding_table = self.spenny_doc.add_table(
rows=1,
cols=2,
style='Ghostwriter Table')
finding_header = finding_table.cell(0, 0)
finding_header.text = ""
finding_header.paragraphs[0].add_run("Finding").bold = True
finding_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
severity_header = finding_table.cell(0, 1)
severity_header.text = ""
severity_header.paragraphs[0].add_run("Severity").bold = True
severity_header.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Re-size table headers to provide space for finding name
widths = (Inches(5.4), Inches(1.1))
for row in finding_table.rows:
for idx, width in enumerate(widths):
row.cells[idx].width = width
finding_table.allow_autofit = True
finding_table.autofit = True
# Loop through the findings to create rows
counter = 1
for finding in report_json['findings'].values():
finding_table.add_row()
finding_cell = finding_table.cell(counter, 0)
finding_cell.text = "{}".format(finding['title'])
finding_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
severity_cell = finding_table.cell(counter, 1)
severity_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
run = severity_cell.paragraphs[0].add_run(
'{}'.format(finding['severity']))
font = run.font
font.color.rgb = RGBColor(0x00, 0x00, 0x00)
run.bold = False
severity_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# Shading a table cell requires parsing some XML and then editing
# the cell to be shaded
if finding['severity'].lower() == 'informational':
shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.
format(nsdecls('w'),
self.informational_color))
elif finding['severity'].lower() == 'low':
shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.
format(nsdecls('w'),
self.low_color))
elif finding['severity'].lower() == 'medium':
shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.
format(nsdecls('w'),
self.medium_color))
elif finding['severity'].lower() == 'high':
shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.
format(nsdecls('w'),
self.high_color))
else:
shading = parse_xml(r'<w:shd {} w:fill="{}"/>'.
format(nsdecls('w'),
self.critical_color))
# Modify font to white so it contrasts better against dark cell
font.color.rgb = RGBColor(0xFF, 0xFF, 0xFF)
# Manually append the appropriate shading to the risk rating cell
finding_table.rows[counter].cells[1]._tc.get_or_add_tcPr().\
append(shading)
# Increase counter for the next row
counter += 1
########################################
# Create the Individual Findings Pages #
########################################
# Add a page break and create each finding's page
self.spenny_doc.add_page_break()
for finding in report_json['findings'].values():
# There's a special Heading 3 for the finding title so we don't
# use `add_heading()` here
p = self.spenny_doc.add_paragraph(finding['title'])
p.style = 'Heading 3 - Finding'
# This is Heading 4 but we want to make severity a run to color it
# so we don't use `add_heading()` here
p = self.spenny_doc.add_paragraph()
p.style = 'Heading 4'
run = p.add_run('Severity – ')
run = p.add_run('{}'.format(finding['severity']))
font = run.font
if finding['severity'].lower() == 'informational':
font.color.rgb = RGBColor(
self.informational_color_hex[0],
self.informational_color_hex[1],
self.informational_color_hex[2])
elif finding['severity'].lower() == 'low':
font.color.rgb = RGBColor(
self.low_color_hex[0],
self.low_color_hex[1],
self.low_color_hex[2])
elif finding['severity'].lower() == 'medium':
font.color.rgb = RGBColor(
self.medium_color_hex[0],
self.medium_color_hex[1],
self.medium_color_hex[2])
elif finding['severity'].lower() == 'high':
font.color.rgb = RGBColor(
self.high_color_hex[0],
self.high_color_hex[1],
self.high_color_hex[2])
else:
font.color.rgb = RGBColor(
self.critical_color_hex[0],
self.critical_color_hex[1],
self.critical_color_hex[2])
# Add an Affected Entities section
self.spenny_doc.add_heading('Affected Entities', 4)
if not finding['affected_entities']:
finding['affected_entities'] = 'Must Be Provided'
all_entities = finding['affected_entities'].split('\n')
for entity in all_entities:
entity = entity.strip()
p = self.spenny_doc.add_paragraph(entity, style='Normal')
self.list_number(p, level=0, num=False)
p.paragraph_format.left_indent = Inches(0.5)
# Add a Description section that may also include evidence figures
self.spenny_doc.add_heading('Description', 4)
self.process_text(finding['description'], finding, report_json)
# Create Impact section
self.spenny_doc.add_heading('Impact', 4)
self.process_text(
finding['impact'],
finding,
report_json)
# Create Recommendations section
self.spenny_doc.add_heading('Recommendation', 4)
self.process_text(
finding['recommendation'],
finding,
report_json)
# Create Replication section
self.spenny_doc.add_heading('Replication Steps', 4)
self.process_text(
finding['replication_steps'],
finding,
report_json)
# Check if techniques are provided before creating a host
# detection section
if finding['host_detection_techniques']:
self.spenny_doc.add_heading(
'Adversary Detection Techniques – Host', 4)
self.process_text(
finding['host_detection_techniques'],
finding,
report_json)
# Check if techniques are provided before creating a network
# detection section
if finding['network_detection_techniques']:
self.spenny_doc.add_heading(
'Adversary Detection Techniques – Network', 4)
self.process_text(
finding['network_detection_techniques'],
finding,
report_json)
# Create References section
self.spenny_doc.add_heading('References', 4)
self.process_text(finding['references'], finding, report_json)
# On to the next finding
self.spenny_doc.add_page_break()
# Finalize document and return it for an HTTP response
return self.spenny_doc
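    # Illustrative caller sketch (assumed code and method name, not part of this
    # class): the returned python-docx Document is typically saved into an
    # in-memory buffer before being attached to an HTTP response, e.g.:
    #
    #   import io
    #   buffer = io.BytesIO()
    #   report_writer.generate_word_docx().save(buffer)
    #   buffer.seek(0)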
def generate_excel_xlsx(self, memory_object):
"""Generate the finding rows and save the document."""
from ghostwriter.reporting.models import Evidence
# Generate the JSON for the report
report_json = json.loads(self.generate_json())
# Create xlsxwriter
spenny_doc = memory_object
worksheet = spenny_doc.add_worksheet('Findings')
# Create some basic formats
# Header format
bold_format = spenny_doc.add_format({'bold': True})
bold_format.set_text_wrap()
bold_format.set_align('vcenter')
# Affected assets format
asset_format = spenny_doc.add_format()
asset_format.set_text_wrap()
asset_format.set_align('vcenter')
asset_format.set_align('center')
# Remaining cells
wrap_format = spenny_doc.add_format()
wrap_format.set_text_wrap()
wrap_format.set_align('vcenter')
# Create header row for findings
col = 0
headers = ['Finding', 'Severity', 'Affected Entities', 'Description',
'Impact', 'Recommendation', 'Replication Steps',
'Host Detection Techniques', 'Network Detection Techniques',
'References', 'Supporting Evidence']
for header in headers:
worksheet.write(0, col, header, bold_format)
col = col + 1
# Width of all columns set to 30
worksheet.set_column(0, 10, 30)
# Width of severity columns set to 10
worksheet.set_column(1, 1, 10)
# Loop through the dict of findings to create findings worksheet
col = 0
row = 1
for finding in report_json['findings'].values():
# Finding Name
worksheet.write(row, 0, finding['title'], wrap_format)
# Severity
severity_format = spenny_doc.add_format()
severity_format.set_align('vcenter')
severity_format.set_align('center')
# Color the cell based on corresponding severity color
if finding['severity'].lower() == 'informational':
severity_format.set_bg_color(self.informational_color)
elif finding['severity'].lower() == "low":
severity_format.set_bg_color(self.low_color)
elif finding['severity'].lower() == "medium":
severity_format.set_bg_color(self.medium_color)
elif finding['severity'].lower() == "high":
severity_format.set_bg_color(self.high_color)
elif finding['severity'].lower() == "critical":
severity_format.set_bg_color(self.critical_color)
worksheet.write(row, 1, finding['severity'], severity_format)
# Affected Asset
if finding['affected_entities']:
worksheet.write(
row, 2, finding['affected_entities'], asset_format)
else:
worksheet.write(
row, 2, 'N/A', asset_format)
# Description
worksheet.write(
row, 3, finding['description'], wrap_format)
# Impact
worksheet.write(
row, 4, finding['impact'], wrap_format)
# Recommendation
worksheet.write(
row, 5, finding['recommendation'], wrap_format)
# Replication
worksheet.write(
row, 6, finding['replication_steps'], wrap_format)
# Detection
worksheet.write(
row, 7, finding['host_detection_techniques'], wrap_format)
worksheet.write(
row, 8, finding['network_detection_techniques'], wrap_format)
# References
worksheet.write(
row, 9, finding['references'], wrap_format)
# Collect the evidence, if any, from the finding's folder and
# insert inline with description
try:
evidence_queryset = Evidence.objects.\
filter(finding=finding['id'])
except Exception:
evidence_queryset = []
# Loop through any evidence and add it to the evidence column
            # Keep only image or text attachments, judged by file extension
            evidence = [f.document.name for f in evidence_queryset
                        if f.document.name.split('.')[-1].lower() in self.image_extensions
                        or f.document.name.split('.')[-1].lower() in self.text_extensions]
finding_evidence_names = '\r\n'.join(map(str, evidence))
# Evidence List
worksheet.write(row, 10, finding_evidence_names, wrap_format)
# Increment row counter before moving on to next finding
row += 1
# Add a filter to the worksheet
        worksheet.autofilter('A1:K{}'.format(len(report_json['findings'])+1))
# Finalize document
spenny_doc.close()
        return spenny_doc
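    # Illustrative caller sketch (assumed code, not part of this class): the
    # 'memory_object' handed to generate_excel_xlsx() is an xlsxwriter workbook,
    # typically opened over an in-memory buffer so the finished spreadsheet can
    # be streamed back in an HTTP response, e.g.:
    #
    #   import io
    #   import xlsxwriter
    #   output = io.BytesIO()
    #   workbook = xlsxwriter.Workbook(output, {'in_memory': True})
    #   report_writer.generate_excel_xlsx(workbook)
    #   output.seek(0)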
def insert_slide(self):
"""Shortcut for inserting new ppt slides"""
# TO-DO
def generate_powerpoint_pptx(self):
"""Generate the tables and save the PowerPoint presentation."""
# Generate the JSON for the report
report_json = json.loads(self.generate_json())
# Create document writer using the specified template
if self.template_loc:
try:
self.spenny_ppt = Presentation(self.template_loc)
except Exception:
# TODO: Return error on webpage
pass
else:
# TODO: Return error on webpage
pass
self.ppt_color_info = pptx.dml.color.RGBColor(
self.informational_color_hex[0],
self.informational_color_hex[1],
self.informational_color_hex[2])
self.ppt_color_low = pptx.dml.color.RGBColor(
self.low_color_hex[0],
self.low_color_hex[1],
self.low_color_hex[2])
self.ppt_color_medium = pptx.dml.color.RGBColor(
self.medium_color_hex[0],
self.medium_color_hex[1],
self.medium_color_hex[2])
self.ppt_color_high = pptx.dml.color.RGBColor(
self.high_color_hex[0],
self.high_color_hex[1],
self.high_color_hex[2])
self.ppt_color_critical = pptx.dml.color.RGBColor(
self.critical_color_hex[0],
self.critical_color_hex[1],
self.critical_color_hex[2])
# Loop through the dict of findings to create slides based on findings
# Initialize findings stats dict
findings_stats = {
'Critical': 0,
'High': 0,
'Medium': 0,
'Low': 0,
'Informational': 0
}
# Calculate finding stats
        for finding in report_json['findings'].values():
            findings_stats[finding['severity']] += 1
from typing import Dict, List, Optional, Any, Callable
from sys import stderr
import yaml
from json import dump
from json.encoder import JSONEncoder
from kgx import GraphEntityType
from kgx.prefix_manager import PrefixManager
from kgx.graph.base_graph import BaseGraph
"""
Generate a knowledge map that corresponds to TRAPI KnowledgeMap.
Specification based on TRAPI Draft PR: https://github.com/NCATSTranslator/ReasonerAPI/pull/171
"""
####################################################################
# Next Generation Implementation of Graph Summary coding which
# leverages the new "Transformer.process()" data stream "Inspector"
# design pattern, implemented here as a "Callable" inspection class.
####################################################################
def mkg_default(o):
"""
JSONEncoder 'default' function override to
properly serialize 'Set' objects (into 'List')
"""
if isinstance(o, MetaKnowledgeGraph.Category):
return o.json_object()
else:
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
    return JSONEncoder().default(o)
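# Minimal usage sketch for 'mkg_default' (an assumed helper, not part of the
# original module): serialize a summary dict that may contain sets or
# MetaKnowledgeGraph.Category objects.
def dump_meta_knowledge_graph_example(summary: Dict, filename: str):
    """Write a graph summary to JSON, using mkg_default for non-standard types."""
    with open(filename, 'w') as mkg_file:
        dump(summary, mkg_file, indent=4, default=mkg_default)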
class MetaKnowledgeGraph:
"""
Class for generating a TRAPI 1.1 style of "meta knowledge graph" summary.
The optional 'progress_monitor' for the validator should be a lightweight Callable
which is injected into the class 'inspector' Callable, designed to intercepts
node and edge records streaming through the Validator (inside a Transformer.process() call.
The first (GraphEntityType) argument of the Callable tags the record as a NODE or an EDGE.
The second argument given to the Callable is the current record itself.
This Callable is strictly meant to be procedural and should *not* mutate the record.
The intent of this Callable is to provide a hook to KGX applications wanting the
namesake function of passively monitoring the graph data stream. As such, the Callable
could simply tally up the number of times it is called with a NODE or an EDGE, then
provide a suitable (quick!) report of that count back to the KGX application. The
Callable (function/callable class) should not modify the record and should be of low
complexity, so as not to introduce a large computational overhead to validation!
Parameters
----------
name: str
(Graph) name assigned to the summary.
progress_monitor: Optional[Callable[[GraphEntityType, List], None]]
Function given a peek at the current record being processed by the class wrapped Callable.
error_log:
Where to write any graph processing error message (stderr, by default)
"""
def __init__(
self,
name='',
progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = None,
error_log=None,
**kwargs
):
# formal args
self.name = name
self.progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = progress_monitor
# internal attributes
self.node_catalog: Dict[str, List[int]] = dict()
self.node_stats: Dict[str, MetaKnowledgeGraph.Category] = dict()
self.node_stats['unknown'] = self.Category('unknown')
self.edge_record_count: int = 0
self.predicates: Dict = dict()
self.association_map: Dict = dict()
self.edge_stats = []
self.graph_stats: Dict[str, Dict] = dict()
if error_log:
self.error_log = open(error_log, 'w')
else:
self.error_log = stderr
def __call__(self, entity_type: GraphEntityType, rec: List):
"""
Transformer 'inspector' Callable
"""
if self.progress_monitor:
self.progress_monitor(entity_type, rec)
if entity_type == GraphEntityType.EDGE:
self.analyse_edge(*rec)
elif entity_type == GraphEntityType.NODE:
self.analyse_node(*rec)
else:
raise RuntimeError("Unexpected GraphEntityType: " + str(entity_type))
class Category:
# The 'category map' just associates a unique int catalog
# index ('cid') value as a proxy for the full curie string,
# to reduce storage in the main node catalog
_category_curie_map: List[str] = list()
def __init__(self, category=''):
self.category = category
if category not in self._category_curie_map:
self._category_curie_map.append(category)
self.category_stats: Dict[str, Any] = dict()
self.category_stats['id_prefixes'] = set()
self.category_stats['count'] = 0
self.category_stats['count_by_source'] = {'unknown': 0}
def get_cid(self):
return self._category_curie_map.index(self.category)
@classmethod
def get_category_curie(cls, cid: int):
return cls._category_curie_map[cid]
def get_id_prefixes(self):
return self.category_stats['id_prefixes']
def get_count(self):
return self.category_stats['count']
def get_count_by_source(self, source: str = None) -> Dict:
if source:
return {source: self.category_stats['count_by_source'][source]}
return self.category_stats['count_by_source']
def analyse_node_category(self, n, data):
prefix = PrefixManager.get_prefix(n)
self.category_stats['count'] += 1
if prefix not in self.category_stats['id_prefixes']:
self.category_stats['id_prefixes'].add(prefix)
if 'provided_by' in data:
for s in data['provided_by']:
if s in self.category_stats['count_by_source']:
self.category_stats['count_by_source'][s] += 1
else:
self.category_stats['count_by_source'][s] = 1
else:
self.category_stats['count_by_source']['unknown'] += 1
def json_object(self):
return {
'id_prefixes': list(self.category_stats['id_prefixes']),
'count': self.category_stats['count'],
'count_by_source': self.category_stats['count_by_source']
}
def analyse_node(self, n, data):
# The TRAPI release 1.1 meta_knowledge_graph format indexes nodes by biolink:Category
# the node 'category' field is a list of assigned categories (usually just one...).
# However, this may perhaps sometimes result in duplicate counting and conflation of prefixes(?).
if n in self.node_catalog:
# Report duplications of node records, as discerned from node id.
print("Duplicate node identifier '" + n +
"' encountered in input node data? Ignoring...", file=self.error_log)
return
else:
self.node_catalog[n] = list()
if 'category' not in data:
category = self.node_stats['unknown']
category.analyse_node_category(n, data)
print(
"Node with identifier '" + n +
"' is missing its 'category' value? " +
"Counting it as 'unknown', but otherwise ignoring in the analysis...", file=self.error_log
)
return
for category_data in data['category']:
# we note here that category_curie may be
# a piped '|' set of Biolink category CURIE values
categories = category_data.split("|")
# analyse them each independently...
for category_curie in categories:
if category_curie not in self.node_stats:
self.node_stats[category_curie] = self.Category(category_curie)
category = self.node_stats[category_curie]
category_idx: int = category.get_cid()
if category_idx not in self.node_catalog[n]:
self.node_catalog[n].append(category_idx)
category.analyse_node_category(n, data)
def analyse_edge(self, u, v, k, data):
# we blissfully assume that all the nodes of a
# graph stream were analysed first by the MetaKnowledgeGraph
# before the edges are analysed, thus we can test for
# node 'n' existence internally, by identifier.
#
# Given the use case of multiple categories being assigned to a given node in a KGX data file,
# either by category inheritance (ancestry all the way back up to NamedThing)
# or by conflation (i.e. gene == protein id?), then the Cartesian product of
# subject/object edges mappings need to be captured here.
#
self.edge_record_count += 1
predicate = data['predicate']
if predicate not in self.predicates:
# just need to track the number
# of edge records using this predicate
self.predicates[predicate] = 0
self.predicates[predicate] += 1
if u not in self.node_catalog:
print("Edge 'subject' node ID '" + u + "' not found in node catalog? Ignoring...", file=self.error_log)
# removing from edge count
self.edge_record_count -= 1
self.predicates[predicate] -= 1
return
else:
for subj_cat_idx in self.node_catalog[u]:
subject_category = MetaKnowledgeGraph.Category.get_category_curie(subj_cat_idx)
if v not in self.node_catalog:
print("Edge 'object' node ID '" + v +
"' not found in node catalog? Ignoring...", file=self.error_log)
self.edge_record_count -= 1
self.predicates[predicate] -= 1
return
else:
for obj_cat_idx in self.node_catalog[v]:
object_category = MetaKnowledgeGraph.Category.get_category_curie(obj_cat_idx)
# Process the 'valid' S-P-O triple here...
triple = (subject_category, predicate, object_category)
if triple not in self.association_map:
self.association_map[triple] = {
'subject': triple[0],
'predicate': triple[1],
'object': triple[2],
'relations': set(),
'count': 0,
'count_by_source': {'unknown': 0},
}
if data['relation'] not in self.association_map[triple]['relations']:
self.association_map[triple]['relations'].add(data['relation'])
self.association_map[triple]['count'] += 1
if 'provided_by' in data:
for s in data['provided_by']:
if s not in self.association_map[triple]['count_by_source']:
self.association_map[triple]['count_by_source'][s] = 1
else:
self.association_map[triple]['count_by_source'][s] += 1
else:
self.association_map[triple]['count_by_source']['unknown'] += 1
def get_name(self):
"""
Returns
-------
str
Currently assigned knowledge graph name.
"""
return self.name
def get_category(self, category_curie: str) -> Category:
"""
        Gets the MetaKnowledgeGraph.Category object tracked for the given
        (Biolink) category CURIE.
Parameters
----------
category_curie: str
Curie identifier for the (Biolink) category.
Returns
-------
Category
MetaKnowledgeGraph.Category object for a given Biolink category.
"""
return self.node_stats[category_curie]
def get_node_stats(self) -> Dict[str, Category]:
if 'unknown' in self.node_stats and not self.node_stats['unknown'].get_count():
self.node_stats.pop('unknown')
return self.node_stats
def get_number_of_categories(self) -> int:
"""
Counts the number of distinct (Biolink) categories encountered
in the knowledge graph (not including those of 'unknown' category)
Returns
-------
int
Number of distinct (Biolink) categories found in the graph (excluding the 'unknown' category)
"""
return len([c for c in self.node_stats.keys() if c != 'unknown'])
def get_edge_stats(self) -> List:
# Not sure if this is "safe" but assume
# that edge_stats may be cached once computed?
if not self.edge_stats:
for k, v in self.association_map.items():
kedge = v
relations = list(v['relations'])
kedge['relations'] = relations
self.edge_stats.append(kedge)
return self.edge_stats
def get_total_nodes_count(self) -> int:
"""
Counts the total number of distinct nodes in the knowledge graph
(**not** including those ignored due to being of 'unknown' category)
Returns
-------
int
            Number of distinct nodes in the knowledge graph.
"""
return len(self.node_catalog)
def get_node_count_by_category(self, category_curie: str) -> int:
"""
        Counts the number of nodes in the graph
with the specified (Biolink) category curie.
Parameters
----------
category_curie: str
Curie identifier for the (Biolink) category.
Returns
-------
int
Number of nodes for the given category.
Raises
------
RuntimeError
            Error if the category CURIE is not found among the node statistics.
        """
        # Body reconstructed from the accessors above (assumed implementation)
        if category_curie not in self.node_stats:
            raise RuntimeError("Unknown category: " + category_curie)
        return self.node_stats[category_curie].get_count()
class CertificateCollection(Model):
    """Collection of certificates.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. Collection of resources.
    :type value: list[~commondefinitions.models.Certificate]
    :ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Certificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, **kwargs):
super(CertificateCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class CertificatePatch(Model):
"""A certificate to update.
:param tags: Application-specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(CertificatePatch, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class CertificateProperties(Model):
"""Certificate resource specific properties.
Variables are only populated by the server, and will be ignored when
sending a request.
:param password: <PASSWORD>.
:type password: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:param value: PFX or PEM blob
:type value: bytearray
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: datetime
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
:ivar valid: Is the certificate valid?.
:vartype valid: bool
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
"""
_validation = {
'subject_name': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'public_key_hash': {'readonly': True},
}
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'subject_name': {'key': 'subjectName', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
'issuer': {'key': 'issuer', 'type': 'str'},
'issue_date': {'key': 'issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'valid': {'key': 'valid', 'type': 'bool'},
'public_key_hash': {'key': 'publicKeyHash', 'type': 'str'},
}
def __init__(self, **kwargs):
super(CertificateProperties, self).__init__(**kwargs)
self.password = kwargs.get('password', None)
self.subject_name = None
self.value = kwargs.get('value', None)
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.thumbprint = None
self.valid = None
self.public_key_hash = None
class ClientRegistration(Model):
"""The configuration settings of the app registration for providers that have
client ids and client secrets.
:param client_id: The Client ID of the app used for login.
:type client_id: str
:param client_secret_ref_name: The app secret ref name that contains the
client secret.
:type client_secret_ref_name: str
"""
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret_ref_name': {'key': 'clientSecretRefName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ClientRegistration, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.client_secret_ref_name = kwargs.get('client_secret_ref_name', None)
class Configuration(Model):
"""Non versioned Container App configuration properties that define the
mutable settings of a Container app.
:param secrets: Collection of secrets used by a Container app
:type secrets: list[~commondefinitions.models.Secret]
:param active_revisions_mode: ActiveRevisionsMode controls how active
revisions are handled for the Container app:
<list><item>Multiple: multiple revisions can be active. If no value if
provided, this is the default</item><item>Single: Only one revision can be
active at a time. Revision weights can not be used in this
mode</item></list>. Possible values include: 'multiple', 'single'
:type active_revisions_mode: str or
~commondefinitions.models.ActiveRevisionsMode
:param ingress: Ingress configurations.
:type ingress: ~commondefinitions.models.Ingress
:param dapr: Dapr configuration for the Container App.
:type dapr: ~commondefinitions.models.Dapr
:param registries: Collection of private container registry credentials
for containers used by the Container app
:type registries: list[~commondefinitions.models.RegistryCredentials]
"""
_attribute_map = {
'secrets': {'key': 'secrets', 'type': '[Secret]'},
'active_revisions_mode': {'key': 'activeRevisionsMode', 'type': 'str'},
'ingress': {'key': 'ingress', 'type': 'Ingress'},
'dapr': {'key': 'dapr', 'type': 'Dapr'},
'registries': {'key': 'registries', 'type': '[RegistryCredentials]'},
}
def __init__(self, **kwargs):
super(Configuration, self).__init__(**kwargs)
self.secrets = kwargs.get('secrets', None)
self.active_revisions_mode = kwargs.get('active_revisions_mode', None)
self.ingress = kwargs.get('ingress', None)
self.dapr = kwargs.get('dapr', None)
self.registries = kwargs.get('registries', None)
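# Illustrative sketch (assumed values, not part of the generated SDK models):
# a Configuration restricted to a single active revision, built with the same
# kwargs-based construction these model classes expose.
def _example_configuration():
    return Configuration(
        active_revisions_mode='single',
        secrets=[],
        registries=[],
    )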
class Container(Model):
"""Container App container definition.
:param image: Container image tag.
:type image: str
:param name: Custom container name.
:type name: str
:param command: Container start command.
:type command: list[str]
:param args: Container start command arguments.
:type args: list[str]
:param env: Container environment variables.
:type env: list[~commondefinitions.models.EnvironmentVar]
:param resources: Container resource requirements.
:type resources: ~commondefinitions.models.ContainerResources
:param probes: List of probes for the container.
:type probes: list[~commondefinitions.models.ContainerAppProbe]
:param volume_mounts: Container volume mounts.
:type volume_mounts: list[~commondefinitions.models.VolumeMount]
"""
_attribute_map = {
'image': {'key': 'image', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'command': {'key': 'command', 'type': '[str]'},
'args': {'key': 'args', 'type': '[str]'},
'env': {'key': 'env', 'type': '[EnvironmentVar]'},
'resources': {'key': 'resources', 'type': 'ContainerResources'},
'probes': {'key': 'probes', 'type': '[ContainerAppProbe]'},
'volume_mounts': {'key': 'volumeMounts', 'type': '[VolumeMount]'},
}
def __init__(self, **kwargs):
super(Container, self).__init__(**kwargs)
self.image = kwargs.get('image', None)
self.name = kwargs.get('name', None)
self.command = kwargs.get('command', None)
self.args = kwargs.get('args', None)
self.env = kwargs.get('env', None)
self.resources = kwargs.get('resources', None)
self.probes = kwargs.get('probes', None)
self.volume_mounts = kwargs.get('volume_mounts', None)
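# Illustrative sketch (assumed image name and arguments, not part of the
# generated SDK models): a minimal Container definition with a custom start
# command.
def _example_container():
    return Container(
        image='nginx:latest',
        name='web',
        command=['nginx'],
        args=['-g', 'daemon off;'],
    )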
class ContainerApp(TrackedResource):
"""Container App.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. E.g.
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy
and modifiedBy information.
:vartype system_data: ~commondefinitions.models.SystemData
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives
:type location: str
:param identity: managed identities for the Container App to interact with
other Azure services without maintaining any secrets or credentials in
code.
:type identity: ~commondefinitions.models.ManagedServiceIdentity
:ivar provisioning_state: Provisioning state of the Container App.
Possible values include: 'InProgress', 'Succeeded', 'Failed', 'Canceled'
:vartype provisioning_state: str or
~commondefinitions.models.ContainerAppProvisioningState
:param managed_environment_id: Resource ID of the Container App's
environment.
:type managed_environment_id: str
:ivar latest_revision_name: Name of the latest revision of the Container
App.
:vartype latest_revision_name: str
:ivar latest_revision_fqdn: Fully Qualified Domain Name of the latest
revision of the Container App.
:vartype latest_revision_fqdn: str
:ivar custom_domain_verification_id: Id used to verify domain name
ownership
:vartype custom_domain_verification_id: str
:param configuration: Non versioned Container App configuration
properties.
:type configuration: ~commondefinitions.models.Configuration
:param template: Container App versioned application definition.
:type template: ~commondefinitions.models.Template
:ivar outbound_ip_addresses: Outbound IP Addresses for container app.
:vartype outbound_ip_addresses: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'latest_revision_name': {'readonly': True},
'latest_revision_fqdn': {'readonly': True},
'custom_domain_verification_id': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'managed_environment_id': {'key': 'properties.managedEnvironmentId', 'type': 'str'},
'latest_revision_name': {'key': 'properties.latestRevisionName', 'type': 'str'},
'latest_revision_fqdn': {'key': 'properties.latestRevisionFqdn', 'type': 'str'},
'custom_domain_verification_id': {'key': 'properties.customDomainVerificationId', 'type': 'str'},
'configuration': {'key': 'properties.configuration', 'type': 'Configuration'},
'template': {'key': 'properties.template', 'type': 'Template'},
'outbound_ip_addresses': {'key': 'properties.outboundIPAddresses', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ContainerApp, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.provisioning_state = None
self.managed_environment_id = kwargs.get('managed_environment_id', None)
self.latest_revision_name = None
self.latest_revision_fqdn = None
self.custom_domain_verification_id = None
self.configuration = kwargs.get('configuration', None)
self.template = kwargs.get('template', None)
self.outbound_ip_addresses = None
class ContainerAppCollection(Model):
"""Container App collection ARM resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~commondefinitions.models.ContainerApp]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ContainerApp]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerAppCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class ContainerAppPatch(Model):
"""Container App Patch.
:param tags: Application-specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ContainerAppPatch, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class ContainerAppProbe(Model):
"""Probe describes a health check to be performed against a container to
determine whether it is alive or ready to receive traffic.
:param failure_threshold: Minimum consecutive failures for the probe to be
considered failed after having succeeded. Defaults to 3. Minimum value is
1. Maximum value is 10.
:type failure_threshold: int
:param http_get: HTTPGet specifies the http request to perform.
:type http_get: ~commondefinitions.models.ContainerAppProbeHttpGet
:param initial_delay_seconds: Number of seconds after the container has
        started before liveness probes are initiated.
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
##################################################
# GNU Radio Python Flow Graph
# Title: Dtv Sigmf Playback
# Generated: Sat Feb 23 23:29:06 2019
# GNU Radio version: 3.7.12.0
##################################################
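# Example invocation (illustrative; assumes this generated file is saved as
# dtv_sigmf_playback.py):
#   python2 dtv_sigmf_playback.py --signal-name DTV --symbol-rate 10.76e6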
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from datetime import datetime as dt; import string; import math
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from optparse import OptionParser
import pmt
import sigmf
import sip
import sys
from gnuradio import qtgui
class dtv_sigmf_playback(gr.top_block, Qt.QWidget):
def __init__(self, mod_order=8, mod_scheme='ATSC_8VSB', rx_ant_model='Decotec Tape Measure Discone', rx_db_ser='na', rx_db_type='na', rx_ser_tag='F50030', rx_ser_uhd='F50030', rx_type='B210', signal_name='DTV', symbol_rate=10.76e6):
gr.top_block.__init__(self, "Dtv Sigmf Playback")
Qt.QWidget.__init__(self)
self.setWindowTitle("Dtv Sigmf Playback")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "dtv_sigmf_playback")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.mod_order = mod_order
self.mod_scheme = mod_scheme
self.rx_ant_model = rx_ant_model
self.rx_db_ser = rx_db_ser
self.rx_db_type = rx_db_type
self.rx_ser_tag = rx_ser_tag
self.rx_ser_uhd = rx_ser_uhd
self.rx_type = rx_type
self.signal_name = signal_name
self.symbol_rate = symbol_rate
##################################################
# Variables
##################################################
self.ts_str = ts_str = dt.strftime(dt.utcnow(), "%Y-%m-%dT%H:%M:%S.%fZ")
self.fn = fn = "{:s}_{:s}".format(signal_name, ts_str)
self.tune = tune = 0
self.samp_rate = samp_rate = 250e3
self.rx_gain = rx_gain = 45
self.rx_freq = rx_freq = 602.31e6
self.nfft = nfft = 1024
self.fp = fp = "/captures/dtv/{:s}".format(fn)
self.avg_len = avg_len = 100.0
##################################################
# Blocks
##################################################
self._tune_tool_bar = Qt.QToolBar(self)
self._tune_tool_bar.addWidget(Qt.QLabel("tune"+": "))
self._tune_line_edit = Qt.QLineEdit(str(self.tune))
self._tune_tool_bar.addWidget(self._tune_line_edit)
self._tune_line_edit.returnPressed.connect(
lambda: self.set_tune(eng_notation.str_to_num(str(self._tune_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._tune_tool_bar, 8, 2, 1, 2)
for r in range(8, 9):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._samp_rate_tool_bar = Qt.QToolBar(self)
self._samp_rate_tool_bar.addWidget(Qt.QLabel("samp_rate"+": "))
self._samp_rate_line_edit = Qt.QLineEdit(str(self.samp_rate))
self._samp_rate_tool_bar.addWidget(self._samp_rate_line_edit)
self._samp_rate_line_edit.returnPressed.connect(
lambda: self.set_samp_rate(eng_notation.str_to_num(str(self._samp_rate_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._samp_rate_tool_bar, 9, 2, 1, 2)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(2, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self._avg_len_tool_bar = Qt.QToolBar(self)
self._avg_len_tool_bar.addWidget(Qt.QLabel("avg_len"+": "))
self._avg_len_line_edit = Qt.QLineEdit(str(self.avg_len))
self._avg_len_tool_bar.addWidget(self._avg_len_line_edit)
self._avg_len_line_edit.returnPressed.connect(
lambda: self.set_avg_len(eng_notation.str_to_num(str(self._avg_len_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._avg_len_tool_bar, 8, 4, 1, 2)
for r in range(8, 9):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 6):
self.top_grid_layout.setColumnStretch(c, 1)
self.sigmf_source_0 = sigmf.source('/captures/dtv/DTV_2019-02-20T18:06:10.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self._rx_gain_tool_bar = Qt.QToolBar(self)
self._rx_gain_tool_bar.addWidget(Qt.QLabel("rx_gain"+": "))
self._rx_gain_line_edit = Qt.QLineEdit(str(self.rx_gain))
self._rx_gain_tool_bar.addWidget(self._rx_gain_line_edit)
self._rx_gain_line_edit.returnPressed.connect(
lambda: self.set_rx_gain(eng_notation.str_to_num(str(self._rx_gain_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._rx_gain_tool_bar, 8, 0, 1, 2)
for r in range(8, 9):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self._rx_freq_tool_bar = Qt.QToolBar(self)
self._rx_freq_tool_bar.addWidget(Qt.QLabel("rx_freq"+": "))
self._rx_freq_line_edit = Qt.QLineEdit(str(self.rx_freq))
self._rx_freq_tool_bar.addWidget(self._rx_freq_line_edit)
self._rx_freq_line_edit.returnPressed.connect(
lambda: self.set_rx_freq(eng_notation.str_to_num(str(self._rx_freq_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._rx_freq_tool_bar, 9, 0, 1, 2)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_1 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"DTV", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_1.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_1.enable_grid(True)
self.qtgui_waterfall_sink_x_0_1.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [1, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_1.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_1.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_1.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_1.set_intensity_range(-140, -40)
self._qtgui_waterfall_sink_x_0_1_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_1_win, 2, 0, 2, 8)
for r in range(2, 4):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_vector_sink_f_0 = qtgui.vector_sink_f(
nfft,
0,
1.0,
"x-Axis",
"y-Axis",
"",
1 # Number of inputs
)
self.qtgui_vector_sink_f_0.set_update_time(0.010)
self.qtgui_vector_sink_f_0.set_y_axis(-140, 10)
self.qtgui_vector_sink_f_0.enable_autoscale(False)
self.qtgui_vector_sink_f_0.enable_grid(True)
self.qtgui_vector_sink_f_0.set_x_axis_units("")
self.qtgui_vector_sink_f_0.set_y_axis_units("")
self.qtgui_vector_sink_f_0.set_ref_level(-40)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_vector_sink_f_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_vector_sink_f_0.set_line_label(i, labels[i])
self.qtgui_vector_sink_f_0.set_line_width(i, widths[i])
self.qtgui_vector_sink_f_0.set_line_color(i, colors[i])
self.qtgui_vector_sink_f_0.set_line_alpha(i, alphas[i])
self._qtgui_vector_sink_f_0_win = sip.wrapinstance(self.qtgui_vector_sink_f_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_vector_sink_f_0_win, 10, 0, 4, 8)
for r in range(10, 14):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"DTV", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.01)
self.qtgui_freq_sink_x_0.set_y_axis(-140, -40)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["red", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win, 0, 0, 2, 8)
for r in range(0, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.fft_vxx_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 4)
self.blocks_vector_to_stream_0 = blocks.vector_to_stream(gr.sizeof_float*1, nfft)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_tagged_stream_to_pdu_0 = blocks.tagged_stream_to_pdu(blocks.float_t, 'fft')
self.blocks_stream_to_vector_1 = blocks.stream_to_vector(gr.sizeof_float*1, nfft)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_tagged_stream_0 = blocks.stream_to_tagged_stream(gr.sizeof_float, 1, nfft, "fft")
self.blocks_socket_pdu_0 = blocks.socket_pdu("TCP_SERVER", '0.0.0.0', '52001', 10000, True)
self.blocks_nlog10_ff_0 = blocks.nlog10_ff(10, nfft, -10*math.log10(nfft))
self.blocks_multiply_xx_0_0 = blocks.multiply_vcc(1)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(int(avg_len), 1/(avg_len)/nfft, 4000, nfft)
self.blocks_message_strobe_0 = blocks.message_strobe(pmt.intern("fft"), 500)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(nfft)
self.analog_sig_source_x_0_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, tune, 1, 0)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_message_strobe_0, 'strobe'), (self.blocks_socket_pdu_0, 'pdus'))
self.msg_connect((self.blocks_tagged_stream_to_pdu_0, 'pdus'), (self.blocks_message_strobe_0, 'set_msg'))
self.connect((self.analog_sig_source_x_0_0, 0), (self.blocks_multiply_xx_0_0, 1))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_moving_average_xx_0, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.blocks_nlog10_ff_0, 0))
self.connect((self.blocks_multiply_xx_0_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.blocks_vector_to_stream_0, 0))
self.connect((self.blocks_stream_to_tagged_stream_0, 0), (self.blocks_tagged_stream_to_pdu_0, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_stream_to_vector_1, 0), (self.qtgui_vector_sink_f_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.qtgui_waterfall_sink_x_0_1, 0))
self.connect((self.blocks_vector_to_stream_0, 0), (self.blocks_stream_to_tagged_stream_0, 0))
self.connect((self.blocks_vector_to_stream_0, 0), (self.blocks_stream_to_vector_1, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.sigmf_source_0, 0), (self.blocks_multiply_xx_0_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "dtv_sigmf_playback")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_mod_order(self):
return self.mod_order
def set_mod_order(self, mod_order):
self.mod_order = mod_order
def get_mod_scheme(self):
return self.mod_scheme
def set_mod_scheme(self, mod_scheme):
self.mod_scheme = mod_scheme
def get_rx_ant_model(self):
return self.rx_ant_model
def set_rx_ant_model(self, rx_ant_model):
self.rx_ant_model = rx_ant_model
def get_rx_db_ser(self):
return self.rx_db_ser
def set_rx_db_ser(self, rx_db_ser):
self.rx_db_ser = rx_db_ser
def get_rx_db_type(self):
return self.rx_db_type
def set_rx_db_type(self, rx_db_type):
self.rx_db_type = rx_db_type
def get_rx_ser_tag(self):
return self.rx_ser_tag
def set_rx_ser_tag(self, rx_ser_tag):
self.rx_ser_tag = rx_ser_tag
def get_rx_ser_uhd(self):
return self.rx_ser_uhd
def set_rx_ser_uhd(self, rx_ser_uhd):
self.rx_ser_uhd = rx_ser_uhd
def get_rx_type(self):
return self.rx_type
def set_rx_type(self, rx_type):
self.rx_type = rx_type
def get_signal_name(self):
return self.signal_name
def set_signal_name(self, signal_name):
self.signal_name = signal_name
self.set_fn("{:s}_{:s}".format(self.signal_name, self.ts_str))
def get_symbol_rate(self):
return self.symbol_rate
def set_symbol_rate(self, symbol_rate):
self.symbol_rate = symbol_rate
def get_ts_str(self):
return self.ts_str
def set_ts_str(self, ts_str):
self.ts_str = ts_str
self.set_fn("{:s}_{:s}".format(self.signal_name, self.ts_str))
def get_fn(self):
return self.fn
def set_fn(self, fn):
self.fn = fn
self.set_fp("/captures/dtv/{:s}".format(self.fn))
def get_tune(self):
return self.tune
def set_tune(self, tune):
self.tune = tune
Qt.QMetaObject.invokeMethod(self._tune_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.tune)))
self.analog_sig_source_x_0_0.set_frequency(self.tune)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
Qt.QMetaObject.invokeMethod(self._samp_rate_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.samp_rate)))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0_0.set_sampling_freq(self.samp_rate)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
Qt.QMetaObject.invokeMethod(self._rx_gain_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_gain)))
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
Qt.QMetaObject.invokeMethod(self._rx_freq_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_freq)))
def get_nfft(self):
return self.nfft
def set_nfft(self, nfft):
self.nfft = nfft
self.blocks_stream_to_tagged_stream_0.set_packet_len(self.nfft)
self.blocks_stream_to_tagged_stream_0.set_packet_len_pmt(self.nfft)
self.blocks_moving_average_xx_0.set_length_and_scale(int(self.avg_len), 1/(self.avg_len)/self.nfft)
def get_fp(self):
return self.fp
def set_fp(self, fp):
self.fp = fp
def get_avg_len(self):
return self.avg_len
def set_avg_len(self, avg_len):
self.avg_len = avg_len
Qt.QMetaObject.invokeMethod(self._avg_len_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.avg_len)))
self.blocks_moving_average_xx_0.set_length_and_scale(int(self.avg_len), 1/(self.avg_len)/self.nfft)
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--mod-order", dest="mod_order", type="intx", default=8,
help="Set mod_order [default=%default]")
parser.add_option(
"", "--mod-scheme", dest="mod_scheme", type="string", default='ATSC_8VSB',
help="Set mod_scheme [default=%default]")
parser.add_option(
"", "--rx-ant-model", dest="rx_ant_model", type="string", default='Decotec Tape Measure Discone',
help="Set rx_ant_model [default=%default]")
parser.add_option(
"", "--rx-db-ser", dest="rx_db_ser", type="string", default='na',
help="Set rx_db_ser [default=%default]")
parser.add_option(
"", "--rx-db-type", dest="rx_db_type", type="string", default='na',
help="Set rx_db_type [default=%default]")
parser.add_option(
"", "--rx-ser-tag", dest="rx_ser_tag", type="string", default='F50030',
help="Set rx_ser_tag [default=%default]")
parser.add_option(
"", "--rx-ser-uhd", dest="rx_ser_uhd", type="string", default='F50030',
help="Set rx_ser_uhd [default=%default]")
parser.add_option(
"", "--rx-type", dest="rx_type", type="string", default='B210',
help="Set rx_type [default=%default]")
parser.add_option(
"", "--signal-name", dest="signal_name", type="string", default='DTV',
help="Set signal_name [default=%default]")
parser.add_option(
"", "--symbol-rate", dest="symbol_rate", type="eng_float", default=eng_notation.num_to_str(10.76e6),
help="Set symbol_rate [default=%default]")
return parser
def main(top_block_cls=dtv_sigmf_playback, options=None):
if options is None:
options, _ = argument_parser().parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
import numpy as np
class lattice():
"""Contains functions to help out calculate matrices
in the Fermi-Hubbard model"""
def __init__(self, xs,ys,zs):
'''The dimensions of the grid are given to initialize the lattice.
Recommended max of 4 sites, otherwise it can take too long to
complete.'''
# x, y, and z have the shape of the grid, and contain the
# respective (x,y,z) coordinates of the latttic sites:
self.x, self.y, self.z = np.mgrid[ 0:xs, 0:ys, 0:zs]
self.xs = xs
self.ys = ys
self.zs = zs
def show(self,spins):
''' This prints a particular state to the terminal'''
for i in np.ravel(spins):
print "%d "%i,
print
def state(self,m):
'''
# Each site can have 4 possible configurations, we have
# labeled them as follows:
#
# 0 = vacuum
# 1 = spin up
# 2 = spin down
# 3 = doubly occupied
#
# All possible states are numbered with an index m. This function
# constructs the m_th state in the lattice. The spin configuration of
# the m_th state is stored in the 'spins' matrix and returned.
#
# Since there are 4 possible states per site (see above) the
# convention is that m be represented in base-4 (quaternary) and
# each digit can be assigned using the 0,1,2,3 convention above.
#
'''
spins = np.zeros_like( self.x)
i = 0
end = False
while m > 0:
if i>=spins.size:
end =True
break
spins.flat[i] = (m%4)
m = m /4
i = i +1
if end:
return None
else:
return spins
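    # Worked example (illustrative): for a 2-site lattice, m = 6 is "12" in
    # base 4, so state(6) places spin-down (2) on site 0 and spin-up (1) on
    # site 1; values of m needing more base-4 digits than there are sites
    # return None.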
def sector(self):
# Finds the spin sector for the current state
s = 0
for i in self.spins.flat:
if i == 0 : s = s+0
elif i == 1 : s = s+1
elif i == 2 : s = s-1
elif i == 3 : s = s+0
return s
def filling(self):
# Finds the filling for the current state
f = 0
for i in self.spins.flat:
if i == 0 : f = f+0
elif i == 1 : f = f+1
elif i == 2 : f = f+1
elif i == 3 : f = f+2
return f
def defstates(self):
'''This function defines the half filling states of the
Fermi-Hubbard model in a 3D lattice.
It creates a dictionary where the keys correspond to the
different spin sectors available, and the values are a list
of the states in the spin sector.
For a balanced spin mixture one only needs to consider the
spin=0 sector.
'''
end = False
n = 0
self.states = {}
while n < 300:
self.spins = self.state(n)
# ATTENTION: in this code we have changed to HALF-FILLING to
# QUARTER-FILLING, in order to explore the 2x2 lattice with only
# 2 particles. We use /2 in the filling check:
if self.spins is not None and self.filling() == self.spins.size/2:
sec = self.sector()
if sec in self.states.keys():
self.states[ sec].append(self.spins)
else:
self.states[ sec]=[self.spins]
n = n+1
for k in self.states.keys():
print "Sector %d, %d states:"%(k,len(self.states[k]))
for spins in self.states[k]:
self.show(spins)
def nearest(self):
'''This function makes a list of the nearest neighbor
pairs in the lattice'''
print "\nNearest neighbors:"
# First we create a flat list of all the lattice sites.
# each element in the list is (x[i], y[i], z[i], i)
sites = []
for i in range(self.x.size):
sites.append( (self.x.flat[i], self.y.flat[i], self.z.flat[i], i))
# We do a nested iteration over the lists and create a list
# of pairs which are nearest neighbors.
neighbors = []
for i,s1 in enumerate(sites):
for j,s2 in enumerate(sites):
if j > i:
d2 = (s1[0]-s2[0])**2 + (s1[1]-s2[1])**2 + (s1[2]-s2[2])**2
print s1,"--",s2," = ",d2
if d2 == 1:
neighbors.append( (s1[3],s2[3]))
print
print "Final neighbor list: "
print neighbors
self.neighbors = neighbors
def kinetic0(self):
r'''This function calculates the kinetic energy matrix
in the spin=0 sector.
The matrix is constructed by iterating over the nearest neighbors.
As a reminder, the kinertic enrgy is given by
K = -t \sum_{\langle i j \rangle} a_{i\sigma}^{\dagger} a_{j\sigma}
So in order to find it's matrix elements we need to apply first an
annihilation operator and then a creation operator. The tricky part
is keeping track of the signs.
'''
print
msize = len(self.states[0])
kinetic = np.zeros((msize,msize))
for i,s1 in enumerate(self.states[0]):
for j,s2 in enumerate(self.states[0]):
# We will calculate the matrix element
# < s1 | K | s2 >
# This matrix element involves a sum over nearest neighbors
# and sum over spins, so we go ahead and iterate:
t = 0.
for n in self.neighbors:
PRINT = False
for spin in ['up','down']:
if PRINT:
print
print "<", np.ravel(s1)," | K | ", np.ravel(s2),">"
# Annihilates 'spin' at site n[0]
signA, stateA = annihilate( n[0], spin, s2)
# Create 'spin' at site n[1]
signC, stateC = create(n[1], spin, stateA)
if PRINT:
print "annihilate %d,%5s"%(n[0],spin)," -->",stateA
print " create %d,%5s"%(n[1],spin)," -->",stateC
# If K|s2> has a projecton on <s1| then we add it to
# t
if np.array_equal(stateC,np.ravel(s1)):
if PRINT: print " tmatrix --> % d" % (signA*signC )
t+= signA*signC
r'''
Notice that sometimes people write the kinetic energy as
K = -t \sum_{\langle i j \rangle}
a_{i\sigma}^{\dagger} a_{j\sigma} + c.c.
where the letters c.c. refer to the complex conjugate.
If they do that, then it means that the sum over nearest
neighbors must only occur for one ordering of the
neighbor pair, for instance just 1-2 whereas the sum
over both orderings includes 1-2 and 2-1.
Here we just run the sum over both orderings.
'''
# We repeat the process with the different neighbor
# ordering:
signA, stateA = annihilate( n[1], spin, s2)
signC, stateC = create(n[0], spin, stateA)
if PRINT:
print "annihilate %d,%5s"%(n[1],spin)," -->",stateA
print " create %d,%5s"%(n[0],spin)," -->",stateC
if np.array_equal(stateC,np.ravel(s1)):
if PRINT: print " tmatrix --> % d" % (signA*signC )
t+= signA*signC
kinetic[i,j] = t
print "\nKinetic energy matrix: ",kinetic.shape
print kinetic
self.kinetic = kinetic
def interaction0(self):
'''This fuction calculates the interaction energy matrix
in the spin=0 sector'''
print
msize = len(self.states[0])
inter = np.zeros((msize,msize))
# The basis we have chose is of number states,
# so the interaction energy is diagonal
for i,s1 in enumerate(self.states[0]):
for site in s1.flat:
if site == 3: # 3=double occupancy
inter[i,i] = inter[i,i] + 1
print "\nInteraction energy matrix:i ",inter.shape
print inter
self.inter = inter
def diagonal0(self):
'''This fuction calculates a diagonal matrix
in the spin=0 sector'''
print
msize = len(self.states[0])
diag = np.zeros((msize,msize))
# The basis we have chose is of number states,
# so the interaction energy is diagonal
for i,s1 in enumerate(self.states[0]):
for site in s1.flat:
diag[i,i] = 1.0
self.diag = diag
def annihilate( i, spin, state):
# The order for the creation operators is lower site number
# to the left, and then spin-up to the left
s = np.ravel(state)
out = np.copy(s)
samespin = {'up':1, 'down':2}
flipspin = {'up':2, 'down':1}
ncommute = 0.
for j in range(i):
if s[j] == 3: ncommute +=2
if s[j] == 1 or s[j] == 2: ncommute+=1
sign = (-1)**ncommute
if s[i] == 0:
out = np.zeros_like(s)
if s[i] == flipspin[spin]:
out = np.zeros_like(s)
if s[i] == 3:
out[i] = flipspin[spin]
if spin == 'up': sign*= 1
if spin == 'down': sign*=-1
if s[i] == samespin[spin]:
out[i] = 0
#print s, ", annihilate %d,%5s"%(i,spin)," --> %+d"%sign, out
return sign, out
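# Hedged sketch (assumed implementation -- the original 'create' is not shown
# in this excerpt): the creation operator used by kinetic0() above, mirroring
# 'annihilate'.  It adds a particle of the given spin at site i, tracking the
# fermionic sign from anticommutation past occupied lower-index sites and the
# same up-before-down ordering convention within a site.
def create( i, spin, state):
    s = np.ravel(state)
    out = np.copy(s)
    samespin = {'up':1, 'down':2}
    flipspin = {'up':2, 'down':1}
    ncommute = 0.
    for j in range(i):
        if s[j] == 3: ncommute +=2
        if s[j] == 1 or s[j] == 2: ncommute+=1
    sign = (-1)**ncommute
    if s[i] == samespin[spin] or s[i] == 3:
        # site already holds this spin (or is full): the state is destroyed
        out = np.zeros_like(s)
    elif s[i] == 0:
        out[i] = samespin[spin]
    elif s[i] == flipspin[spin]:
        out[i] = 3
        # up created next to an existing down costs no sign; down created
        # past an existing up picks up a minus sign
        if spin == 'up': sign*= 1
        if spin == 'down': sign*=-1
    #print s, ", create %d,%5s"%(i,spin)," --> %+d"%sign, out
    return sign, out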
        x,y = int(floor(p[0]/self.ScaleFactor)),int(floor(p[1]/self.ScaleFactor))
w,h = self.Image.shape[-2:]
if x >= 0 and x < w and y >= 0 and y < h:
if self.Image.ndim == 2: count = self.Image[x,y]
elif self.Image.ndim == 3: count = self.Image[:,x,y]
self.SetStatusText("(%d,%d) count %s" % (x,y,count))
else: self.SetStatusText("")
if self.scale != None:
p1,p2 = self.pixel(self.scale[0]),self.pixel(self.scale[1])
else: p1,p2 = ((-100,-100),(-100,-100))
if self.MoveCrosshair:
if event.LeftDown():
self.SetFocus()
self.set_crosshair(event)
self.CaptureMouse()
self.dragging = "crosshair"
self.Refresh()
elif event.Dragging() and self.dragging:
self.set_crosshair(event)
self.Refresh()
elif event.LeftUp() and self.dragging:
self.ReleaseMouse()
self.dragging = None
self.Refresh()
elif self.show_scale or self.tool == "measure":
if event.LeftDown():
if self.tool == "measure":
P = self.point(p)
self.scale = [P,P]
self.show_scale = True
self.dragging = "scale2"
self.scale_selected = False
else:
if point_line_distance(p,(p1,p2)) < 5: self.scale_selected = True
else: self.scale_selected = False
if point_line_distance(p,(p1,p2)) < 5:
self.dragging = (self.point(p),list(self.scale))
if distance(p1,p) < 5: self.dragging = "scale1"
if distance(p2,p) < 5: self.dragging = "scale2"
if self.dragging:
self.SetFocus()
self.set_scale(event)
self.CaptureMouse()
self.Refresh()
elif event.Dragging() and self.dragging:
self.set_scale(event)
self.Refresh()
elif event.LeftUp() and self.dragging:
self.ReleaseMouse()
self.dragging = None
self.Refresh()
# Update the pointer shape to reflect the mouse function.
if self.MoveCrosshair:
self.SetCursor (wx.StockCursor(wx.CURSOR_PENCIL))
#self.SetCursor (self.crosshair_cursor) # garbled under Linux
# CURSOR_CROSS would be better than CURSOR_PENCIL.
# However, under Windows, the cross cursor does not have a white
# border and is hard to see on black background.
elif self.tool == "measure":
self.SetCursor (wx.StockCursor(wx.CURSOR_PENCIL))
elif self.dragging == "scale1" or self.dragging == "scale2":
self.SetCursor (wx.StockCursor(wx.CURSOR_SIZENESW))
elif self.dragging: self.SetCursor(wx.StockCursor(wx.CURSOR_SIZING))
elif self.scale_selected and (distance(p1,p) < 5 or distance(p2,p) < 5):
self.SetCursor(wx.StockCursor(wx.CURSOR_SIZENESW))
elif point_line_distance(p,(p1,p2)) < 5:
self.SetCursor(wx.StockCursor(wx.CURSOR_SIZING))
else: self.SetCursor (wx.StockCursor(wx.CURSOR_DEFAULT))
# CURSOR_SIZENESW would be better when the pointer is hovering over
# one of the end points.
# However, under Linux, the pointer shape does not update
# to CURSOR_PENCIL while dragging, only after the mouse button is
# released.
# CURSOR_CROSS would be better than CURSOR_PENCIL.
# However, under Windows, the cross cursor does not have a white
# border and is hard to see on black background.
def set_crosshair (self,event):
"Updates the crosshair position based on the last mouse event"
x,y = self.cursor_pos(event)
self.crosshair = (int(round(x/self.ScaleFactor)),int(round(y/self.ScaleFactor)))
def set_scale (self,event):
"Updates the scale based on the last mouse event"
p = self.cursor_pos(event)
if self.dragging == "scale1": self.scale[0] = self.point(p)
elif self.dragging == "scale2": self.scale[1] = self.point(p)
else:
P = self.point(p)
P0,(P1,P2) = self.dragging
self.scale[0] = translate(P1,vector(P0,P))
self.scale[1] = translate(P2,vector(P0,P))
def cursor_pos (self,event):
"""cursor position (x,y) during the given event, in virtual pixel
coordinates, relative to the top left corner of the image, in units
of screen pixels (not image pixels).
"""
x,y = self.CalcUnscrolledPosition (event.GetX(),event.GetY())
ox,oy = self.origin()
return x-ox,y-oy
def OnContextMenu (self,event):
menu = wx.Menu()
menu.Append (10,"Show Mask","",wx.ITEM_CHECK)
if self.show_mask: menu.Check(10,True)
self.Bind (wx.EVT_MENU,self.OnShowMask,id=10)
menu.Append (1,"Show Scale","",wx.ITEM_CHECK)
if self.show_scale: menu.Check(1,True)
self.Bind (wx.EVT_MENU,self.OnShowScale,id=1)
menu.Append (2,"Show Box","",wx.ITEM_CHECK)
if self.show_box: menu.Check(2,True)
self.Bind (wx.EVT_MENU,self.OnShowBox,id=2)
menu.Append (6,"Show Crosshair","",wx.ITEM_CHECK)
if self.show_crosshair: menu.Check(6,True)
self.Bind (wx.EVT_MENU,self.OnShowCrosshair,id=6)
menu.AppendSeparator()
menu.Append (7,"Measure","",wx.ITEM_CHECK)
self.Bind (wx.EVT_MENU,self.OnMeasure,id=7)
if self.tool == "measure": menu.Check(7,True)
menu.AppendSeparator()
if self.show_scale: menu.Append (8,"Scale...","")
self.Bind (wx.EVT_MENU,self.OnScaleProperties,id=8)
if self.show_crosshair: menu.Append (4,"Crosshair...","")
self.Bind (wx.EVT_MENU,self.OnCrosshairProperties,id=4)
if self.show_box: menu.Append (5,"Box...","")
self.Bind (wx.EVT_MENU,self.OnBoxProperties,id=5)
# Display the menu. If an item is selected then its handler will
# be called before 'PopupMenu' returns.
self.PopupMenu(menu)
menu.Destroy()
def OnShowMask (self,event):
"Called if 'Show Scale' is selected from the context menu"
self.show_mask = not self.show_mask
self.Refresh()
def OnShowScale (self,event):
"Called if 'Show Scale' is selected from the context menu"
self.show_scale = not self.show_scale
if self.show_scale and self.scale == None: self.set_default_scale()
self.Refresh()
def set_default_scale(self):
"Set default position for scale"
w,h = self.ImageSize; x,y = self.ImageOrigin
l = 0.4*w; l = round(l,int(round(-log10(l)+0.5)))
self.scale = [(x+w*0.5-l/2,y+h*0.05),(x+w*0.5+l/2,y+h*0.05)]
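# Added note: round(l, int(round(-log10(l)+0.5))) keeps roughly one
# significant figure, e.g. a raw length of 0.037 becomes 0.04, so the default
# scale bar is a "nice" number close to 40% of the image width.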
def OnShowBox (self,event):
"Called if 'Show Box' is selected from the context menu"
self.show_box = not self.show_box
self.Refresh()
def OnShowCrosshair (self,event):
"Called if 'Show Crosshair' is selected from the context menu"
self.show_crosshair = not self.show_crosshair
self.Refresh()
def GetMoveCrosshair (self): return (self.tool == "move crosshair")
def SetMoveCrosshair (self,value):
if value == True: self.tool = "move crosshair"
else: self.tool = None
MoveCrosshair = property(GetMoveCrosshair,SetMoveCrosshair,doc=
"Determines whether the crosshair is movable or locked")
def OnMeasure (self,event):
"Called if 'Measure' is selected from the context menu"
if self.tool != "measure": self.tool = "measure"
else: self.tool = None
def OnScaleProperties (self,event):
dlg = ScaleProperties(self)
dlg.CenterOnParent()
pos = dlg.GetPosition(); pos.y += 100; dlg.SetPosition(pos)
dlg.Show()
def OnCrosshairProperties (self,event):
dlg = CrosshairProperties(self)
dlg.CenterOnParent()
pos = dlg.GetPosition(); pos.y += 100; dlg.SetPosition(pos)
dlg.Show()
def OnBoxProperties (self,event):
dlg = BoxProperties(self)
dlg.CenterOnParent()
pos = dlg.GetPosition(); pos.y += 100; dlg.SetPosition(pos)
dlg.Show()
class CrosshairProperties (wx.Dialog):
"""Allows the user to to read the cross position, enter the position
numerically and change its color."""
def __init__ (self,parent):
wx.Dialog.__init__(self,parent,-1,"Crosshair")
# Controls
self.Coordinates = wx.TextCtrl (self,size=(75,-1),
style=wx.TE_PROCESS_ENTER)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterCoordinates,self.Coordinates)
self.Coordinates.SetValue("%d,%d" % parent.Crosshair)
self.Movable = wx.CheckBox(self,label="Movable")
self.Bind (wx.EVT_CHECKBOX,self.OnMovable,self.Movable)
if parent.MoveCrosshair: self.Movable.SetValue(True)
self.CrosshairSize = wx.TextCtrl (self,size=(75,-1),
style=wx.TE_PROCESS_ENTER)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterCrosshairSize,
self.CrosshairSize)
self.CrosshairSize.SetValue("%.3f,%.3f" % parent.crosshair_size)
self.ShowCrosshair = wx.CheckBox(self,label="Show")
self.Bind (wx.EVT_CHECKBOX,self.OnShowCrosshair,self.ShowCrosshair)
if parent.show_crosshair: self.ShowCrosshair.SetValue(True)
h = self.Coordinates.GetSize().y
from wx.lib.colourselect import ColourSelect,EVT_COLOURSELECT
self.Color = ColourSelect (self,colour=parent.crosshair_color,size=(h,h))
self.Color.Bind (EVT_COLOURSELECT,self.OnSelectColour)
# Layout
layout = wx.FlexGridSizer (cols=3,hgap=5,vgap=5)
label = wx.StaticText (self,label="Position (x,y) [pixels]:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Coordinates,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Movable,flag=wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText (self,label="Size (w,h) [mm]:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.CrosshairSize,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.ShowCrosshair,flag=wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText (self,label="Line color:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Color,flag=wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(layout)
self.Fit()
self.Bind (wx.EVT_CLOSE,self.OnClose)
def OnEnterCoordinates(self,event):
text = self.Coordinates.GetValue()
try:
(tx,ty) = text.split(",")
self.GetParent().Crosshair = (float(tx),float(ty))
except ValueError: return
def OnMovable(self,event):
self.GetParent().MoveCrosshair = self.Movable.GetValue()
def OnEnterCrosshairSize(self,event):
text = self.CrosshairSize.GetValue()
try:
(tx,ty) = text.split(",")
self.GetParent().crosshair_size = (float(tx),float(ty))
except ValueError: return
self.GetParent().Refresh()
def OnShowCrosshair(self,event):
self.GetParent().show_crosshair = self.ShowCrosshair.GetValue()
self.GetParent().Refresh()
def OnSelectColour(self,event):
self.GetParent().crosshair_color = event.GetValue()
self.GetParent().Refresh()
def OnClose(self,event):
"""Called when the close button is clocked.
When the dialog is closed automatically lock the crosshair."""
self.GetParent().MoveCrosshair = False
self.Destroy()
class BoxProperties (wx.Dialog):
"""Allows the user to change the box size and color"""
def __init__ (self,parent):
wx.Dialog.__init__(self,parent,-1,"Box")
# Controls
self.BoxSize = wx.TextCtrl (self,size=(75,-1),
style=wx.TE_PROCESS_ENTER)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterBoxSize,self.BoxSize)
self.BoxSize.SetValue("%.3f,%.3f" % parent.boxsize)
self.ShowBox = wx.CheckBox(self,label="Show")
self.Bind (wx.EVT_CHECKBOX,self.OnShowBox,self.ShowBox)
if parent.show_box: self.ShowBox.SetValue(True)
h = self.BoxSize.GetSize().y
from wx.lib.colourselect import ColourSelect,EVT_COLOURSELECT
self.Color = ColourSelect (self,colour=parent.box_color,size=(h,h))
self.Color.Bind (EVT_COLOURSELECT,self.OnSelectColour)
# Layout
layout = wx.FlexGridSizer (cols=3,hgap=5,vgap=5)
label = wx.StaticText (self,label="Width,Height [mm]:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.BoxSize,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.ShowBox,flag=wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText (self,label="Line color:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Color,flag=wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(layout)
self.Fit()
def OnEnterBoxSize(self,event):
text = self.BoxSize.GetValue()
try:
(tx,ty) = text.split(",")
self.GetParent().boxsize = (float(tx),float(ty))
except ValueError: return
self.GetParent().Refresh()
def OnShowBox(self,event):
self.GetParent().show_box = self.ShowBox.GetValue()
self.GetParent().Refresh()
def OnSelectColour(self,event):
self.GetParent().box_color = event.GetValue()
self.GetParent().Refresh()
class ScaleProperties (wx.Dialog):
"""Allows the user to enter the length of the measurement scale numerically,
make the line exactly horizontal or vertical and change its color.
"""
def __init__ (self,parent):
wx.Dialog.__init__(self,parent,-1,"Scale")
# Controls
self.Length = wx.TextCtrl (self,size=(60,-1),style=wx.TE_PROCESS_ENTER)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterLength,self.Length)
(P1,P2) = parent.scale; length = distance(P1,P2)
self.Length.SetValue("%.3f" % length)
self.Pixelsize = wx.TextCtrl (self,size=(60,-1),
style=wx.TE_PROCESS_ENTER)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterPixelsize,self.Pixelsize)
self.Pixelsize.SetValue("%.3f" % parent.pixelsize)
self.Horizontal = wx.CheckBox (self,label="Horizontal")
self.Bind (wx.EVT_CHECKBOX,self.OnHorizontal,self.Horizontal)
self.Vertical = wx.CheckBox (self,label="Vertical")
self.Bind (wx.EVT_CHECKBOX,self.OnVertical,self.Vertical)
v = vector(P1,P2)
if v[1] == 0: self.Horizontal.SetValue(True)
if v[0] == 0: self.Vertical.SetValue(True)
h = self.Length.GetSize().y
from wx.lib.colourselect import ColourSelect,EVT_COLOURSELECT
self.Color = ColourSelect (self,-1,"",parent.scale_color,size=(h,h))
self.Color.Bind (EVT_COLOURSELECT,self.OnSelectColour)
# Layout
layout = wx.FlexGridSizer (cols=2,hgap=5,vgap=5)
label = wx.StaticText (self,label="Length ["+parent.ScaleUnit+"]:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Length,flag=wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText (self,label="Pixel size [mm]:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Pixelsize,flag=wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText (self,label="Direction:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
group = wx.BoxSizer()
group.Add (self.Horizontal)
group.AddSpacer((5,5))
group.Add (self.Vertical)
layout.Add (group)
label = wx.StaticText (self,label="Line color:")
layout.Add (label,flag=wx.ALIGN_CENTER_VERTICAL)
layout.Add (self.Color,flag=wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(layout)
self.Fit()
def OnEnterLength(self,event):
text = self.Length.GetValue()
try: length = float(text)
except ValueError: return
parent = self.GetParent()
(P1,P2) = parent.scale
P2 = translate(P1,scale(direction(vector(P1,P2)),length))
parent.scale = [P1,P2]
parent.Refresh()
def OnEnterPixelsize(self,event):
text = self.Pixelsize.Value
try: value = float(text)
except ValueError: self.Pixelsize.Value = "1.000"; return
parent = self.Parent
parent.pixelsize = value
parent.Refresh()
def OnHorizontal(self,event):
self.Horizontal.SetValue(True); self.Vertical.SetValue(False)
parent = self.GetParent()
(P1,P2) = parent.scale; length = distance(P1,P2)
P2 = translate(P1,(length,0))
parent.scale = [P1,P2]
parent.Refresh()
def OnVertical(self,event):
self.Horizontal.SetValue(False); self.Vertical.SetValue(True)
parent = self.GetParent()
(P1,P2) = parent.scale; length = distance(P1,P2)
P2 = translate(P1,(0,length))
parent.scale = [P1,P2]
parent.Refresh()
def OnSelectColour(self,event):
self.GetParent().scale_color = event.GetValue()
self.GetParent().Refresh()
def distance ((x1,y1),(x2,y2)):
"Distance between two points"
return sqrt((x2-x1)**2+(y2-y1)**2)
def point_line_distance (P,line):
"Distance of a point to | |
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.var, **kwargs)
return self._full_axis_reduce(axis, func)
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def _full_axis_reduce_along_select_indices(self, func, axis, index):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces the dimension of the object and requires full
knowledge of the entire axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting QueryCompiler.
Returns:
A new QueryCompiler object with index or BaseFrameManager object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
return result
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
new_columns = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
.columns
)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
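# Added comment: calling describe() on an empty frame that only carries the
# column labels and dtypes is a cheap way to learn which columns pandas itself
# would keep (numeric columns by default, everything with include='all')
# before any partition is touched.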
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def _map_across_full_axis(self, axis, func):
return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(func, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(
new_data, self.index, self.columns, self._dtype_cache
)
def cummax(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummax(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummin(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumsum(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cumsum(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cumprod(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cumprod(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
def diff(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().diff(**kwargs).transpose()
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.diff, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
def eval_builder(df, **kwargs):
# pop the `axis` parameter because it was needed to build the mapreduce
# function but it is not a parameter used by `eval`.
kwargs.pop("axis", None)
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
return result
func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
new_data = self._map_across_full_axis(1, func)
if expect_series:
new_columns = [columns_copy.name]
new_index = index
else:
new_columns = columns_copy.columns
new_index = self.index
return self.__constructor__(new_data, new_index, new_columns)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return pandas.DataFrame(result)
func = self._prepare_method(mode_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
new_dtypes = self._dtype_cache
if new_dtypes is not None:
new_dtypes.index = new_columns
return self.__constructor__(
new_data, new_index, new_columns, new_dtypes
).dropna(axis=axis, how="all")
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
method = kwargs.get("method", None)
limit = kwargs.get("limit", None)
full_axis = method is not None or limit is not None
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
if full_axis:
new_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, fillna_dict_builder, value, keep_remaining=True
)
else:
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
if full_axis:
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(func)
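# Added comment (illustrative): when `value` is a dict its keys are first
# translated to positional indices, e.g. with columns ['a', 'b'] a call like
# fillna(value={'b': 0}) becomes {1: 0}, and fillna_dict_builder maps those
# positions back onto each partition's local column labels.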
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis:
# If along rows, then drop the nonnumeric columns, record the index, and
# take transpose. We have to do this because if we don't, the result is all
# in one column for some reason.
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
query_compiler = self.drop(columns=nonnumeric)
new_columns = query_compiler.index
else:
query_compiler = self
def quantile_builder(df, **kwargs):
result = df.quantile(**kwargs)
return result.T if axis == 1 else result
func = query_compiler._prepare_method(quantile_builder, **kwargs)
q_index = pandas.Float64Index(q)
new_data = query_compiler._map_across_full_axis(axis, func)
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_pandas`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basically we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = pandas.Float64Index(q)
result = self.__constructor__(new_data, q_index, new_columns)
return result.transpose() if axis == 1 else result
def query(self, expr, **kwargs):
"""Query columns of the QueryCompiler with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
QueryCompiler containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
QueryCompiler containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self._map_across_full_axis(axis, func)
# Since we assume no knowledge of internal state, we get the columns
# from the internal partitions.
| |
pos = [c[0] for c in points]
dbscan = DBSCAN(
eps=eps, min_samples=min_samples).fit(pos)
clustered_points = []
for label in range(np.max(dbscan.labels_) + 1):
if np.count_nonzero(dbscan.labels_ == label) <= min_points:
continue
for idx, p in enumerate(points):
if dbscan.labels_[idx] == label:
clustered_points.append(p)
return clustered_points
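# Usage sketch (added; the full signature is truncated above, but the call in
# filter_contact_points below passes min_points, min_samples and eps):
#   kept = cluster_contact_points(contact_points, min_points=5,
#                                 min_samples=2, eps=0.03)
# Points that DBSCAN labels as noise, or that fall in clusters with at most
# `min_points` members, are discarded; everything else is returned.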
def get_dbscan_core_coords(coords_list, dbscan):
idx = tuple(dbscan.core_sample_indices_)
if len(idx) == 0:
print('No core sample')
return coords_list, False
core_labels = [int(x) for x in list(
filter(lambda x: x != -1, dbscan.labels_))]
core_coords_list = itemgetter(*idx)(coords_list)
return core_coords_list, core_labels
def get_dbscan_label_coords(coords_list, dbscan, label):
idx = dbscan.labels_ == label
label_coords_list = np.array(coords_list)[(np.array(idx))].tolist()
return label_coords_list
def align_coords(coords_list, labels, angle_thresh=90., copy_list=True):
"""Align the x-axis of coords
invert coordinates above the threshold.
If you do not align, the average value of the
rotation map will be incorrect.
Parameters
----------
coords_list : list[skrobot.coordinates.base.Coordinates]
lables : numpy.ndarray
angle_thresh : float, optional
invert coordinates above the threshold, by default 135.0
copy_list ; bool, optional
If True copy coords_list, by default True
Returns
-------
coords_list : list[skrobot.coordinates.base.Coordinates]
"""
aligned_coords_list = []
if copy_list:
coords_list = copy.copy(coords_list)
for label in range(np.max(labels) + 1):
q_base = None
for idx, coords in enumerate(coords_list):
if labels[idx] == label:
if q_base is None:
q_base = coords.quaternion
q_distance \
= coordinates.math.quaternion_distance(
q_base, coords.quaternion)
if np.rad2deg(q_distance) > angle_thresh:
# coords_list[idx].rotate(np.pi, 'y')
aligned_coords_list.append(
coords_list[idx].copy_worldcoords().rotate(
np.pi, 'y'))
else:
aligned_coords_list.append(
coords_list[idx].copy_worldcoords())
return aligned_coords_list
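# Added note: within each DBSCAN label the first member's quaternion serves as
# the reference; any member whose quaternion distance to it exceeds
# `angle_thresh` degrees is rotated by pi about its y axis, so that coords
# detected "upside down" face the same way as the rest of their cluster
# before averaging.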
def split_label_coords(coords_list, labels):
"""Split coords based on label
Parameters
----------
coords_list : list[skrobot.coordinates.Coordinates]
Returns
-------
coords_clusters :list[tuple(skrobot.coordinates.Coordinates)]
"""
coords_clusters = []
labels = np.array(labels)
for label in range(np.max(labels) + 1):
idx = tuple(np.where(labels == label)[0])
coords_cluster = itemgetter(*idx)(coords_list)
if len(idx) == 1:
coords_cluster = (coords_cluster,)
coords_clusters.append(coords_cluster)
return coords_clusters
def make_average_coords_list(coords_list, labels, average_pos=True):
"""Make average orientation coords list
Parameters
----------
coords_list : list[skrobot.coordinates.Coordinates]
Returns
-------
coords_list : list[skrobot.coordinates.Coordinates]
"""
average_coords_list = []
coords_clusters = split_label_coords(coords_list, labels)
for coords_cluster in coords_clusters:
coords_average = average_coords(coords_cluster)
if average_pos:
average_coords_list.append(coords_average)
else:
for coords in coords_cluster:
coords.rotation = coords_average.rotation
average_coords_list.append(coords)
return average_coords_list
def averageQuaternions(Q):
"""Calculate average quaternion
https://github.com/christophhagen/averaging-quaternions/blob/master/LICENSE
Q is a Nx4 numpy matrix and contains the quaternions to average in the rows.
The quaternions are arranged as (w,x,y,z), with w being the scalar
The result will be the average quaternion of the input. Note that the signs
of the output quaternion can be reversed, since q and -q describe the same orientation
Parameters
----------
Q : numpy.ndarray or list[float]
Returns
-------
average quaternion : numpy.ndarray
"""
Q = np.array(Q)
M = Q.shape[0]
A = npm.zeros(shape=(4, 4))
for i in range(0, M):
q = Q[i, :]
A = np.outer(q, q) + A
A = (1.0 / M) * A
eigenValues, eigenVectors = np.linalg.eig(A)
eigenVectors = eigenVectors[:, eigenValues.argsort()[::-1]]
# return the real part of the largest eigenvector (has only real part)
return np.real(eigenVectors[:, 0].A1)
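# Small self-contained check (added sketch; relies on the module's own
# numpy.matlib import used as `npm` above): averaging a quaternion with
# itself returns that quaternion up to an overall sign.
def _demo_average_quaternions():
    import numpy as np
    q = np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation, (w, x, y, z)
    avg = averageQuaternions([q, q])
    assert np.allclose(np.abs(avg), np.abs(q))
    return avg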
def average_coords(coords_list):
"""Calculate average coords
Parameters
----------
coords_list : list[skrobot.coordinates.Coordinates]
Returns
-------
coords_average : skrobot.coordinates.Coordinates
"""
q_list = [c.quaternion for c in coords_list]
q_average = averageQuaternions(q_list)
pos_average = np.mean([c.worldpos() for c in coords_list], axis=0)
coords_average = coordinates.Coordinates(pos_average, q_average)
return coords_average
def coords_to_dict(coords_list, urdf_file, labels=None):
"""Cover coords list to dict for json
Parameters
----------
coords_list : list[skrobot.coordinates.Coordinates]
labels : list[int]
Returns
-------
contact_points_dict : dict
"""
contact_points_list = []
contact_points_dict = {
'urdf_file': urdf_file,
'contact_points': [],
'labels': []}
for coords in coords_list:
pose = np.concatenate(
[coords.T()[:3, 3][None, :],
coords.T()[:3, :3]]).tolist()
contact_points_list.append(pose)
contact_points_dict['contact_points'] = contact_points_list
if labels is not None:
contact_points_dict['labels'] = labels
return contact_points_dict
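# Added comment: each entry of 'contact_points' is a 4x3 nested list, the
# translation row followed by the 3x3 rotation matrix, e.g. (illustrative):
#   {'urdf_file': '.../base.urdf',
#    'contact_points': [[[x, y, z],
#                        [r00, r01, r02],
#                        [r10, r11, r12],
#                        [r20, r21, r22]], ...],
#    'labels': [0, 0, 1, ...]}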
def make_aligned_contact_points_coords(contact_points_coords, eps=0.01):
"""Aligned contact points coords
Parameters
----------
contact_points_coords : list[skrobot.coordinates.Coordinates]
[description]
eps : float, optional
eps paramerter of sklearn dbscan, by default 0.01
Returns
-------
[type]
[description]
"""
dbscan = dbscan_coords(contact_points_coords, eps=eps)
contact_points_coords, labels = get_dbscan_core_coords(
contact_points_coords, dbscan)
if not labels:
return False
aligned_contact_points_coords \
= align_coords(contact_points_coords, labels)
return aligned_contact_points_coords
def make_aligned_contact_points(contact_points_dict):
contact_points = contact_points_dict['contact_points']
urdf_file = str(contact_points_dict['urdf_file'])
contact_points_coords = make_contact_points_coords(contact_points)
aligned_contact_points_coords \
= make_aligned_contact_points_coords(contact_points_coords)
if not aligned_contact_points_coords:
return False
aligned_contact_points_dict = coords_to_dict(
aligned_contact_points_coords, urdf_file)
return aligned_contact_points_dict
def filter_penetration(obj_file, hanging_points,
box_size=[0.1, 0.0001, 0.0001],
translate=[0, 0.005, 0]):
"""Filter the penetrating hanging points
Parameters
----------
obj_file : str
obj file path (urdf, stl or obj)
hanging_points : list[list[list[float], list[float]]]
list of hanging points(=contact points)
box_size : list[float]
penetration check box of size [length, width, width] order
translate : list, optional
translate a box for penetration check,
by default [0, 0.005, 0]
Returns
-------
filtered_hanging_points: list[list[list[float], list[float]]]
penetrating_hanging_points: list[list[list[float], list[float]]]
"""
filtered_hanging_points = []
penetrating_hanging_points = []
path_without_ext, ext = osp.splitext(obj_file)
if ext == '.urdf':
obj_file = path_without_ext + '.stl'
if not osp.isfile(obj_file):
obj_file = path_without_ext + '.obj'
obj = skrobot.model.MeshLink(obj_file)
collision_manager = trimesh.collision.CollisionManager()
collision_manager.add_object('obj', obj.visual_mesh)
for hp in hanging_points:
penetration_check_box = skrobot.model.Box(
box_size,
face_colors=[255, 0, 0],
pos=hp[0], rot=hp[1:])
penetration_check_box.translate(translate)
penetration = collision_manager.in_collision_single(
penetration_check_box.visual_mesh, penetration_check_box.T())
if penetration:
penetrating_hanging_points.append(hp)
else:
filtered_hanging_points.append(hp)
return filtered_hanging_points, penetrating_hanging_points
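# Usage sketch (added; 'hook.urdf' is a hypothetical file with a companion
# hook.stl or hook.obj next to it, which is how the mesh path is resolved):
#   ok, bad = filter_penetration('hook.urdf', contact_points,
#                                box_size=[100, 0.0001, 0.0001],
#                                translate=[0, 0.005, 0])
# `ok` keeps the points whose translated check box does not collide with the
# object mesh; `bad` collects the penetrating ones.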
def sample_contact_points(contact_points, num_samples):
"""Sampling contact points for the specified number
Parameters
----------
contact_points : list[list[list[float], list[float]]]
num_samples : int
Returns
-------
contact_points : list[list[list[float], list[float]]]
"""
if num_samples > len(contact_points):
num_samples = len(contact_points)
idx = random.sample(range(0, len(contact_points)), num_samples)
return [contact_points[i] for i in idx]
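# Added note: sampling is without replacement, so requesting more samples than
# there are points simply returns all of them in random order, e.g.
#   sample_contact_points(points, num_samples=30)  # len(points) == 10 -> 10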
def set_contact_points_urdf_path(contact_points_path):
"""Set contact points urdf path
Set base.urdf in same directory
Parameters
----------
contact_points_path : str
"""
urdf_file = osp.join(osp.dirname(contact_points_path), 'base.urdf')
contact_points_dict = load_json(contact_points_path)
contact_points_dict['urdf_file'] = urdf_file
save_contact_points(contact_points_path, contact_points_dict)
def filter_contact_points(
contact_points_dict, cluster_min_points=-1,
min_samples=2, eps=0.03, num_samples=30,
use_filter_penetration=False, inf_penetration_check=False,
half_inf_penetration_check=False, translate=[0, 0.005, 0]):
"""Filter contact points by clustering, aligning, averageing
This function is similar to check_contact_points so should be merged.
Parameters
----------
contact_points_dict :
dict{'contact_points' : list[list[list[float], list[float]]]
'urdf_file' : str}
cluster_min_points : int, optional
if -1, set to 1/5 of the number of all the points. by default -1
min_samples : int, optional
dbscan clustering min samples parameter.
The number of samples (or total weight) in a
neighborhood for a point to be considered as
a core point. This includes the point itself.
by default 2.
eps : float, optional
eps parameter of sklearn dbscan, by default 0.03
num_samples : int, optional
sampling contact points with this value.
if -1, keep all points.
use_filter_penetration : bool, optional
by default False
inf_penetration_check : bool, optional
for hanging.
by default False.
half_inf_penetration_check : bool, optional
for pouring.
by default False
translate : list, optional
translate a box for penetration check,
by default [0, 0.005, 0]
Returns
-------
average_aligned_contact_points_coord_dict :
dict{'contact_points' : list[list[list[float], list[float]]]
'urdf_file' : str}
"""
urdf_file = str(contact_points_dict['urdf_file'])
contact_points = contact_points_dict['contact_points']
if len(contact_points) == 0:
print('No points')
return False
if use_filter_penetration \
or inf_penetration_check \
or half_inf_penetration_check:
# for hanging
if inf_penetration_check:
contact_points, _ = filter_penetration(
urdf_file, contact_points,
box_size=[100, 0.0001, 0.0001],
translate=translate)
# for pouring
elif half_inf_penetration_check:
contact_points, _ = filter_penetration(
urdf_file, contact_points,
box_size=[100, 0.0001, 0.0001],
translate=[-50.005, 0, 0])
else:
contact_points, _ = filter_penetration(
urdf_file, contact_points,
box_size=[0.1, 0.0001, 0.0001],
translate=translate)
print('penetration contact_points :%d' % len(contact_points))
if len(contact_points) == 0:
print('No points after penetration check')
return False
if cluster_min_points or cluster_min_points == -1:
contact_points = cluster_contact_points(
contact_points, min_points=cluster_min_points,
min_samples=min_samples, eps=eps)
print('clustering contact_points :%d' % len(contact_points))
if len(contact_points) == 0:
print('No points after clustering')
return False
contact_points_coords = make_contact_points_coords(contact_points)
dbscan = dbscan_coords(contact_points_coords, eps=eps)
contact_points_coords, labels = get_dbscan_core_coords(
contact_points_coords, dbscan)
if not labels:
return False
aligned_contact_points_coords \
= align_coords(contact_points_coords, labels)
average_aligned_contact_points_coords = make_average_coords_list(
aligned_contact_points_coords, labels, average_pos=False)
if num_samples > 0:
average_aligned_contact_points_coords = sample_contact_points(
average_aligned_contact_points_coords, num_samples=num_samples)
print('sampled points: ', len(average_aligned_contact_points_coords))
average_aligned_contact_points_coord_dict \
= coords_to_dict(
average_aligned_contact_points_coords, urdf_file, labels)
return average_aligned_contact_points_coord_dict
def filter_contact_points_dir(
input_dir, cluster_min_points=-1, min_samples=2, eps=0.03,
rate_thresh=0.1, num_samples=30,
use_filter_penetration=False, inf_penetration_check=False,
half_inf_penetration_check=False,
points_path_name='contact_points', suffix=''):
"""Filter all contact points in the directory
Parameters
----------
input_dir : str
hanging_object of hanging_object/category/contact_points/<fancy_dir>/contact_points.json # noqa
cluster_min_points : int, optional
minimum number of points required for the cluster
if -1, set to 1/5 of the number of all the points. by default -1
min_samples : int, optional
dbscan clustering min samples parameter.
The number of samples (or total weight) in a
neighborhood for a point to be considered as
a core point. This includes the point itself.
by default 2.
eps : float, optional
eps parameter of sklearn dbscan, by default 0.03
rate_thresh : float
Objects whose rate of the number of remaining points is greater than this value are skipped.
num_samples : int, optional
sampling contact points with this value
if -1, keep all points.
| |
# coding: utf-8
"""
LEIA RESTful API for AI
Leia API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Model(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_all_applications': 'bool',
'allowed_application_ids': 'list[str]',
'application_id': 'str',
'creation_time': 'datetime',
'description': 'str',
'documentation': 'str',
'id': 'str',
'input_types': 'list[ModelInputTypes]',
'md5sum': 'str',
'model_clazz': 'str',
'model_module': 'str',
'model_type': 'ModelTypes',
'name': 'str',
'output_format': 'object',
'short_name': 'str',
'size': 'float',
'speed': 'Speed',
'tags': 'list[str]',
'ttl': 'float'
}
attribute_map = {
'allow_all_applications': 'allow_all_applications',
'allowed_application_ids': 'allowed_application_ids',
'application_id': 'application_id',
'creation_time': 'creation_time',
'description': 'description',
'documentation': 'documentation',
'id': 'id',
'input_types': 'input_types',
'md5sum': 'md5sum',
'model_clazz': 'model_clazz',
'model_module': 'model_module',
'model_type': 'model_type',
'name': 'name',
'output_format': 'output_format',
'short_name': 'short_name',
'size': 'size',
'speed': 'speed',
'tags': 'tags',
'ttl': 'ttl'
}
def __init__(self, allow_all_applications=None, allowed_application_ids=None, application_id=None, creation_time=None, description=None, documentation=None, id=None, input_types=None, md5sum=None, model_clazz=None, model_module=None, model_type=None, name=None, output_format=None, short_name=None, size=None, speed=None, tags=None, ttl=None): # noqa: E501
"""Model - a model defined in Swagger""" # noqa: E501
self._allow_all_applications = None
self._allowed_application_ids = None
self._application_id = None
self._creation_time = None
self._description = None
self._documentation = None
self._id = None
self._input_types = None
self._md5sum = None
self._model_clazz = None
self._model_module = None
self._model_type = None
self._name = None
self._output_format = None
self._short_name = None
self._size = None
self._speed = None
self._tags = None
self._ttl = None
self.discriminator = None
if allow_all_applications is not None:
self.allow_all_applications = allow_all_applications
if allowed_application_ids is not None:
self.allowed_application_ids = allowed_application_ids
if application_id is not None:
self.application_id = application_id
self.creation_time = creation_time
if description is not None:
self.description = description
if documentation is not None:
self.documentation = documentation
self.id = id
self.input_types = input_types
if md5sum is not None:
self.md5sum = md5sum
self.model_clazz = model_clazz
self.model_module = model_module
self.model_type = model_type
self.name = name
if output_format is not None:
self.output_format = output_format
if short_name is not None:
self.short_name = short_name
self.size = size
if speed is not None:
self.speed = speed
if tags is not None:
self.tags = tags
if ttl is not None:
self.ttl = ttl
@property
def allow_all_applications(self):
"""Gets the allow_all_applications of this Model. # noqa: E501
:return: The allow_all_applications of this Model. # noqa: E501
:rtype: bool
"""
return self._allow_all_applications
@allow_all_applications.setter
def allow_all_applications(self, allow_all_applications):
"""Sets the allow_all_applications of this Model.
:param allow_all_applications: The allow_all_applications of this Model. # noqa: E501
:type: bool
"""
self._allow_all_applications = allow_all_applications
@property
def allowed_application_ids(self):
"""Gets the allowed_application_ids of this Model. # noqa: E501
:return: The allowed_application_ids of this Model. # noqa: E501
:rtype: list[str]
"""
return self._allowed_application_ids
@allowed_application_ids.setter
def allowed_application_ids(self, allowed_application_ids):
"""Sets the allowed_application_ids of this Model.
:param allowed_application_ids: The allowed_application_ids of this Model. # noqa: E501
:type: list[str]
"""
self._allowed_application_ids = allowed_application_ids
@property
def application_id(self):
"""Gets the application_id of this Model. # noqa: E501
:return: The application_id of this Model. # noqa: E501
:rtype: str
"""
return self._application_id
@application_id.setter
def application_id(self, application_id):
"""Sets the application_id of this Model.
:param application_id: The application_id of this Model. # noqa: E501
:type: str
"""
self._application_id = application_id
@property
def creation_time(self):
"""Gets the creation_time of this Model. # noqa: E501
:return: The creation_time of this Model. # noqa: E501
:rtype: datetime
"""
return self._creation_time
@creation_time.setter
def creation_time(self, creation_time):
"""Sets the creation_time of this Model.
:param creation_time: The creation_time of this Model. # noqa: E501
:type: datetime
"""
if creation_time is None:
raise ValueError("Invalid value for `creation_time`, must not be `None`") # noqa: E501
self._creation_time = creation_time
@property
def description(self):
"""Gets the description of this Model. # noqa: E501
:return: The description of this Model. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Model.
:param description: The description of this Model. # noqa: E501
:type: str
"""
self._description = description
@property
def documentation(self):
"""Gets the documentation of this Model. # noqa: E501
:return: The documentation of this Model. # noqa: E501
:rtype: str
"""
return self._documentation
@documentation.setter
def documentation(self, documentation):
"""Sets the documentation of this Model.
:param documentation: The documentation of this Model. # noqa: E501
:type: str
"""
self._documentation = documentation
@property
def id(self):
"""Gets the id of this Model. # noqa: E501
:return: The id of this Model. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Model.
:param id: The id of this Model. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def input_types(self):
"""Gets the input_types of this Model. # noqa: E501
:return: The input_types of this Model. # noqa: E501
:rtype: list[ModelInputTypes]
"""
return self._input_types
@input_types.setter
def input_types(self, input_types):
"""Sets the input_types of this Model.
:param input_types: The input_types of this Model. # noqa: E501
:type: list[ModelInputTypes]
"""
if input_types is None:
raise ValueError("Invalid value for `input_types`, must not be `None`") # noqa: E501
self._input_types = input_types
@property
def md5sum(self):
"""Gets the md5sum of this Model. # noqa: E501
The MD5 sum of the model # noqa: E501
:return: The md5sum of this Model. # noqa: E501
:rtype: str
"""
return self._md5sum
@md5sum.setter
def md5sum(self, md5sum):
"""Sets the md5sum of this Model.
The MD5 sum of the model # noqa: E501
:param md5sum: The md5sum of this Model. # noqa: E501
:type: str
"""
self._md5sum = md5sum
@property
def model_clazz(self):
"""Gets the model_clazz of this Model. # noqa: E501
The Python class name of the model # noqa: E501
:return: The model_clazz of this Model. # noqa: E501
:rtype: str
"""
return self._model_clazz
@model_clazz.setter
def model_clazz(self, model_clazz):
"""Sets the model_clazz of this Model.
The Python class name of the model # noqa: E501
:param model_clazz: The model_clazz of this Model. # noqa: E501
:type: str
"""
if model_clazz is None:
raise ValueError("Invalid value for `model_clazz`, must not be `None`") # noqa: E501
self._model_clazz = model_clazz
@property
def model_module(self):
"""Gets the model_module of this Model. # noqa: E501
The Python module hosting the code for the model # noqa: E501
:return: The model_module of this Model. # noqa: E501
:rtype: str
"""
return self._model_module
@model_module.setter
def model_module(self, model_module):
"""Sets the model_module of this Model.
The Python module hosting the code for the model # noqa: E501
:param model_module: The model_module of this Model. # noqa: E501
:type: str
"""
if model_module is None:
raise ValueError("Invalid value for `model_module`, must not be `None`") # noqa: E501
self._model_module = model_module
@property
def model_type(self):
"""Gets the model_type of this Model. # noqa: E501
:return: The model_type of this Model. # noqa: E501
:rtype: ModelTypes
"""
return self._model_type
@model_type.setter
def model_type(self, model_type):
"""Sets the model_type of this Model.
:param model_type: The model_type of this Model. # noqa: E501
:type: ModelTypes
"""
if model_type is None:
raise ValueError("Invalid value for `model_type`, must not be `None`") # noqa: E501
self._model_type = model_type
@property
def name(self):
"""Gets the name of this Model. # noqa: E501
:return: The name of this Model. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Model.
:param name: The name of this Model. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def output_format(self):
"""Gets the output_format of this Model. # noqa: E501
:return: The output_format of this Model. # noqa: E501
:rtype: object
"""
return self._output_format
@output_format.setter
def output_format(self, output_format):
"""Sets the output_format of this Model.
:param output_format: The output_format of this Model. # noqa: E501
:type: object
"""
self._output_format = output_format
@property
def short_name(self):
"""Gets the short_name of this Model. | |
# encoding: utf-8
"""Magic functions for InteractiveShell.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 <NAME> <<EMAIL>> and
# Copyright (C) 2001-2007 <NAME> <<EMAIL>>
# Copyright (C) 2008-2009 The IPython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import __builtin__
import __future__
import bdb
import inspect
import os
import sys
import shutil
import re
import time
import textwrap
import types
from cStringIO import StringIO
from getopt import getopt,GetoptError
from pprint import pformat
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile,pstats
except ImportError:
profile = pstats = None
import IPython
from IPython.core import debugger, oinspect
from IPython.core.error import TryNext
from IPython.core.error import UsageError
from IPython.core.fakemodule import FakeModule
from IPython.core.macro import Macro
from IPython.core import page
from IPython.core.prefilter import ESC_MAGIC
from IPython.lib.pylabtools import mpl_runner
from IPython.external.Itpl import itpl, printpl
from IPython.testing import decorators as testdec
from IPython.utils.io import file_read, nlprint
import IPython.utils.io
from IPython.utils.path import get_py_filename
from IPython.utils.process import arg_split, abbrev_cwd
from IPython.utils.terminal import set_term_title
from IPython.utils.text import LSString, SList, StringTypes, format_screen
from IPython.utils.timing import clock, clock2
from IPython.utils.warn import warn, error
from IPython.utils.ipstruct import Struct
import IPython.utils.generics
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ['OFF','ON'][tag]
class Bunch: pass
def compress_dhist(dh):
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
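# Added example (illustrative): the last 10 entries are kept verbatim and
# earlier duplicates are collapsed in first-seen order, e.g.
#   compress_dhist(['a', 'b', 'a', 'c'] + [str(i) for i in range(10)])
#   # -> ['a', 'b', 'c', '0', '1', ..., '9']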
#***************************************************************************
# Main class implementing Magic functionality
# XXX - for some odd reason, if Magic is made a new-style class, we get errors
# on construction of the main InteractiveShell object. Something odd is going
# on with super() calls, Configurable and the MRO... For now leave it as-is, but
# eventually this needs to be clarified.
# BG: This is because InteractiveShell inherits from this, but is itself a
# Configurable. This messes up the MRO in some way. The fix is that we need to
# make Magic a configurable that InteractiveShell does not subclass.
class Magic:
"""Magic functions for InteractiveShell.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
ALL definitions MUST begin with the prefix magic_. The user won't need it
at the command line, but it is needed in the definition. """
# class globals
auto_status = ['Automagic is OFF, % prefix IS needed for magic functions.',
'Automagic is ON, % prefix NOT needed for magic functions.']
#......................................................................
# some utility functions
def __init__(self,shell):
self.options_table = {}
if profile is None:
self.magic_prun = self.profile_missing_notice
self.shell = shell
# namespace for holding state we may need
self._magic_state = Bunch()
def profile_missing_notice(self, *args, **kwargs):
error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
def default_option(self,fn,optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr
def lsmagic(self):
"""Return a list of currently available magic functions.
Gives a list of the bare names after mangling (['ls','cd', ...], not
['magic_ls','magic_cd',...]"""
# FIXME. This needs a cleanup, in the way the magics list is built.
# magics in class definition
class_magic = lambda fn: fn.startswith('magic_') and \
callable(Magic.__dict__[fn])
# in instance namespace (run-time user additions)
inst_magic = lambda fn: fn.startswith('magic_') and \
callable(self.__dict__[fn])
# and bound magics by user (so they can access self):
inst_bound_magic = lambda fn: fn.startswith('magic_') and \
callable(self.__class__.__dict__[fn])
magics = filter(class_magic,Magic.__dict__.keys()) + \
filter(inst_magic,self.__dict__.keys()) + \
filter(inst_bound_magic,self.__class__.__dict__.keys())
out = []
for fn in set(magics):
out.append(fn.replace('magic_','',1))
out.sort()
return out
def extract_input_slices(self,slices,raw=False):
"""Return as a string a set of input history slices.
Inputs:
- slices: the set of slices is given as a list of strings (like
['1','4:8','9'], since this function is for use by magic functions
which get their arguments as strings.
Optional inputs:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint)."""
if raw:
hist = self.shell.history_manager.input_hist_raw
else:
hist = self.shell.history_manager.input_hist_parsed
cmds = []
for chunk in slices:
if ':' in chunk:
ini,fin = map(int,chunk.split(':'))
elif '-' in chunk:
ini,fin = map(int,chunk.split('-'))
fin += 1
else:
ini = int(chunk)
fin = ini+1
cmds.append(''.join(hist[ini:fin]))
return cmds
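# Added comment: both notations from the docstring in one example, e.g. with
# slices=['1', '4:8', '9'] this returns the joined input for line 1, lines
# 4..7 (Python-style '4:8') and line 9; '4-8' would also include line 8
# (closed endpoint).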
def arg_err(self,func):
"""Print docstring if incorrect arguments were passed"""
print 'Error in arguments:'
print oinspect.getdoc(func)
def format_latex(self,strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng
def parse_options(self,arg_str,opt_str,*long_opts,**kw):
"""Parse options passed to an argument string.
The interface is similar to that of getopt(), but it returns back a
Struct with the options as keys and the stripped argument string still
as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Options:
-mode: default 'string'. If given as 'list', the argument string is
returned as a list (split on whitespace) instead of a string.
-list_all: put all option values in lists. Normally only options
appearing more than once are put in a list.
-posix (True): whether to split the input line in POSIX mode or not,
as per the conventions outlined in the shlex module from the
standard library."""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name.replace('magic_','')
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError,'incorrect mode given: %s' % mode
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str,posix)
# Do regular option processing
try:
opts,args = getopt(argv,opt_str,*long_opts)
except GetoptError,e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args
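# Added usage sketch (hypothetical option string): a magic implemented as
#   opts, args = self.parse_options(parameter_s, 'rn:', mode='list')
# accepts a bare -r flag and an -n <value> option and returns the remaining
# arguments as a list; a repeated option ends up as a list of values only when
# it really occurs more than once or list_all=1 is passed.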
#......................................................................
# And now the actual magic functions
# Functions for IPython shell work (vars,funcs, config, etc)
def magic_lsmagic(self, parameter_s = ''):
"""List currently available magic functions."""
mesc = ESC_MAGIC
print 'Available magic functions:\n'+mesc+\
(' '+mesc).join(self.lsmagic())
print '\n' + Magic.auto_status[self.shell.automagic]
return None
def magic_magic(self, parameter_s = ''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
if parameter_s.split()[0] == '-latex':
mode = 'latex'
if parameter_s.split()[0] == '-brief':
mode = 'brief'
if parameter_s.split()[0] == '-rest':
mode = 'rest'
rest_docs = []
except:
pass
magic_docs = []
for fname in self.lsmagic():
mname = 'magic_' + fname
for space in (Magic,self,self.__class__):
try:
fn = space.__dict__[mname]
except KeyError:
pass
else:
break
if mode == 'brief':
# only first line
if fn.__doc__:
fndoc = fn.__doc__.split('\n',1)[0]
else:
fndoc = 'No documentation'
else:
if fn.__doc__:
fndoc = fn.__doc__.rstrip()
else:
fndoc = 'No documentation'
| |
a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``to`` is less than ``from``
:raise: ``NullArgument`` -- ``from`` or ``to`` ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_fixed_start_offset(self, match):
"""Matches fixed offset events.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_fixed_start_offset_terms(self):
"""Clears the fixed offset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
fixed_start_offset_terms = property(fdel=clear_fixed_start_offset_terms)
@abc.abstractmethod
def match_relative_weekday_start_offset(self, low, high, match):
"""Matches a relative weekday offset amount between the given range inclusive.
:param low: the start of the range
:type low: ``integer``
:param high: the end of the range
:type high: ``integer``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_relative_weekday_start_offset_terms(self):
"""Clears the relative weekday offset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
relative_weekday_start_offset_terms = property(fdel=clear_relative_weekday_start_offset_terms)
@abc.abstractmethod
def match_relative_start_weekday(self, weekday, match):
"""Matches a relative weekday.
:param weekday: the weekday
:type weekday: ``cardinal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_relative_start_weekday(self, match):
"""Matches relative weekday offset events.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_relative_start_weekday_terms(self):
"""Clears the relative weekday terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
relative_start_weekday_terms = property(fdel=clear_relative_start_weekday_terms)
@abc.abstractmethod
def match_fixed_duration(self, low, high, match):
"""Matches a fixed duration between the given range inclusive.
:param low: the start of the range
:type low: ``osid.calendaring.Duration``
:param high: the end of the range
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_fixed_duration_terms(self):
"""Clears the fixed duration offset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
fixed_duration_terms = property(fdel=clear_fixed_duration_terms)
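# Hedged sketch, not part of the OSID spec: a concrete query implementation would
# typically just record each term for the provider to evaluate later; the attribute
# name below is invented for illustration.
#   def match_fixed_duration(self, low, high, match):
#       self._fixed_duration_terms.append((low, high, bool(match)))
#   def clear_fixed_duration_terms(self):
#       self._fixed_duration_terms = []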
@abc.abstractmethod
def match_end_reference_event_id(self, event_id, match):
"""Sets the end reference event ``Id`` for this query.
:param event_id: an event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_end_reference_event_id_terms(self):
"""Clears the end reference event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
end_reference_event_id_terms = property(fdel=clear_end_reference_event_id_terms)
@abc.abstractmethod
def supports_end_reference_event_query(self):
"""Tests if an ``EventQuery`` is available for querying end reference event terms.
:return: ``true`` if an event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_end_reference_event_query(self):
"""Gets the query for the end reference event.
Multiple retrievals produce a nested ``OR`` term.
:return: the event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_event_reference_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_end_reference_event_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
end_reference_event_query = property(fget=get_end_reference_event_query)
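# Hedged usage sketch of the supports_*/get_* pairing above; variable names are
# illustrative. Callers probe the optional capability before requesting the sub-query,
# since get_end_reference_event_query() raises Unimplemented when unsupported.
#   if query.supports_end_reference_event_query():
#       event_query = query.get_end_reference_event_query()  # repeated calls OR together
#       # ...refine event_query with its own match_* methods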
@abc.abstractmethod
def match_any_end_reference_event(self, match):
"""Matches any end reference event events.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_end_reference_event_terms(self):
"""Clears the end reference event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
end_reference_event_terms = property(fdel=clear_end_reference_event_terms)
@abc.abstractmethod
def match_fixed_end_offset(self, from_, to, match):
"""Matches a fixed offset amount between the given range inclusive.
:param from: the start of the range
:type from: ``osid.calendaring.Duration``
:param to: the end of the range
:type to: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``to`` is less than ``from``
:raise: ``NullArgument`` -- ``from`` or ``to`` ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_fixed_end_offset(self, match):
"""Matches fixed offset events.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_fixed_end_offset_terms(self):
"""Clears the fixed offset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
fixed_end_offset_terms = property(fdel=clear_fixed_end_offset_terms)
@abc.abstractmethod
def match_relative_weekday_end_offset(self, low, high, match):
"""Matches a relative weekday offset amount between the given range inclusive.
:param low: the start of the range
:type low: ``integer``
:param high: the end of the range
:type high: ``integer``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_relative_weekday_end_offset_terms(self):
"""Clears the relative weekday offset terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
relative_weekday_end_offset_terms = property(fdel=clear_relative_weekday_end_offset_terms)
@abc.abstractmethod
def match_relative_end_weekday(self, weekday, match):
"""Matches a relative weekday.
:param weekday: the weekday
:type weekday: ``cardinal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_relative_end_weekday(self, match):
"""Matches relative weekday offset events.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_relative_end_weekday_terms(self):
"""Clears the relative weekday terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
relative_end_weekday_terms = property(fdel=clear_relative_end_weekday_terms)
@abc.abstractmethod
def match_location_description(self, location, string_match_type, match):
"""Matches the location description string.
:param location: location string
:type location: ``string``
:param string_match_type: string match type
:type string_match_type: ``osid.type.Type``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``location`` is not of ``string_match_type``
:raise: ``NullArgument`` -- ``location`` or ``string_match_type`` is ``null``
:raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_location_description(self, match):
"""Matches an event that has any location description assigned.
:param match: ``true`` to match events with any location description, ``false`` to match events with no location
description
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_description_terms(self):
"""Clears the location description terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_description_terms = property(fdel=clear_location_description_terms)
@abc.abstractmethod
def match_location_id(self, location_id, match):
"""Sets the location ``Id`` for this query.
:param location_id: a location ``Id``
:type location_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``location_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_id_terms(self):
"""Clears the location ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_id_terms = property(fdel=clear_location_id_terms)
@abc.abstractmethod
def supports_location_query(self):
"""Tests if a ``LocationQuery`` is available for querying locations.
:return: ``true`` if a location query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_location_query(self):
"""Gets the query for a location.
Multiple retrievals produce a nested ``OR`` term.
:return: the location query
:rtype: ``osid.mapping.LocationQuery``
:raise: ``Unimplemented`` -- ``supports_location_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_location_query()`` is ``true``.*
"""
return # osid.mapping.LocationQuery
location_query = property(fget=get_location_query)
@abc.abstractmethod
def match_any_location(self, match):
"""Matches an event that has any location assigned.
:param match: ``true`` to match events with any location, ``false`` to match events with no location
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_terms(self):
"""Clears the location terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_terms = property(fdel=clear_location_terms)
@abc.abstractmethod
def match_sponsor_id(self, sponsor_id, match):
"""Sets the sponsor ``Id`` for this query.
:param sponsor_id: a sponsor ``Id``
:type sponsor_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``sponsor_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
all steps are complete (default is on)
"""
kerberos_attributes: pulumi.Output[dict]
"""
Kerberos configuration for the cluster. Defined below
* `adDomainJoinPassword` (`str`) - The Active Directory password for `ad_domain_join_user`. This provider cannot perform drift detection of this configuration.
* `adDomainJoinUser` (`str`) - Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain. This provider cannot perform drift detection of this configuration.
* `crossRealmTrustPrincipalPassword` (`str`) - Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms. This provider cannot perform drift detection of this configuration.
* `kdcAdminPassword` (`str`) - The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster. This provider cannot perform drift detection of this configuration.
* `realm` (`str`) - The name of the Kerberos realm to which all nodes in a cluster belong. For example, `EC2.INTERNAL`
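For illustration only (both values below are placeholders, not defaults), a minimal block might look like:
    kerberos_attributes={
        "kdcAdminPassword": "<kdc-admin-password>",
        "realm": "EC2.INTERNAL",
    }
A cluster security configuration with Kerberos enabled is assumed to accompany it.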
"""
log_uri: pulumi.Output[str]
"""
S3 bucket to write the log files of the job flow. If a value is not provided, logs are not created
"""
master_instance_group: pulumi.Output[dict]
"""
Configuration block to use an [Instance Group](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for the [master node type](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-master). Cannot be specified if `master_instance_type` argument or `instance_group` configuration blocks are set. Detailed below.
* `bid_price` (`str`) - Bid price for each EC2 instance in the instance group, expressed in USD. By setting this attribute, the instance group is being declared as a Spot Instance, and will implicitly create a Spot request. Leave this blank to use On-Demand Instances.
* `ebs_configs` (`list`) - Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
* `iops` (`float`) - The number of I/O operations per second (IOPS) that the volume supports
* `size` (`float`) - The volume size, in gibibytes (GiB).
* `type` (`str`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`float`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`str`) - The ID of the EMR Cluster
* `instance_count` (`float`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instance_type` (`str`) - EC2 instance type for all instances in the instance group.
* `name` (`str`) - Friendly name given to the instance group.
"""
master_instance_type: pulumi.Output[str]
"""
Use the `master_instance_group` configuration block `instance_type` argument instead. The EC2 instance type of the master node. Cannot be specified if `master_instance_group` or `instance_group` configuration blocks are set.
"""
master_public_dns: pulumi.Output[str]
"""
The public DNS name of the master EC2 instance.
* `core_instance_group.0.id` - Core node type Instance Group ID, if using Instance Group for this node type.
"""
name: pulumi.Output[str]
"""
The name of the job flow (EMR cluster).
"""
release_label: pulumi.Output[str]
"""
The release label for the Amazon EMR release
"""
scale_down_behavior: pulumi.Output[str]
"""
The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an `instance group` is resized.
"""
security_configuration: pulumi.Output[str]
"""
The security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `release_label` 4.8.0 or greater
"""
service_role: pulumi.Output[str]
"""
IAM role that will be assumed by the Amazon EMR service to access AWS resources
"""
step_concurrency_level: pulumi.Output[float]
"""
The number of steps that can be executed concurrently. You can specify a maximum of 256 steps. Only valid for EMR clusters with `release_label` 5.28.0 or greater. (default is 1)
"""
steps: pulumi.Output[list]
"""
List of steps to run when creating the cluster. Defined below. It is highly recommended to utilize [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) if other steps are being managed outside of this provider.
* `actionOnFailure` (`str`) - The action to take if the step fails. Valid values: `TERMINATE_JOB_FLOW`, `TERMINATE_CLUSTER`, `CANCEL_AND_WAIT`, and `CONTINUE`
* `hadoopJarStep` (`dict`) - The JAR file used for the step. Defined below.
* `args` (`list`) - List of command line arguments passed to the JAR file's main function when executed.
* `jar` (`str`) - Path to a JAR file run during the step.
* `mainClass` (`str`) - Name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
* `properties` (`dict`) - Key-Value map of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.
* `name` (`str`) - The name of the step.
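For illustration only (the bucket path and step name are placeholders), a single entry using the keys above might look like:
    steps=[{
        "actionOnFailure": "CONTINUE",
        "hadoopJarStep": {
            "jar": "command-runner.jar",
            "args": ["spark-submit", "s3://my-bucket/job.py"],
        },
        "name": "Example Spark step",
    }]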
"""
tags: pulumi.Output[dict]
"""
list of tags to apply to the EMR Cluster
"""
termination_protection: pulumi.Output[bool]
"""
Switch on/off termination protection (default is `false`, except when using multiple master nodes). Before attempting to destroy the resource when termination protection is enabled, this configuration must be applied with its value set to `false`.
"""
visible_to_all_users: pulumi.Output[bool]
"""
Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default `true`
"""
def __init__(__self__, resource_name, opts=None, additional_info=None, applications=None, autoscaling_role=None, bootstrap_actions=None, configurations=None, configurations_json=None, core_instance_count=None, core_instance_group=None, core_instance_type=None, custom_ami_id=None, ebs_root_volume_size=None, ec2_attributes=None, instance_groups=None, keep_job_flow_alive_when_no_steps=None, kerberos_attributes=None, log_uri=None, master_instance_group=None, master_instance_type=None, name=None, release_label=None, scale_down_behavior=None, security_configuration=None, service_role=None, step_concurrency_level=None, steps=None, tags=None, termination_protection=None, visible_to_all_users=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an Elastic MapReduce Cluster, a web service that makes it easy to
process large amounts of data efficiently. See [Amazon Elastic MapReduce Documentation](https://aws.amazon.com/documentation/elastic-mapreduce/)
for more information.
To configure [Instance Groups](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for [task nodes](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-task), see the `emr.InstanceGroup` resource.
> Support for [Instance Fleets](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-fleets) will be made available in an upcoming release.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
cluster = aws.emr.Cluster("cluster",
additional_info=\"\"\"{
"instanceAwsClientConfiguration": {
"proxyPort": 8099,
"proxyHost": "myproxy.example.com"
}
}
\"\"\",
applications=["Spark"],
bootstrap_actions=[{
"args": [
"instance.isMaster=true",
"echo running on master node",
],
"name": "runif",
"path": "s3://elasticmapreduce/bootstrap-actions/run-if",
}],
configurations_json=\"\"\" [
{
"Classification": "hadoop-env",
"Configurations": [
{
"Classification": "export",
"Properties": {
"JAVA_HOME": "/usr/lib/jvm/java-1.8.0"
}
}
],
"Properties": {}
},
{
"Classification": "spark-env",
"Configurations": [
{
"Classification": "export",
"Properties": {
"JAVA_HOME": "/usr/lib/jvm/java-1.8.0"
}
}
],
"Properties": {}
}
]
\"\"\",
core_instance_group={
"autoscaling_policy": \"\"\"{
"Constraints": {
"MinCapacity": 1,
"MaxCapacity": 2
},
"Rules": [
{
"Name": "ScaleOutMemoryPercentage",
"Description": "Scale out if YARNMemoryAvailablePercentage is less than 15",
"Action": {
"SimpleScalingPolicyConfiguration": {
"AdjustmentType": "CHANGE_IN_CAPACITY",
"ScalingAdjustment": 1,
"CoolDown": 300
}
},
"Trigger": {
"CloudWatchAlarmDefinition": {
"ComparisonOperator": "LESS_THAN",
"EvaluationPeriods": 1,
"MetricName": "YARNMemoryAvailablePercentage",
"Namespace": "AWS/ElasticMapReduce",
"Period": 300,
"Statistic": "AVERAGE",
"Threshold": 15.0,
"Unit": "PERCENT"
}
}
}
]
}
\"\"\",
"bid_price": "0.30",
"ebsConfig": [{
"size": "40",
"type": "gp2",
"volumesPerInstance": 1,
}],
"instance_count": 1,
"instance_type": "c4.large",
},
ebs_root_volume_size=100,
ec2_attributes={
"emrManagedMasterSecurityGroup": aws_security_group["sg"]["id"],
"emrManagedSlaveSecurityGroup": aws_security_group["sg"]["id"],
"instanceProfile": aws_iam_instance_profile["emr_profile"]["arn"],
"subnet_id": aws_subnet["main"]["id"],
},
keep_job_flow_alive_when_no_steps=True,
master_instance_group={
"instance_type": "m4.large",
},
release_label="emr-4.6.0",
service_role=aws_iam_role["iam_emr_service_role"]["arn"],
tags={
"env": "env",
"role": "rolename",
},
termination_protection=False)
```
### Enable Debug Logging
```python
import pulumi
import pulumi_aws as aws
example = aws.emr.Cluster("example",
lifecycle={
"ignoreChanges": [
"stepConcurrencyLevel",
"steps",
],
},
steps=[{
"actionOnFailure": "TERMINATE_CLUSTER",
"hadoopJarStep": {
"args": ["state-pusher-script"],
"jar": "command-runner.jar",
},
"name": "Setup Hadoop Debugging",
}])
```
### Multiple Node Master Instance Group
```python
import pulumi
import pulumi_aws as aws
# Map public IP on launch must be enabled for public (Internet accessible) subnets
example_subnet = aws.ec2.Subnet("exampleSubnet", map_public_ip_on_launch=True)
example_cluster = aws.emr.Cluster("exampleCluster",
core_instance_group={},
ec2_attributes={
"subnet_id": example_subnet.id,
},
master_instance_group={
"instance_count": 3,
},
release_label="emr-5.24.1",
termination_protection=True)
```
## Example bootable config
**NOTE:** This configuration demonstrates a minimal configuration needed to
boot an example EMR Cluster. It is not meant to display best practices. Please
use at your own risk.
names or ids query param. This will fail with a 412 Precondition failed if the resource was changed and the current version of the resource doesn't match the value in the query param.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: NfsExportPolicyResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if versions is not None:
if not isinstance(versions, list):
versions = [versions]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'policy' is set
if policy is None:
raise TypeError("Missing the required parameter `policy` when calling `api23_nfs_export_policies_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'versions' in params:
query_params.append(('versions', params['versions']))
collection_formats['versions'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'policy' in params:
body_params = params['policy']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/nfs-export-policies', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NfsExportPolicyResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
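# Hedged usage sketch; `nfs_api` stands for an instance of this API class (however it is
# constructed) and the policy name/version values are placeholders.
#   body = models.NfsExportPolicy()   # fields to change; omitted here
#   nfs_api.api23_nfs_export_policies_patch_with_http_info(
#       policy=body, names=["export_policy_1"], versions=["3"])
# Passing `versions` gives optimistic concurrency: the PATCH fails with 412 Precondition
# Failed if the policy changed since that version was read.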
def api23_nfs_export_policies_post_with_http_info(
self,
names=None, # type: List[str]
policy=None, # type: models.NfsExportPolicy
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.NfsExportPolicyResponse
"""POST nfs-export-policies
Create a new NFS export policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_nfs_export_policies_post_with_http_info(names, async_req=True)
>>> result = thread.get()
:param list[str] names: A comma-separated list of resource names. (required)
:param NfsExportPolicy policy:
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: NfsExportPolicyResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'names' is set
if names is None:
raise TypeError("Missing the required parameter `names` when calling `api23_nfs_export_policies_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'policy' in params:
body_params = params['policy']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/nfs-export-policies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NfsExportPolicyResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
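# Hedged usage sketch; as above, `nfs_api` is assumed to be an instance of this class.
# `names` is the only required argument here (see the TypeError check above), and the
# policy body is optional on create.
#   nfs_api.api23_nfs_export_policies_post_with_http_info(
#       names=["export_policy_1"], policy=models.NfsExportPolicy())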
def api23_nfs_export_policies_rules_delete_with_http_info(
self,
ids=None, # type: List[str]
names=None, # type: List[str]
versions=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""DELETE nfs-export-policies/rules
Delete one or more NFS export policy rules. One of the following is required: `ids` or `names`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_nfs_export_policies_rules_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param list[str] versions: A comma-separated list of versions. This is an optional query param used for concurrency control. The ordering should match the names or ids query param. This will fail with a 412 Precondition failed if the resource was changed and the current version of the resource doesn't match the value in the query param.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if versions is not None:
if not isinstance(versions, list):
versions = [versions]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'versions' in params:
query_params.append(('versions', params['versions']))
collection_formats['versions'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/nfs-export-policies/rules', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_nfs_export_policies_rules_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
policy_ids=None, # type: List[str]
policy_names=None, # type: List[str]
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.NfsExportPolicyRuleGetResponse
"""GET nfs-export-policies/rules
Displays a list of NFS export policy rules. The default sort is by policy name, then index.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_nfs_export_policies_rules_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
100508,
-62995,
166399177,
100507,
-62994,
166464543,
100506,
-62993,
166529923,
100505,
-62992,
166595274,
100504,
-62991,
166660594,
100503,
-62990,
166725974,
100502,
-1,
166791366,
100501,
166988286,
166202243,
-1,
-62987,
166923910,
100500,
-62986,
166989319,
100499,
-62985,
167054689,
100498,
-62984,
167120073,
100497,
-62983,
167185439,
100496,
-62982,
167250819,
100495,
-62981,
167316170,
100494,
-62980,
167381490,
100493,
-62979,
167446870,
100492,
-1,
167512262,
100491,
167709193,
166922954,
-1,
-62976,
167644806,
100490,
-62975,
167710215,
100489,
-62974,
167775585,
100488,
-62973,
167840969,
100487,
-62972,
167906335,
100486,
-62971,
167971715,
100485,
-62970,
168037066,
100484,
-62969,
168102386,
100483,
-62968,
168167766,
100482,
-1,
168233158,
100481,
168430100,
167643634,
-1,
-62965,
168365702,
100480,
-62964,
168431111,
100479,
-62963,
168496481,
100478,
-62962,
168561865,
100477,
-62961,
168627231,
100476,
-62960,
168692611,
100475,
-62959,
168757962,
100474,
-62958,
168823282,
100473,
-62957,
168888662,
100472,
-1,
168954054,
100471,
169151007,
168364374,
-1,
-62954,
169086598,
100470,
-62953,
169152007,
100469,
-62952,
169217377,
100468,
-62951,
169282761,
100467,
-62950,
169348127,
100466,
-62949,
169413507,
100465,
-62948,
169478858,
100464,
-62947,
169544178,
100463,
-62946,
169609558,
100462,
-1,
169674950,
100461,
169934847,
169085126,
-1,
-62943,
169807494,
100460,
-62942,
169872903,
100459,
-62941,
169938273,
100458,
-62940,
170003657,
100457,
-62939,
170069023,
100456,
-62938,
170134403,
100455,
-62937,
170199754,
100454,
-62936,
170265074,
100453,
-62935,
170330454,
100452,
-1,
170395846,
100451,
170592920,
163252422,
-1,
170658358,
170528390,
-1,
-62931,
170593926,
100450,
-62930,
170659335,
100449,
-62929,
170724705,
100448,
-62928,
170790089,
100447,
-62927,
170855455,
100446,
-62926,
170920835,
100445,
-62925,
170986186,
100444,
-62924,
171051506,
100443,
-62923,
171116886,
100442,
-1,
171182278,
100441,
171379265,
170593799,
-1,
-62920,
171314822,
100440,
-62919,
171380231,
100439,
-62918,
171445601,
100438,
-62917,
171510985,
100437,
-62916,
171576351,
100436,
-62915,
171641731,
100435,
-62914,
171707082,
100434,
-62913,
171772402,
100433,
-62912,
171837782,
100432,
-1,
171903174,
100431,
172100172,
171314529,
-1,
-62909,
172035718,
100430,
-62908,
172101127,
100429,
-62907,
172166497,
100428,
-62906,
172231881,
100427,
-62905,
172297247,
100426,
-62904,
172362627,
100425,
-62903,
172427978,
100424,
-62902,
172493298,
100423,
-62901,
172558678,
100422,
-1,
172624070,
100421,
172821079,
172035273,
-1,
-62898,
172756614,
100420,
-62897,
172822023,
100419,
-62896,
172887393,
100418,
-62895,
172952777,
100417,
-62894,
173018143,
100416,
-62893,
173083523,
100415,
-62892,
173148874,
100414,
-62891,
173214194,
100413,
-62890,
173279574,
100412,
-1,
173344966,
100411,
173541986,
172755999,
-1,
-62887,
173477510,
100410,
-62886,
173542919,
100409,
-62885,
173608289,
100408,
-62884,
173673673,
100407,
-62883,
173739039,
100406,
-62882,
173804419,
100405,
-62881,
173869770,
100404,
-62880,
173935090,
100403,
-62879,
174000470,
100402,
-1,
174065862,
100401,
174262893,
173476739,
-1,
-62876,
174198406,
100400,
-62875,
174263815,
100399,
-62874,
174329185,
100398,
-62873,
174394569,
100397,
-62872,
174459935,
100396,
-62871,
174525315,
100395,
-62870,
174590666,
100394,
-62869,
174655986,
100393,
-62868,
174721366,
100392,
-1,
174786758,
100391,
174983800,
174197450,
-1,
-62865,
174919302,
100390,
-62864,
174984711,
100389,
-62863,
175050081,
100388,
-62862,
175115465,
100387,
-62861,
175180831,
100386,
-62860,
175246211,
100385,
-62859,
175311562,
100384,
-62858,
175376882,
100383,
-62857,
175442262,
100382,
-1,
175507654,
100381,
175704707,
174918130,
-1,
-62854,
175640198,
100380,
-62853,
175705607,
100379,
-62852,
175770977,
100378,
-62851,
175836361,
100377,
-62850,
175901727,
100376,
-62849,
175967107,
100375,
-62848,
176032458,
100374,
-62847,
176097778,
100373,
-62846,
176163158,
100372,
-1,
176228550,
100371,
176425614,
175638870,
-1,
-62843,
176361094,
100370,
-62842,
176426503,
100369,
-62841,
176491873,
100368,
-62840,
176557257,
100367,
-62839,
176622623,
100366,
-62838,
176688003,
100365,
-62837,
176753354,
100364,
-62836,
176818674,
100363,
-62835,
176884054,
100362,
-1,
176949446,
100361,
177209343,
176359622,
-1,
-62832,
177081990,
100360,
-62831,
177147399,
100359,
-62830,
177212769,
100358,
-62829,
177278153,
100357,
-62828,
177343519,
100356,
-62827,
177408899,
100355,
-62826,
177474250,
100354,
-62825,
177539570,
100353,
-1,
177604950,
100352,
177864703,
170528097,
-1,
177867428,
177736579,
-1,
-62821,
177802886,
101100,
-62820,
177868295,
101099,
-62819,
177933665,
101098,
-62818,
177999049,
101097,
-62817,
178064415,
101096,
-62816,
178129795,
101095,
-62815,
178195146,
101094,
-62814,
178260466,
101093,
-62813,
178325846,
101092,
-1,
178391238,
101091,
178588335,
177801930,
-1,
-62810,
178523782,
101090,
-62809,
178589191,
101089,
-62808,
178654561,
101088,
-62807,
178719945,
101087,
-62806,
178785311,
101086,
-62805,
178850691,
101085,
-62804,
178916042,
101084,
-62803,
178981362,
101083,
-62802,
179046742,
101082,
-1,
179112134,
101081,
179309242,
178522610,
-1,
-62799,
179244678,
101080,
-62798,
179310087,
101079,
-62797,
179375457,
101078,
-62796,
179440841,
101077,
-62795,
179506207,
101076,
-62794,
179571587,
101075,
-62793,
179636938,
101074,
-62792,
179702258,
101073,
-62791,
179767638,
101072,
-1,
179833030,
101071,
180030149,
179243350,
-1,
-62788,
179965574,
101070,
-62787,
180030983,
101069,
-62786,
180096353,
101068,
-62785,
180161737,
101067,
-62784,
180227103,
101066,
-62783,
180292483,
101065,
-62782,
180357834,
101064,
-62781,
180423154,
101063,
-62780,
180488534,
101062,
-1,
180553926,
101061,
180751056,
179964102,
-1,
-62777,
180686470,
101060,
-62776,
180751879,
101059,
-62775,
180817249,
101058,
-62774,
180882633,
101057,
-62773,
180947999,
101056,
-62772,
181013379,
101055,
-62771,
181078730,
101054,
-62770,
181144050,
101053,
-62769,
181209430,
101052,
-1,
181274822,
101051,
181534719,
180685855,
-1,
-62766,
181406751,
101106,
-62765,
181472131,
101105,
-62764,
181537482,
101104,
-62763,
181602802,
101103,
-62762,
181668182,
101102,
-1,
181733574,
101101,
-1,
126830765,
94176,
-1,
126759133,
127818,
-1,
126685073,
127883,
182127662,
126638489,
-1,
182193173,
182092017,
-1,
182258461,
182147927,
-1,
182323959,
182213463,
-1,
182389483,
182278999,
-1,
182455011,
182325009,
-1,
-62751,
182421655,
983864,
-62750,
182468701,
983861,
-1,
182521617,
983854,
182717157,
182421655,
983857,
-1,
182683799,
983858,
182848231,
182673509,
983862,
-1,
182804581,
983863,
182979305,
182796381,
983855,
-1,
182927453,
983856,
183173119,
182922079,
983859,
-1,
183053151,
983860,
183241455,
182325009,
-1,
-62739,
183208087,
983732,
-62738,
183255133,
983729,
-1,
183308049,
983722,
183503601,
183208087,
983725,
-1,
183470231,
983726,
183634675,
183459941,
983730,
-1,
183591013,
983731,
183765749,
183582813,
983723,
-1,
183713885,
983724,
183959551,
183708511,
983727,
-1,
183839583,
983728,
184027908,
182294189,
-1,
184093436,
183963409,
-1,
-62726,
184060055,
983710,
-62725,
184107101,
983707,
-1,
184160017,
983700,
184355582,
184060055,
983703,
-1,
184322199,
983704,
184486656,
184311909,
983708,
-1,
184442981,
983709,
184617730,
184434781,
983701,
-1,
184565853,
983702,
184811519,
184560479,
983705,
-1,
184691551,
983706,
184879889,
183973801,
-1,
184945417,
184815377,
-1,
-62713,
184912023,
983688,
-62712,
184959069,
983685,
-1,
185011985,
983678,
185207563,
184912023,
983681,
-1,
185174167,
983682,
185338637,
185163877,
983686,
-1,
185294949,
983687,
185469711,
185286749,
983679,
-1,
185417821,
983680,
185663487,
185412447,
983683,
-1,
185543519,
983684,
185731861,
184815377,
-1,
-62701,
185698455,
983754,
-62700,
185745501,
983751,
-1,
185798417,
983744,
185994007,
185698455,
983747,
-1,
185960599,
983748,
186125081,
185950309,
983752,
-1,
186081381,
983753,
186256155,
186073181,
983745,
-1,
186204253,
983746,
186449919,
186198879,
983749,
-1,
186329951,
983750,
186518341,
182220834,
-1,
186583852,
186464967,
-1,
186649379,
186519313,
-1,
-62687,
186615959,
983886,
-62686,
186663005,
983883,
-1,
186715921,
983876,
186911525,
186615959,
983879,
-1,
186878103,
983880,
187042599,
186867813,
983884,
-1,
186998885,
983885,
187173673,
186990685,
983877,
-1,
187121757,
983878,
187304747,
187116383,
983881,
-1,
187247455,
983882,
-1,
187266325,
983932,
187501369,
186546210,
-1,
187566897,
187436817,
-1,
-62673,
187533463,
983897,
-62672,
187580509,
983894,
-1,
187633425,
983887,
187829043,
187533463,
983890,
-1,
187795607,
983891,
187960117,
187785317,
983895,
-1,
187916389,
983896,
188091191,
187908189,
983888,
-1,
188039261,
983889,
188284927,
188033887,
983892,
-1,
188164959,
983893,
188353341,
187436817,
-1,
-62661,
188319895,
983908,
-62660,
188366941,
983905,
-1,
188419857,
983898,
188615487,
188319895,
983901,
-1,
188582039,
983902,
188746561,
188571749,
983906,
-1,
188702821,
983907,
188877635,
188694621,
983899,
-1,
188825693,
983900,
189071359,
188820319,
983903,
-1,
188951391,
983904,
189139820,
186469623,
-1,
189205344,
189091063,
-1,
189270868,
189156599,
-1,
189336396,
189206289,
-1,
-62646,
189302935,
983831,
-62645,
189349981,
983828,
-1,
189402897,
983821,
189598542,
189302935,
983824,
-1,
189565079,
983825,
189729616,
189554789,
983829,
-1,
189685861,
983830,
189860690,
189677661,
983822,
-1,
189808733,
983823,
190054399,
189803359,
983826,
-1,
189934431,
983827,
190122840,
189206289,
-1,
-62634,
190089367,
983842,
-62633,
190136413,
983839,
-1,
190189329,
983832,
190384986,
190089367,
983835,
-1,
190351511,
983836,
190516060,
190341221,
983840,
-1,
190472293,
983841,
190647134,
190464093,
983833,
-1,
190595165,
983834,
190840831,
190589791,
983837,
-1,
190720863,
983838,
190909284,
189140753,
-1,
-62622,
190875799,
983809,
-62621,
190922845,
983806,
-1,
190975761,
983799,
191171430,
190875799,
983802,
-1,
191137943,
983803,
191302504,
191127653,
983807,
-1,
191258725,
983808,
191433578,
191250525,
983800,
-1,
191381597,
983801,
191627263,
191376223,
983804,
-1,
191507295,
983805,
191695750,
189090499,
-1,
191761274,
191659715,
-1,
191826802,
191696657,
983920,
-62608,
191793303,
983931,
-62607,
191840349,
983928,
-1,
191893265,
983921,
192088948,
191793303,
983924,
-1,
192055447,
983925,
192220022,
192045157,
983929,
-1,
192176229,
983930,
192351096,
192168029,
983922,
-1,
192299101,
983923,
192544767,
192293727,
983926,
-1,
192424799,
983927,
192613246,
191696657,
-1,
-62596,
192579735,
983677,
-62595,
192626781,
983674,
-1,
192679697,
983667,
192875392,
192579735,
983670,
-1,
192841879,
983671,
193006466,
192831589,
983675,
-1,
192962661,
983676,
193137540,
192954461,
983668,
-1,
193085533,
983669,
193331199,
193080159,
983672,
-1,
193211231,
983673,
193399712,
191660366,
-1,
193465236,
193364302,
-1,
193530764,
193400593,
-1,
-62582,
193497239,
983721,
-62581,
193544285,
983718,
-1,
193597201,
983711,
193792910,
193497239,
983714,
-1,
193759383,
983715,
193923984,
193749093,
983719,
-1,
193880165,
983720,
194055058,
193871965,
983712,
-1,
194003037,
983713,
194248703,
193997663,
983716,
-1,
194128735,
983717,
194317208,
193400593,
-1,
-62570,
194283671,
983743,
-62569,
# -*- coding: utf-8 -*-
"""Module implementing the abstract Runner."""
import os
import json
from .abstract_runner_utils import float2str
from .abstract_runner_utils import _add_hp_to_argparse
import time
import abc
import argparse
import warnings
from copy import deepcopy
from deepobs import config as global_config
import glob
class Runner(abc.ABC):
"""Abstract base class for all different runners in DeepOBS.
Captures everything that is common to both frameworks and every runner type.
This includes folder creation and writing of the output to the folder.
Attributes:
_optimizer_class: See argument optimizer_class
_optimizer_name: The name of the optimizer class
_hyperparameter_names: A nested dictionary that lists all hyperparameters of the optimizer,
their type and their default values
Methods:
run: An abstract method that is overwritten by the tensorflow and pytorch
specific subclasses. It performs the actual run on a testproblem.
training: An abstract method that performs the actual training and is overwritten by the subclasses.
create_output_directory: Creates the output folder of the run.
write_output: Writes the output of the run to the output directory.
"""
def __init__(self, optimizer_class, hyperparameter_names):
""" Creates a new Runner instance
Args:
optimizer_class: The optimizer class of the optimizer that is run on \
the testproblems. For PyTorch this must be a subclass of torch.optim.Optimizer. For \
TensorFlow a subclass of tf.train.Optimizer.
hyperparameter_names: A nested dictionary that lists all hyperparameters of the optimizer,\
their type and their default values (if they have any).
Example
-------
>>> optimizer_class = tf.train.MomentumOptimizer
>>> hyperparms = {'lr': {'type': float},
>>> 'momentum': {'type': float, 'default': 0.99},
>>> 'uses_nesterov': {'type': bool, 'default': False}}
>>> runner = StandardRunner(optimizer_class, hyperparms)
"""
self._optimizer_class = optimizer_class
self._optimizer_name = optimizer_class.__name__
self._hyperparameter_names = hyperparameter_names
def run(self,
testproblem=None,
hyperparams=None,
batch_size=None,
num_epochs=None,
random_seed=None,
data_dir=None,
output_dir=None,
weight_decay=None,
no_logs=None,
train_log_interval=None,
print_train_iter=None,
tb_log=None,
tb_log_dir=None,
skip_if_exists=False,
**training_params):
"""Runs a testproblem with the optimizer_class. Has the following tasks:
1. setup testproblem
2. run the training (must be implemented by subclass)
3. merge and write output
Args:
testproblem (str): Name of the testproblem.
hyperparams (dict): The explicit values of the hyperparameters of the optimizer that are used for training.
batch_size (int): Mini-batch size for the training data.
num_epochs (int): The number of training epochs.
random_seed (int): The torch random seed.
data_dir (str): The path where the data is stored.
output_dir (str): Path of the folder where the results are written to.
weight_decay (float): Regularization factor for the testproblem.
no_logs (bool): Whether to write the output or not.
train_log_interval (int): Mini-batch interval for logging.
print_train_iter (bool): Whether to print the training progress at each train_log_interval.
tb_log (bool): Whether to use tensorboard logging or not
tb_log_dir (str): The path where to save tensorboard events.
skip_if_exists (bool): Skip training if the output already exists.
training_params (dict): Kwargs for the training method.
Returns:
dict: {<...meta data...>, \
'test_losses' : test_losses, \
'valid_losses': valid_losses \
'train_losses': train_losses, \
'test_accuracies': test_accuracies, \
'valid_accuracies': valid_accuracies \
'train_accuracies': train_accuracies, \
} \
where <...meta data...> stores the run args.
"""
exists, matches = self.run_exists(
testproblem=testproblem,
hyperparams=hyperparams,
batch_size=batch_size,
num_epochs=num_epochs,
random_seed=random_seed,
data_dir=data_dir,
output_dir=output_dir,
weight_decay=weight_decay,
no_logs=no_logs,
train_log_interval=train_log_interval,
print_train_iter=print_train_iter,
tb_log=tb_log,
tb_log_dir=tb_log_dir,
**training_params)
require_run = not (exists and skip_if_exists)
if require_run:
args = self.parse_args(
testproblem,
hyperparams,
batch_size,
num_epochs,
random_seed,
data_dir,
output_dir,
weight_decay,
no_logs,
train_log_interval,
print_train_iter,
tb_log,
tb_log_dir,
training_params,
)
return self._run(**args)
else:
print("Found output file(s): {}\nSkipping run.".format(matches))
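# Hedged usage sketch; MyRunner/MyOptimizer, the hyperparameter values and the
# 'quadratic_deep' testproblem name are placeholders for whatever the concrete
# framework-specific runner and optimizer actually are.
#   runner = MyRunner(MyOptimizer, {'lr': {'type': float}})
#   runner.run(testproblem='quadratic_deep', hyperparams={'lr': 0.01},
#              num_epochs=2, random_seed=42, skip_if_exists=True)
# With skip_if_exists=True, an existing matching .json output makes run() return early.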
def _run(self,
testproblem=None,
hyperparams=None,
batch_size=None,
num_epochs=None,
random_seed=None,
data_dir=None,
output_dir=None,
weight_decay=None,
no_logs=None,
train_log_interval=None,
print_train_iter=None,
tb_log=None,
tb_log_dir=None,
**training_params):
# Creates a backup copy of the initial parameters. Users might change the dicts during training.
hyperparams_before_training = deepcopy(hyperparams)
training_params_before_training = deepcopy(training_params)
batch_size = self._use_default_batch_size_if_missing(
testproblem, batch_size)
num_epochs = self._use_default_num_epochs_if_missing(
testproblem, num_epochs)
if data_dir is not None:
global_config.set_data_dir(data_dir)
run_directory, file_name = self.generate_output_directory_name(
testproblem, batch_size, num_epochs, weight_decay, random_seed,
output_dir, hyperparams, **training_params)
if tb_log:
if tb_log_dir == 'none':
print(
'Tensorboard logging: No tb_log_dir specified, using settings folder {0:s} as default.'
.format(run_directory))
os.makedirs(run_directory, exist_ok=True)
tb_log_dir = run_directory
tproblem = self.create_testproblem(testproblem, batch_size,
weight_decay, random_seed)
output = self.training(tproblem, hyperparams, num_epochs,
print_train_iter, train_log_interval, tb_log,
tb_log_dir, **training_params)
output = self._post_process_output(output, testproblem, batch_size,
num_epochs, random_seed,
weight_decay,
hyperparams_before_training,
**training_params_before_training)
if not no_logs:
os.makedirs(run_directory, exist_ok=True)
self.write_output(output, run_directory, file_name)
return output
def run_exists(self,
testproblem=None,
hyperparams=None,
batch_size=None,
num_epochs=None,
random_seed=None,
data_dir=None,
output_dir=None,
weight_decay=None,
no_logs=None,
train_log_interval=None,
print_train_iter=None,
tb_log=None,
tb_log_dir=None,
**training_params):
"""Return whether output file for this run already exists.
Args:
See `run` method.
Returns:
bool, list(str): The first parameter is `True` if the `.json` \
output file already exists, else `False`. The list contains \
the paths to the files that match the run.
"""
args = self.parse_args(
testproblem,
hyperparams,
batch_size,
num_epochs,
random_seed,
data_dir,
output_dir,
weight_decay,
no_logs,
train_log_interval,
print_train_iter,
tb_log,
tb_log_dir,
training_params,
)
return self._run_exists(**args)
def _run_exists(self,
testproblem=None,
hyperparams=None,
batch_size=None,
num_epochs=None,
random_seed=None,
data_dir=None,
output_dir=None,
weight_decay=None,
no_logs=None,
train_log_interval=None,
print_train_iter=None,
tb_log=None,
tb_log_dir=None,
**training_params):
batch_size = self._use_default_batch_size_if_missing(
testproblem, batch_size)
num_epochs = self._use_default_num_epochs_if_missing(
testproblem, num_epochs)
run_directory, _ = self.generate_output_directory_name(
testproblem, batch_size, num_epochs, weight_decay, random_seed,
output_dir, hyperparams, **training_params)
file_regex = "{}*.json".format(self._filename_no_date(random_seed))
pattern = os.path.join(run_directory, file_regex)
matches = glob.glob(pattern)
exists = bool(matches)
return exists, matches
def _use_default_batch_size_if_missing(self, testproblem, batch_size):
fall_back_to_default = (batch_size is None)
if fall_back_to_default:
batch_size = self._use_default(testproblem, 'batch_size')
return batch_size
def _use_default_num_epochs_if_missing(self, testproblem, num_epochs):
fall_back_to_default = (num_epochs is None)
if fall_back_to_default:
num_epochs = self._use_default(testproblem, 'num_epochs')
return num_epochs
@staticmethod
def _use_default(testproblem, key):
return global_config.get_testproblem_default_setting(testproblem)[key]
@abc.abstractmethod
def training(self, tproblem, hyperparams, num_epochs, print_train_iter,
train_log_interval, tb_log, tb_log_dir, **training_params):
"""Performs the training and stores the metrics.
Args:
tproblem (deepobs.[tensorflow/pytorch].testproblems.testproblem): The testproblem instance to train on.
hyperparams (dict): The optimizer hyperparameters to use for the training.
num_epochs (int): The number of training epochs.
print_train_iter (bool): Whether to print the training progress at every train_log_interval
train_log_interval (int): Mini-batch interval for logging.
tb_log (bool): Whether to use tensorboard logging or not
tb_log_dir (str): The path where to save tensorboard events.
**training_params (dict): Kwargs for additional training parameters that are implemented by subclass.
Returns:
dict: The logged metrics. It is of the form: \
{'test_losses' : [...], \
'valid_losses': [...], \
'train_losses': [...], \
'test_accuracies': [...], \
'valid_accuracies': [...], \
'train_accuracies': [...] \
} \
where the metric values are lists that were filled during training.
"""
return
@staticmethod
@abc.abstractmethod
def evaluate(*args, **kwargs):
pass
@staticmethod
@abc.abstractmethod
def create_testproblem(*args, **kwargs):
pass
def _add_training_params_to_argparse(self, parser, args, training_params):
"""Overwrite this method to specify how your
runner should read in additional training_parameters and to add them to argparse.
Args:
parser (argparse.ArgumentParser): The argument parser object.
args (dict): The args that are parsed as locals to the run method.
training_params (dict): Training parameters that are to read in.
"""
pass
def _add_hyperparams_to_argparse(self, parser, args, hyperparams):
"""Overwrite this method to specify how your
runner should read in optimizer hyper_parameters and to add them to argparse.
Args:
parser (argparse.ArgumentParser): The argument parser object.
args (dict): The args that are parsed as locals to the run method.
hyperparams (dict): Hyperparameters that are to read in.
"""
if hyperparams is None: # if no hyperparams dict is passed to run()
for hp_name, hp_specification in self._hyperparameter_names.items(
):
_add_hp_to_argparse(parser, self._optimizer_name,
hp_specification, hp_name)
else: # if there is one, fill the missing params from command line
for hp_name, hp_specification in self._hyperparameter_names.items(
):
if hp_name in hyperparams:
args[hp_name] = hyperparams[hp_name]
else:
_add_hp_to_argparse(parser, self._optimizer_name,
hp_specification, hp_name)
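# Hedged illustration of the fallback behaviour implemented above: if run() received
# hyperparams={'lr': 0.01}, only the remaining entries of _hyperparameter_names (say
# 'momentum') are exposed as command-line arguments; with hyperparams=None every
# optimizer hyperparameter becomes an argparse argument. The example values are made up.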
def _add_training_params_to_output_dir_name(self, training_params,
run_folder_name):
"""Overwrite this method to specify how your
runner should format additional training_parameters in the run folder name.
Args:
training_params (dict): The training parameters.
run_folder_name (str): The current name of the run folder that the training parameters are added to.
Returns:
str: The new run folder name.
"""
for tp_name, tp_value in sorted(training_params.items()):
if tp_value is not None:
run_folder_name += "__{0:s}".format(tp_name)
run_folder_name += "__{0:s}".format(
float2str(tp_value) if isinstance(tp_value, float
) else str(tp_value))
return run_folder_name
def _add_hyperparams_to_output_dir_name(self, optimizer_hyperparams,
run_folder_name):
"""Overwrite this method to specify how your
runner should format optimizer hyper_parameters in the run folder name.
Args:
optimizer_hyperparams (dict): The optimizer hyperparameters.
run_folder_name (str): The current name of the run folder that the hyperparameters are added to.
Returns:
str: The new run folder name.
"""
for hp_name, hp_value in sorted(optimizer_hyperparams.items()):
run_folder_name += "__{0:s}".format(hp_name)
run_folder_name += "__{0:s}".format(
float2str(hp_value) if isinstance(hp_value, float
) else str(hp_value))
return run_folder_name
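# Hedged illustration of the naming scheme above; the exact float formatting is whatever
# float2str produces.
#   {'lr': 0.01, 'momentum': 0.9}
#   -> run_folder_name + "__lr__" + float2str(0.01) + "__momentum__" + float2str(0.9)
# Keys are appended in sorted order, so identical settings always map to the same folder.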
def parse_args(self, testproblem, hyperparams, batch_size, num_epochs,
random_seed, data_dir, output_dir, weight_decay,
<reponame>schemacs/supervisor
import unittest
from supervisor.tests.base import DummySupervisor
from supervisor.tests.base import DummyRequest
from supervisor.tests.base import DummySupervisorRPCNamespace
from supervisor.compat import xmlrpclib
from supervisor.compat import httplib
class GetFaultDescriptionTests(unittest.TestCase):
def test_returns_description_for_known_fault(self):
from supervisor import xmlrpc
desc = xmlrpc.getFaultDescription(xmlrpc.Faults.SHUTDOWN_STATE)
self.assertEqual(desc, 'SHUTDOWN_STATE')
def test_returns_unknown_for_unknown_fault(self):
from supervisor import xmlrpc
desc = xmlrpc.getFaultDescription(999999)
self.assertEqual(desc, 'UNKNOWN')
class RPCErrorTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.xmlrpc import RPCError
return RPCError
def _makeOne(self, code, extra=None):
return self._getTargetClass()(code, extra)
def test_sets_text_with_fault_name_only(self):
from supervisor import xmlrpc
e = self._makeOne(xmlrpc.Faults.FAILED)
self.assertEqual(e.text, 'FAILED')
def test_sets_text_with_fault_name_and_extra(self):
from supervisor import xmlrpc
e = self._makeOne(xmlrpc.Faults.FAILED, 'oops')
self.assertEqual(e.text, 'FAILED: oops')
class XMLRPCMarshallingTests(unittest.TestCase):
def test_xmlrpc_marshal(self):
from supervisor import xmlrpc
data = xmlrpc.xmlrpc_marshal(1)
self.assertEqual(data, xmlrpclib.dumps((1,), methodresponse=True))
fault = xmlrpclib.Fault(1, 'foo')
data = xmlrpc.xmlrpc_marshal(fault)
self.assertEqual(data, xmlrpclib.dumps(fault))
class XMLRPCHandlerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.xmlrpc import supervisor_xmlrpc_handler
return supervisor_xmlrpc_handler
def _makeOne(self, supervisord, subinterfaces):
return self._getTargetClass()(supervisord, subinterfaces)
def test_ctor(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
self.assertEqual(handler.supervisord, supervisor)
from supervisor.xmlrpc import RootRPCInterface
self.assertEqual(handler.rpcinterface.__class__, RootRPCInterface)
def test_match(self):
class DummyRequest2:
def __init__(self, uri):
self.uri = uri
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
self.assertEqual(handler.match(DummyRequest2('/RPC2')), True)
self.assertEqual(handler.match(DummyRequest2('/nope')), False)
def test_continue_request_nosuchmethod(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = xmlrpclib.dumps(('a', 'b'), 'supervisor.noSuchMethod')
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 2)
self.assertEqual(logdata[-2],
'XML-RPC method called: supervisor.noSuchMethod()')
self.assertEqual(logdata[-1],
('XML-RPC method supervisor.noSuchMethod() returned fault: '
'[1] UNKNOWN_METHOD'))
self.assertEqual(len(request.producers), 1)
xml_response = request.producers[0]
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, xml_response)
def test_continue_request_methodsuccess(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = xmlrpclib.dumps((), 'supervisor.getAPIVersion')
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 2)
self.assertEqual(logdata[-2],
'XML-RPC method called: supervisor.getAPIVersion()')
self.assertEqual(logdata[-1],
'XML-RPC method supervisor.getAPIVersion() returned successfully')
self.assertEqual(len(request.producers), 1)
xml_response = request.producers[0]
response = xmlrpclib.loads(xml_response)
from supervisor.rpcinterface import API_VERSION
self.assertEqual(response[0][0], API_VERSION)
self.assertEqual(request._done, True)
self.assertEqual(request.headers['Content-Type'], 'text/xml')
self.assertEqual(request.headers['Content-Length'], len(xml_response))
def test_continue_request_no_params_in_request(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = '<?xml version="1.0" encoding="UTF-8"?>' \
'<methodCall>' \
'<methodName>supervisor.getAPIVersion</methodName>' \
'</methodCall>'
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 2)
self.assertEqual(logdata[-2],
'XML-RPC method called: supervisor.getAPIVersion()')
self.assertEqual(logdata[-1],
'XML-RPC method supervisor.getAPIVersion() returned successfully')
self.assertEqual(len(request.producers), 1)
xml_response = request.producers[0]
response = xmlrpclib.loads(xml_response)
from supervisor.rpcinterface import API_VERSION
self.assertEqual(response[0][0], API_VERSION)
self.assertEqual(request._done, True)
self.assertEqual(request.headers['Content-Type'], 'text/xml')
self.assertEqual(request.headers['Content-Length'], len(xml_response))
def test_continue_request_400_if_method_name_is_empty(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = '<?xml version="1.0" encoding="UTF-8"?>' \
'<methodCall><methodName></methodName></methodCall>'
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 1)
self.assertEqual(logdata[-1],
'XML-RPC request received with no method name')
self.assertEqual(len(request.producers), 0)
self.assertEqual(request._error, 400)
def test_continue_request_500(self):
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = xmlrpclib.dumps((), 'supervisor.raiseError')
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 2)
self.assertEqual(logdata[-2],
'XML-RPC method called: supervisor.raiseError()')
self.assertTrue(logdata[-1].startswith('Traceback'))
self.assertTrue(logdata[-1].endswith('ValueError: error\n'))
self.assertEqual(len(request.producers), 0)
self.assertEqual(request._error, 500)
def test_continue_request_value_is_function(self):
class DummyRPCNamespace(object):
def foo(self):
def inner(self):
return 1
inner.delay = .05
return inner
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace()),
('ns1', DummyRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
data = xmlrpclib.dumps((), 'ns1.foo')
request = DummyRequest('/what/ever', None, None, None)
handler.continue_request(data, request)
logdata = supervisor.options.logger.data
self.assertEqual(len(logdata), 2)
self.assertEqual(logdata[-2],
'XML-RPC method called: ns1.foo()')
self.assertEqual(logdata[-1],
'XML-RPC method ns1.foo() returned successfully')
self.assertEqual(len(request.producers), 0)
self.assertEqual(request._done, False)
def test_iterparse_loads_methodcall(self):
s = """<?xml version="1.0"?>
<methodCall>
<methodName>examples.getStateName</methodName>
<params>
<param>
<value><i4>41</i4></value>
</param>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string></string></value>
</param>
<param>
<!-- xml-rpc spec allows strings without <string> tag -->
<value>bar</value>
</param>
<param>
<value></value>
</param>
<param>
<value><boolean>1</boolean></value>
</param>
<param>
<value><double>-12.214</double></value>
</param>
<param>
<value>
<dateTime.iso8601>19980717T14:08:55</dateTime.iso8601>
</value>
</param>
<param>
<value><base64>eW91IGNhbid0IHJlYWQgdGhpcyE=</base64></value>
</param>
<param>
<struct>
<member><name>j</name><value><i4>5</i4></value></member>
<member><name>k</name><value>abc</value></member>
</struct>
</param>
<param>
<array>
<data>
<value><i4>12</i4></value>
<value><string>abc</string></value>
<value>def</value>
<value><i4>34</i4></value>
</data>
</array>
</param>
<param>
<struct>
<member>
<name>k</name>
<value><array><data>
<value><i4>1</i4></value>
<struct></struct>
</data></array></value>
</member>
</struct>
</param>
</params>
</methodCall>
"""
supervisor = DummySupervisor()
subinterfaces = [('supervisor', DummySupervisorRPCNamespace())]
handler = self._makeOne(supervisor, subinterfaces)
result = handler.loads(s)
params, method = result
import datetime
self.assertEqual(method, 'examples.getStateName')
self.assertEqual(params[0], 41)
self.assertEqual(params[1], 'foo')
self.assertEqual(params[2], '')
self.assertEqual(params[3], 'bar')
self.assertEqual(params[4], '')
self.assertEqual(params[5], True)
self.assertEqual(params[6], -12.214)
self.assertEqual(params[7], datetime.datetime(1998, 7, 17, 14, 8, 55))
self.assertEqual(params[8], "you can't read this!")
self.assertEqual(params[9], {'j': 5, 'k': 'abc'})
self.assertEqual(params[10], [12, 'abc', 'def', 34])
self.assertEqual(params[11], {'k': [1, {}]})
class TraverseTests(unittest.TestCase):
def test_underscore(self):
from supervisor import xmlrpc
self.assertRaises(xmlrpc.RPCError, xmlrpc.traverse, None, '_', None)
def test_notfound(self):
from supervisor import xmlrpc
self.assertRaises(xmlrpc.RPCError, xmlrpc.traverse, None, 'foo', None)
def test_badparams(self):
from supervisor import xmlrpc
self.assertRaises(xmlrpc.RPCError, xmlrpc.traverse, self,
'test_badparams', (1, 2, 3))
def test_success(self):
from supervisor import xmlrpc
L = []
class Dummy:
def foo(self, a):
L.append(a)
dummy = Dummy()
xmlrpc.traverse(dummy, 'foo', [1])
self.assertEqual(L, [1])
class SupervisorTransportTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.xmlrpc import SupervisorTransport
return SupervisorTransport
def _makeOne(self, *arg, **kw):
return self._getTargetClass()(*arg, **kw)
def test_ctor_unix(self):
from supervisor import xmlrpc
transport = self._makeOne('user', 'pass', 'unix:///foo/bar')
conn = transport._get_connection()
self.assertTrue(isinstance(conn, xmlrpc.UnixStreamHTTPConnection))
self.assertEqual(conn.host, 'localhost')
self.assertEqual(conn.socketfile, '/foo/bar')
def test_ctor_unknown(self):
self.assertRaises(ValueError,
self._makeOne, 'user', 'pass', 'unknown:///foo/bar'
)
def test__get_connection_http_9001(self):
transport = self._makeOne('user', 'pass', 'http://127.0.0.1:9001/')
conn = transport._get_connection()
self.assertTrue(isinstance(conn, httplib.HTTPConnection))
self.assertEqual(conn.host, '127.0.0.1')
self.assertEqual(conn.port, 9001)
def test__get_connection_http_80(self):
transport = self._makeOne('user', 'pass', 'http://127.0.0.1/')
conn = transport._get_connection()
self.assertTrue(isinstance(conn, httplib.HTTPConnection))
self.assertEqual(conn.host, '127.0.0.1')
self.assertEqual(conn.port, 80)
def test_request_non_200_response(self):
transport = self._makeOne('user', 'pass', 'http://127.0.0.1/')
dummy_conn = DummyConnection(400, '')
def getconn():
return dummy_conn
transport._get_connection = getconn
self.assertRaises(xmlrpclib.ProtocolError,
transport.request, 'localhost', '/', '')
self.assertEqual(transport.connection, None)
self.assertEqual(dummy_conn.closed, True)
def test_request_400_response(self):
transport = self._makeOne('user', 'pass', 'http://127.0.0.1/')
dummy_conn = DummyConnection(400, '')
def getconn():
return dummy_conn
transport._get_connection = getconn
self.assertRaises(xmlrpclib.ProtocolError,
transport.request, 'localhost', '/', '')
self.assertEqual(transport.connection, None)
self.assertEqual(dummy_conn.closed, True)
self.assertEqual(dummy_conn.requestargs[0], 'POST')
self.assertEqual(dummy_conn.requestargs[1], '/')
self.assertEqual(dummy_conn.requestargs[2], '')
self.assertEqual(dummy_conn.requestargs[3]['Content-Length'], '0')
self.assertEqual(dummy_conn.requestargs[3]['Content-Type'], 'text/xml')
self.assertEqual(dummy_conn.requestargs[3]['Authorization'],
'Basic dXNlcjpwYXNz')
self.assertEqual(dummy_conn.requestargs[3]['Accept'], 'text/xml')
def test_request_200_response(self):
transport = self._makeOne('user', 'pass', 'http://127.0.0.1/')
response = """<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>South Dakota</string></value>
</param>
</params>
</methodResponse>"""
dummy_conn = DummyConnection(200, response)
def getconn():
return dummy_conn
transport._get_connection = getconn
result = transport.request('localhost', '/', '')
self.assertEqual(transport.connection, dummy_conn)
self.assertEqual(dummy_conn.closed, False)
self.assertEqual(dummy_conn.requestargs[0], 'POST')
self.assertEqual(dummy_conn.requestargs[1], '/')
self.assertEqual(dummy_conn.requestargs[2], '')
self.assertEqual(dummy_conn.requestargs[3]['Content-Length'], '0')
self.assertEqual(dummy_conn.requestargs[3]['Content-Type'], 'text/xml')
self.assertEqual(dummy_conn.requestargs[3]['Authorization'],
'Basic dXNlcjpwYXNz')
self.assertEqual(dummy_conn.requestargs[3]['Accept'], 'text/xml')
self.assertEqual(result, ('South Dakota',))
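# Illustrative (not exercised by these tests): SupervisorTransport is normally handed to
# an xmlrpclib ServerProxy so RPC calls are tunnelled over supervisord's HTTP or UNIX
# socket; the URL given to ServerProxy is a placeholder, the transport picks the real
# connection. A sketch, assuming a local supervisord listening on a UNIX socket:
#     from supervisor.xmlrpc import SupervisorTransport
#     transport = SupervisorTransport('user', 'pass', 'unix:///var/run/supervisor.sock')
#     proxy = xmlrpclib.ServerProxy('http://127.0.0.1', transport=transport)
#     proxy.supervisor.getState()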
class TestDeferredXMLRPCResponse(unittest.TestCase):
def _getTargetClass(self):
from supervisor.xmlrpc import DeferredXMLRPCResponse
return DeferredXMLRPCResponse
def _makeOne(self, request=None, callback=None):
if request is None:
request = DummyRequest(None, None, None, None, None)
if callback is None:
callback = Dummy()
callback.delay = 1
return self._getTargetClass()(request, callback)
def test_ctor(self):
callback = Dummy()
callback.delay = 1
inst = self._makeOne(request='request', callback=callback)
self.assertEqual(inst.callback, callback)
self.assertEqual(inst.delay, 1.0)
self.assertEqual(inst.request, 'request')
self.assertEqual(inst.finished, False)
def test_more_finished(self):
inst = self._makeOne()
inst.finished = True
result = inst.more()
self.assertEqual(result, '')
def test_more_callback_returns_not_done_yet(self):
from supervisor.http import NOT_DONE_YET
def callback():
return NOT_DONE_YET
callback.delay = 1
inst = self._makeOne(callback=callback)
self.assertEqual(inst.more(), NOT_DONE_YET)
def test_more_callback_raises_RPCError(self):
from supervisor.xmlrpc import RPCError, Faults
def callback():
raise RPCError(Faults.UNKNOWN_METHOD)
callback.delay = 1
inst = self._makeOne(callback=callback)
self.assertEqual(inst.more(), None)
self.assertEqual(len(inst.request.producers), 1)
self.assertTrue('UNKNOWN_METHOD' in inst.request.producers[0])
self.assertTrue(inst.finished)
def test_more_callback_returns_value(self):
def callback():
return 'abc'
callback.delay = 1
inst = self._makeOne(callback=callback)
self.assertEqual(inst.more(), None)
self.assertEqual(len(inst.request.producers), 1)
self.assertTrue('abc' in inst.request.producers[0])
self.assertTrue(inst.finished)
def test_more_callback_raises_unexpected_exception(self):
def callback():
raise ValueError('foo')
callback.delay = 1
inst = self._makeOne(callback=callback)
inst.traceback = Dummy()
called = []
inst.traceback.print_exc = lambda: called.append(True)
self.assertEqual(inst.more(), None)
self.assertEqual(inst.request._error, 500)
self.assertTrue(inst.finished)
self.assertTrue(called)
def test_getresponse_http_10_with_keepalive(self):
inst = self._makeOne()
inst.request.version = '1.0'
inst.request.header.append('Connection: keep-alive')
inst.getresponse('abc')
self.assertEqual(len(inst.request.producers), 1)
self.assertEqual(inst.request.headers['Connection'], 'Keep-Alive')
def test_getresponse_http_10_no_keepalive(self):
inst = self._makeOne()
inst.request.version = '1.0'
inst.getresponse('abc')
self.assertEqual(len(inst.request.producers), 1)
self.assertEqual(inst.request.headers['Connection'], 'close')
def test_getresponse_http_11_without_close(self):
inst = self._makeOne()
inst.request.version = '1.1'
inst.getresponse('abc')
self.assertEqual(len(inst.request.producers), 1)
self.assertTrue('Connection' not in inst.request.headers)
def test_getresponse_http_11_with_close(self):
inst = self._makeOne()
inst.request.header.append('Connection: close')
inst.request.version = '1.1'
inst.getresponse('abc')
self.assertEqual(len(inst.request.producers), 1)
self.assertEqual(inst.request.headers['Connection'], 'close')
def test_getresponse_http_unknown(self):
inst = self._makeOne()
inst.request.version = None
inst.getresponse('abc')
self.assertEqual(len(inst.request.producers), 1)
self.assertEqual(inst.request.headers['Connection'], 'close')
class TestSystemNamespaceRPCInterface(unittest.TestCase):
def _makeOne(self, namespaces=()):
from supervisor.xmlrpc import SystemNamespaceRPCInterface
return SystemNamespaceRPCInterface(namespaces)
def test_listMethods_gardenpath(self):
inst = self._makeOne()
result = inst.listMethods()
self.assertEqual(
result,
['system.listMethods',
'system.methodHelp',
'system.methodSignature',
'system.multicall',
]
)
def test_listMethods_omits_underscore_attrs(self):
class DummyNamespace(object):
def foo(self): pass
def _bar(self): pass
ns1 = DummyNamespace()
inst = self._makeOne([('ns1', ns1)])
result = inst.listMethods()
self.assertEqual(
result,
['ns1.foo',
'system.listMethods',
'system.methodHelp',
'system.methodSignature',
'system.multicall'
]
)
def test_methodHelp_known_method(self):
inst = self._makeOne()
result = inst.methodHelp('system.listMethods')
self.assertTrue('array' in result)
def test_methodHelp_unknown_method(self):
from supervisor.xmlrpc import RPCError
inst = self._makeOne()
self.assertRaises(RPCError, inst.methodHelp, 'wont.be.found')
def test_methodSignature_known_method(self):
inst = self._makeOne()
result = inst.methodSignature('system.methodSignature')
self.assertEqual(result, ['array', 'string'])
def test_methodSignature_unknown_method(self):
from supervisor.xmlrpc import RPCError
inst = self._makeOne()
self.assertRaises(RPCError, inst.methodSignature, 'wont.be.found')
def test_methodSignature_with_bad_sig(self):
from supervisor.xmlrpc import RPCError
class DummyNamespace(object):
def foo(self):
""" @param string name The thing"""
ns1 = DummyNamespace()
inst = self._makeOne([('ns1', ns1)])
self.assertRaises(RPCError, inst.methodSignature, 'ns1.foo')
def test_multicall_recursion_forbidden(self):
inst = self._makeOne()
call = {'methodName':'system.multicall'}
multiproduce = inst.multicall([call])
result = multiproduce()
self.assertEqual(
result,
[{'faultCode': 2, 'faultString': 'INCORRECT_PARAMETERS'}]
)
def test_multicall_other_exception(self):
inst = self._makeOne()
call =
0, 1, 2, 2, 0, 3, 2, 0, 0]
divinatory 1.6 1.42829 [4, 1, 0, 0, 1, 3, 3, 0, 1, 3]
divine 2.6 0.8 [3, 3, 3, 2, 1, 2, 3, 4, 2, 3]
divined 0.8 1.16619 [1, 0, 3, 0, 0, 1, 0, 3, 0, 0]
divinely 2.9 0.7 [3, 2, 3, 3, 2, 4, 3, 2, 4, 3]
diviner 0.3 0.9 [0, 0, 3, 0, 0, 0, 0, 0, 0, 0]
diviners 1.2 1.16619 [0, 1, 0, 2, 2, 0, 3, 1, 3, 0]
divines 0.8 1.249 [0, 0, 3, 0, 0, 0, 3, 2, 0, 0]
divinest 2.7 0.78102 [3, 4, 2, 4, 2, 2, 2, 3, 2, 3]
diving 0.3 0.45826 [1, 0, 0, 0, 0, 1, 0, 1, 0, 0]
divining 0.9 1.37477 [0, -1, 2, 0, 1, 0, 2, 4, 1, 0]
divinise 0.5 1.36015 [0, 2, 0, 0, 0, 0, 0, -2, 2, 3]
divinities 1.8 1.46969 [1, 3, 3, 4, 0, 0, 1, 0, 3, 3]
divinity 2.7 1.00499 [4, 4, 2, 3, 3, 1, 2, 4, 2, 2]
divinize 2.3 1.00499 [4, 2, 2, 3, 4, 1, 1, 2, 2, 2]
dizzy -0.9 0.3 [-1, -1, -1, -1, -1, -1, -1, -1, 0, -1]
dodging -0.4 0.8 [-1, -1, 0, 1, 0, -1, 0, 0, -2, 0]
dodgy -0.9 0.9434 [-1, -1, -1, -3, -1, 1, -1, -1, -1, 0]
dolorous -2.2 0.6 [-2, -2, -2, -3, -3, -3, -2, -2, -1, -2]
dominance 0.8 0.87178 [2, 0, 0, 2, 1, 0, 0, 1, 2, 0]
dominances -0.1 0.9434 [-1, 0, 1, 1, 0, -1, 0, 1, 0, -2]
dominantly 0.2 1.16619 [-2, 0, 0, -1, 2, 1, 2, 0, 0, 0]
dominants 0.2 1.16619 [0, 2, -1, 0, -1, -1, -1, 1, 1, 2]
dominate -0.5 0.92195 [0, -1, 1, -1, 1, -1, -2, 0, -1, -1]
dominates 0.2 1.249 [1, 0, -2, -1, 1, -1, 2, 0, 0, 2]
dominating -1.2 1.98997 [-4, -1, -4, -1, -3, -1, -1, 2, 2, -1]
domination -0.2 0.9798 [0, 1, 0, -1, -1, -1, 0, 2, -1, -1]
dominations -0.3 0.45826 [0, 0, 0, 0, 0, -1, 0, -1, -1, 0]
dominative -0.7 1.18743 [-1, -1, -2, -2, -1, -1, -1, 2, 1, -1]
dominators -0.4 1.8 [-1, -2, -2, -2, 0, 2, 2, -3, 2, 0]
dominatrices -0.2 1.6 [-3, 0, 2, 0, -2, -2, 0, 1, 2, 0]
dominatrix -0.5 0.92195 [0, 0, -1, 0, 0, 1, 0, -1, -2, -2]
dominatrixes 0.6 1.35647 [0, 4, 0, -1, 0, 2, 1, 0, 0, 0]
doom -1.7 1.26886 [-2, -1, -1, -4, -2, -2, 1, -3, -1, -2]
doomed -3.2 0.74833 [-3, -3, -4, -4, -4, -2, -4, -3, -3, -2]
doomful -2.1 0.7 [-3, -2, -3, -1, -2, -3, -2, -1, -2, -2]
dooming -2.8 0.4 [-2, -3, -2, -3, -3, -3, -3, -3, -3, -3]
dooms -1.1 1.57797 [1, -3, -1, -3, -2, -1, -3, 1, 1, -1]
doomsayer -0.7 1.41774 [2, -1, -2, -1, 1, -2, -2, -1, 1, -2]
doomsayers -1.7 0.78102 [-1, -2, -3, 0, -2, -2, -2, -1, -2, -2]
doomsaying -1.5 1.28452 [-3, -2, -2, 0, 1, 0, -3, -2, -2, -2]
doomsayings -1.5 0.92195 [-2, -1, -1, -2, -2, 0, 0, -2, -3, -2]
doomsday -2.8 1.249 [-3, -1, -3, -4, -3, -4, 0, -4, -3, -3]
doomsdayer -2.2 1.249 [-3, -1, -4, -3, -4, -3, -1, -1, -1, -1]
doomsdays -2.4 1.85472 [-3, -2, -4, 1, -4, -3, -2, -4, 1, -4]
doomster -2.2 0.87178 [-2, -1, -2, -3, -1, -3, -1, -3, -3, -3]
doomsters -1.6 0.8 [-3, -1, -2, -2, 0, -2, -2, -1, -1, -2]
doomy -1.1 1.37477 [2, -2, -1, -2, -2, -2, -2, 1, -1, -2]
dork -1.4 0.66332 [-1, -2, -2, -1, -1, -1, -3, -1, -1, -1]
dorkier -1.1 0.53852 [-1, -1, -1, -1, -2, 0, -1, -2, -1, -1]
dorkiest -1.2 0.74833 [-1, -2, -1, -3, -1, 0, -1, -1, -1, -1]
dorks -0.5 0.67082 [-1, 1, -1, -1, -1, -1, 0, 0, -1, 0]
dorky -1.1 1.04403 [-1, 0, -1, 1, -1, -1, -3, -2, -2, -1]
doubt -1.5 0.5 [-1, -1, -2, -2, -1, -1, -2, -1, -2, -2]
doubtable -1.5 0.5 [-1, -1, -2, -1, -2, -1, -2, -2, -1, -2]
doubted -1.1 1.22066 [-1, -2, -2, 2, -1, -1, -2, -2, -2, 0]
doubter -1.6 0.91652 [-1, -3, -2, -1, -1, -1, -2, -2, -3, 0]
doubters -1.3 0.45826 [-1, -1, -1, -1, -1, -2, -1, -2, -2, -1]
doubtful -1.4 0.4899 [-1, -1, -2, -1, -2, -2, -1, -1, -2, -1]
doubtfully -1.2 0.4 [-1, -1, -1, -1, -1, -1, -2, -1, -1, -2]
doubtfulness -1.2 0.4 [-2, -1, -1, -1, -1, -1, -1, -1, -1, -2]
doubting -1.4 0.4899 [-1, -1, -1, -2, -2, -1, -1, -1, -2, -2]
doubtingly -1.4 0.4899 [-2, -2, -1, -1, -1, -1, -1, -2, -2, -1]
doubtless 0.9 1.51327 [2, 2, 1, 2, -2, 2, -2, 1, 1, 2]
doubtlessly 1.2 0.9798 [2, 1, 1, 2, 0, -1, 2, 1, 2, 2]
doubtlessness 0.8 0.9798 [2, 1, 2, 0, 0, 0, 2, -1, 1, 1]
doubts -1.2 0.6 [-2, -1, -1, -1, -2, -2, -1, 0, -1, -1]
douche -1.5 1.68819 [-3, -2, -3, 1, 1, -2, -3, -2, 1, -3]
douchebag -3.0 0.44721 [-3, -3, -3, -3, -3, -3, -2, -3, -4, -3]
downcast -1.8 0.74833 [-1, -1, -1, -2, -2, -2, -1, -3, -3, -2]
downhearted -2.3 0.78102 [-1, -2, -2, -4, -2, -2, -2, -3, -3, -2]
downside -1.0 0.7746 [-1, -1, -1, -1, -1, -1, -2, 1, -2, -1]
drag -0.9 0.83066 [-1, -2, -1, -1, -2, -1, -1, 1, 0, -1]
dragged -0.2 1.07703 [-2, -1, 0, 0, -1, 0, 0, 1, 2, -1]
drags -0.7 0.64031 [0, -1, 0, -1, -1, -2, -1, 0, 0, -1]
drained -1.5 0.5 [-1, -1, -2, -2, -1, -2, -1, -2, -1, -2]
dread -2.0 0.63246 [-2, -3, -2, -2, -2, -2, -3, -1, -1, -2]
dreaded -2.7 0.64031 [-2, -3, -3, -3, -4, -3, -2, -2, -2, -3]
dreadful -1.9 1.86815 [-4, -2, -2, 2, -1, -4, -1, 0, -3, -4]
dreadfully -2.7 1.26886 [-4, -4, -3, -4, -3, -1, -2, -1, -1, -4]
dreadfulness -3.2 0.87178 [-3, -4, -2, -3, -4, -4, -2, -2, -4, -4]
dreadfuls -2.4 1.2 [-4, -3, -3, -2, -3, -2, -4, 0, -1, -2]
dreading -2.4 0.8 [-3, -2, -2, -2, -2, -2, -3, -4, -3, -1]
dreadlock -0.4 0.66332 [0, 0, 0, 0, 0, -1, -2, 0, -1, 0]
dreadlocks -0.2 0.9798 [0, 0, 0, 0, 0, -1, -2, 2, 0, -1]
dreadnought -0.6 1.35647 [-2, 0, 0, 0, -3, 0, -1, -2, 0, 2]
dreadnoughts -0.4 0.66332 [0, -1, -1, 0, 0, 0, 0, 0, -2, 0]
dreads -1.4 1.42829 [0, -1, 0, 0, -3, -3, 0, -4, -2, -1]
dream 1.0 1.18322 [0, 1, 2, 0, 0, 3, 0, 3, 1, 0]
dreams 1.7 1.1 [2, 2, 3, 0, 1, 1, 1, 4, 1, 2]
dreary -1.4 0.4899 [-1, -1, -2, -1, -1, -2, -2, -1, -2, -1]
droopy -0.8 0.74833 [-1, -1, 0, -1, -2, 0, 0, -1, 0, -2]
drop -1.1 0.53852 [0, -1, -1, -1, -2, -1, -2, -1, -1, -1]
drown -2.7 1.00499 [-4, -2, -2, -4, -4, -2, -3, -1, -3, -2]
drowned -2.9 0.7 [-2, -3, -3, -3, -2, -4, -4, -2, -3, -3]
drowns -2.2 1.6 [-3, -3, -3, -4, -2, -3, -1, -2, 2, -3]
drunk -1.4 0.91652 [-3, -1, 0, -2, 0, -1, -1, -2, -2, -2]
dubious -1.5 0.5 [-1, -2, -2, -1, -1, -2, -1, -1, -2, -2]
dud -1.0 0.89443 [-1, -1, -1, 0, -3, 0, -1, 0, -1, -2]
dull -1.7 0.45826 [-2, -2, -2, -1, -2, -2, -2, -1, -1, -2]
dullard -1.6 0.66332 [-2, -1, -1, -2, -2, -1, -1, -2, -1, -3]
dullards -1.8 0.87178 [-1, -3, -1, -1, -3, -1, -1, -2, -3, -2]
dulled -1.5 0.5 [-2, -1, -2, -1, -1, -1, -2, -2, -1, -2]
duller -1.7 0.64031 [-3, -1, -2, -2, -2, -1, -2, -1, -1, -2]
dullest -1.7 1.00499 [-1, -4, -1, -1, -2, -3, -2, -1, -1, -1]
dulling -1.1 0.7 [-1, -2, 0, -1, -2, -2, 0, -1, -1, -1]
dullish -1.1 0.53852 [-2, -1, -1, -1, -1, -1, -1, -1, 0, -2]
dullness -1.4 0.8 [-1, -1, -1, -1, -1, -1, -3, -1, -3, -1]
dullnesses -1.9 1.04403 [-3, -2, -1, -1, -3, -1, -4, -1, -2, -1]
dulls -1.0 0.44721 [-1, -1, -1, -1, -1, -1, 0, -1, -1, -2]
dullsville -2.4 0.8 [-2, -2, -4, -3, -2, -2, -3, -2, -3, -1]
dully -1.1 0.3 [-1, -1, -1, -1, -1, -1, -1, -1, -1, -2]
dumb -2.3 0.9 [-4, -2, -2, -2, -2, -2, -4, -2, -2, -1]
dumbass -2.6 1.0198 [-3, -3, -4, -4, -1, -2, -3, -3, -1, -2]
dumbbell -0.8 0.9798 [0, -1, -3, -2, 0, -1, 0, 0, -1, 0]
dumbbells -0.2 0.4 [0, -1, 0, 0, 0, 0, 0, -1, 0, 0]
dumbcane -0.3 0.45826 [0, 0, 0, 0, 0, -1, -1, 0, -1, 0]
dumbcanes -0.6 1.2 [0, 0, -1, -1, -1, 2, 0, -1, -3, -1]
dumbed -1.4 0.4899 [-2, -1, -2, -2, -2, -1, -1, -1, -1, -1]
dumber -1.5 0.5 [-2, -1, -2, -1, -2, -1, -1, -2, -2, -1]
dumbest -2.3 1.00499 [-3, -1, -3, -4, -2, -1, -2, -3, -1, -3]
dumbfound -0.1 1.92094 [3, -2, -1, -1, 1, 1, -3, 3, -1, -1]
dumbfounded -1.6 1.11355 [-2, 0, -2, 0, -2, -1, -4, -1, -2, -2]
dumbfounder -1.0 0.89443 [-2, 0, 0, 0, -2, -1, -2, 0, -2, -1]
dumbfounders -1.0 0.89443 [-1, -3, -1, 0, -2, 0, -1, 0, -1, -1]
dumbfounding -0.8 0.74833 [-1, -2, 0, -1, -1, 0, 0, -1, 0, -2]
dumbfounds -0.3 1.26886 [0, -1, -1, 0, 0, -1, -1, -2, 0, 3]
dumbhead -2.6 0.66332 [-3, -4, -3, -2, -2, -3, -3, -2, -2, -2]
dumbheads -1.9 0.83066 [-2, -2, -2, -1, -2, -1, -2, -4, -1, -2]
dumbing -0.5 1.0247 [-1, 2, -1, 0, -1, -2, -1, 0, 0, -1]
dumbly -1.3 1.00499 [-2, -1, -2, -3, 1, -2, -1, -1, -1, -1]
dumbness -1.9 0.53852 [-2, -2, -2, -2, -2, -3, -2, -2, -1, -1]
dumbs -1.5 0.67082 [-1, -1, -1, -3, -2, -1, -2, -1, -2, -1]
dumbstruck -1.0 1.34164 [-1, -2, 0, 0, -2, 1, -3, 1, -2, -2]
dumbwaiter 0.2 1.07703 [0, 0, 0, 0, 2, 0, 2, 0, -2, 0]
dumbwaiters -0.1 0.3 [0, 0, 0, 0, 0, 0, -1, 0, 0, 0]
dump -1.6 0.91652 [-3, -2, -1, -2, -1, -3, -2, -1, -1, 0]
dumpcart -0.6 0.8 [0, -2, -1, 0, 0, 0, -1, 0, -2, 0]
dumped -1.7 0.78102 [-2, -3, -2, -1, -1, -1, -3, -2, -1, -1]
dumper -1.2 0.87178 [-2, -3, -1, -1, -1, -2, 0, -1, 0, -1]
dumpers -0.8 0.6 [0, 0, -2, -1, -1, 0, -1, -1, -1, -1]
dumpier -1.4 0.66332 [-2, -1, -2, -1, -1, -2, 0, -2, -1, -2]
dumpiest -1.6 1.35647 [-1, -2, -2,
"""
# Zero Knowledge Proofs in Python
Examples of discrete-log zero-knowledge proofs implemented in Python
More specifically, these are non-interactive, zero-knowledge
proofs of knowledge. They can be analyzed and proven secure
in the random oracle model (the random oracle here is instantiated
with the SHA2 hash function).
Lecture notes:
https://www.cs.jhu.edu/~susan/600.641/scribes/lecture10.pdf
https://www.cs.jhu.edu/~susan/600.641/scribes/lecture11.pdf
You must fill in the portions labelled #TODO. See the README.md in this
directory for submission instructions. Points are awarded as marked.
Total possible points: 100
"""
"""
## Import Elliptic Curves
The zero-knowledge proof schemes we work through here
can work with any DLog group. This implementation makes use of
the secp256k1 elliptic curve group. We call an element of this group
(i.e., a point on the curve), simply a Point.
The order of this group, p, is a 256-bit prime number. Furthermore, p
happens to be extremely close to 2^256. Because of this, we can sample
exponents easily by choosing a random 32-byte number, which with high probability
will lie within [0,p).
uint256_from_str(rnd_bytes(32)) is an exponent.
Sometimes this will be represented by the object Fp, which automatically handles
arithmetic modulo p. The underlying 'long' value can be extracted as `p.n` if
`type(p) is Fp`.
"""
import secp256k1
from secp256k1 import Point, q, Fq, order, p, Fp, G, curve, ser, deser, uint256_from_str, uint256_to_str
import os, random
# p is the order of (the number of elements in) the group, i.e., the number of points on the curve
# order = p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
print(order)
print(Fp) # Fp is the group of exponents (integers mod p)
# ser/deser: convert Point -> string and vice versa
# ser : Point -> str, deser : str -> Point
"""
"""
print(repr(G))
print(repr(p * G))
print(deser(ser(G)))
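# A minimal sketch of the exponent sampling described above: a uniformly random 32-byte
# string is, with overwhelming probability, already a valid exponent modulo p.
def _example_random_exponent():
    a = uint256_from_str(os.urandom(32)) % order  # random exponent in [0, p)
    A = a * G                                     # corresponding curve point
    assert deser(ser(A)) == A                     # ser/deser round-trips points
    return a, A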
Hx = Fq(0xbc4f48d7a8651dc97ae415f0b47a52ef1a2702098202392b88bc925f6e89ee17)
Hy = Fq(0x361b27b55c10f94ec0630b4c7d28f963221a0031632092bf585825823f6e27df)
H = Point(curve, Hx,Hy)
# H = random_point(seed=sha2("H")) # An alternate generator
## Pick a random point on the curve (given a seed)
def random_point(seed=None, rnd_bytes=os.urandom):
def sha2_to_long(x):
from Crypto.Hash import SHA256
return int(SHA256.new(x).hexdigest(),16)
if seed is None: seed = rnd_bytes(32)
# assert type(seed) == str and len(seed) == 32
x = sha2_to_long(seed)
while True:
try:
p = secp256k1.solve(Fq(x))
except ValueError:
seed = sha2(('random_point:' + str(seed)))
x = sha2_to_long(seed)
continue
break
return p
"""
## Honest verifier model
In our three-round interactive proof protocols, we will always have the verifier
choose a random challenge.
In practice, it's more convenient to use the Random Oracle model and instantiate
this with a hash function.
This codebase allows the use of either option. The Prover function takes in a
"getChallenge" method, which it can invoke after generating its Commit message.
The Verifier takes in a "getTranscript" message, which it can use to check that
the commitment was chosen before the challenge.
"""
## Interactive challenger
def make_honest_verifier_challenger():
"""
Returns:
- a function "getChallenge(Commit)" that returns a 32-byte string
This function can be passed to the Prover, which it can use to request
the challenge from the verifier.
- a function "getTranscript(Commit)" that checks the right commitment
was queried and returns the same 32-byte string
This function can be passed to the Verifier, which it can use to inspect
the Commit message.
"""
transcript = []
def getChallenge(Commit):
assert transcript == []
assert type(Commit) is str
result = os.urandom(32)
transcript.extend((Commit, result))
return result
def getTranscript(Commit):
assert transcript != []
assert transcript[0] == Commit
return transcript[1]
return getChallenge, getTranscript
## Random Oracle Model
# Find sha2 hash of a string
def sha2(x):
from Crypto.Hash import SHA256
return SHA256.new(x.encode("utf-8")).digest()
"""
## Preliminary example: Proof of knowledge of discrete logarithm
In this part, we provide a scheme that offers a discrete log proof of `ZKP{ (a): A = a*G }`.
Note that the statement `A` is a parameter to the scheme, as it must
be known to both the prover and verifier.
The Prover takes several additional arguments:
- `rnd_bytes`, such that `rnd_bytes(n)` returns an `n`-byte random string. By default, this will use the operating system's os.urandom.
(Note: as this function is non-blocking, it may be a poor choice if the OS runs out of entropy)
- getChallenge, a function that requests the challenge from the verifier.
This takes in `Commit`, an arbitrary length string, and returns a randomly chosen value.
By default, we will use the sha2 hash as a heuristic Random Oracle, giving us the Non-Interactive
version of this protocol.
These can be overridden in later section as part of the security proof constructions.
"""
def dlog_prover(A, a, getChallenge=sha2, rnd_bytes=os.urandom):
assert a*G == A
# blinding factor
k = uint256_from_str(rnd_bytes(32)) % order
# commitment
K = k*G
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(K)))
# response
s = Fp(k + c*a)
return (K,s)
def dlog_verifier(A, prf, getTranscript=sha2):
(K,s) = prf
assert type(A) is type(K) is Point
assert type(s) is Fp
# Recompute c w/ the information given
c = uint256_from_str(getTranscript(ser(K)))
# Check the verification condition
assert s.n *G == K + c*A
return True
def dlog_test():
a = uint256_from_str(os.urandom(32))
A = a*G
getChallenge, getTranscript = make_honest_verifier_challenger()
prf = dlog_prover(A, a, getChallenge)
assert dlog_verifier(A, prf, getTranscript)
print('Dlog correctness test complete!')
dlog_test()
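# Non-interactive variant (a sketch): with the default sha2 heuristic random oracle the
# same prover/verifier pair needs no interactive challenger; the challenge is derived
# from the commitment itself.
def _dlog_noninteractive_example():
    a = uint256_from_str(os.urandom(32))
    A = a * G
    prf = dlog_prover(A, a)       # challenge computed as sha2(ser(K))
    assert dlog_verifier(A, prf)  # recomputes the same challenge from the transcript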
"""
## Part 1: Make a Pedersen commitment to your secret key.
Provide a ZK proof that your commitment is correct.
Zk{ (x,r): X = x*G, C = x*G + r*H }
By completing this proof, you prove you still have knowledge of your key!
The verifier is provided for you. (Since we will publicly verify the proofs). You must complete the prover.
"""
def make_pedersen_commitment(x, rnd_bytes=os.urandom):
r = uint256_from_str(rnd_bytes(32))
C = x * G + r * H
return C, r
def pedersen_prover(C, X, x, r, getChallenge=sha2, rnd_bytes=os.urandom):
"""
Params:
x and r are elements of Fp
C,X are Points
Returns:
prf, of the form (KX,KC,sx,sr)
"""
assert X == x * G
assert C == x * G + r * H
# TODO: fill in your code here (10 points)
# blinding factor
t_1 = uint256_from_str(rnd_bytes(32)) % order
t_2 = uint256_from_str(rnd_bytes(32)) % order
# commitment
KX = t_1*G
KC = t_1*G + t_2*H
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(KX) + ser(KC)))
# response
sx = Fp(t_1 + c*x)
sr = Fp(t_2 + c*r)
return (KX,KC,sx,sr)
def pedersen_verifier(C, X, prf, getTranscript=sha2):
(KX,KC,sx,sr) = prf
assert type(KX) == type(KC) == Point
assert type(sx) == type(sr) == Fp
# Recompute c w/ the information given
c = uint256_from_str(getTranscript(ser(KX) + ser(KC)))
assert sx.n *G == KX + c*X
assert sx.n *G + sr.n *H == KC + c*C
return True
def pedersen_test():
getChallenge, getTranscript = make_honest_verifier_challenger()
x = uint256_from_str(os.urandom(32))
X = x * G
C,r = make_pedersen_commitment(x)
prf = pedersen_prover(C, X, x, r, getChallenge)
(KX, KC, sx, sr) = prf
print(repr((ser(C), ser(KX),ser(KC),uint256_to_str(sx.n).hex(),uint256_to_str(sr.n).hex())))
assert pedersen_verifier(C, X, prf, getTranscript)
print("Pedersen correctness test complete!")
pedersen_test()
"""
## Part 1 b): Make a single Pedersen commitment to a vector of secrets
Zk{ (x1...xn,r1...rn): C1 = x1*G + r1*H, C2 = x2*G + r2*H, .. Cn = xn*G + rn*H }
The verifier is provided for you. (Since we will publicly verify the proofs). You must complete the prover.
"""
def pedersen_vector_prover(C_arr, x_arr, r_arr, getChallenge=sha2, rnd_bytes=os.urandom):
"""
Params:
x_arr, r_arr are arrays of elements in Fp
C_arr are arrays of Points
Returns:
prf, of the form (K,sx,sr) where K is a Point and sx and sr are elements of Fp
Note that here you are able to prove knowledge of the openings of n commitments while communicating only 1 point and 2 scalars.
"""
# Make sure all commitments are correct
for C_elem, x_elem, r_elem in zip(C_arr,x_arr,r_arr):
assert C_elem == x_elem*G + r_elem*H
# TODO: Your code goes here: 10 points
assert len(C_arr) == len(x_arr) == len(r_arr)
# blinding factor
t = uint256_from_str(rnd_bytes(32)) % order
# commitment
C0 = t*G
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(C0)))
# response
sx, sr = Fp(t), Fp(0)  # accumulate the responses in Fp so the verifier's type checks hold
e = c
for _, x_elem, r_elem in zip(C_arr, x_arr, r_arr):
sx += Fp(e*x_elem)
sr += Fp(e*r_elem)
e *= c
return (C0, sx, sr)
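# Why verification succeeds (a sketch of the algebra, matching the verifier below): with
# challenge c and weights e_i = c^i, the responses satisfy
#     sx*G + sr*H == t*G + sum_i e_i*(x_i*G + r_i*H) == C0 + sum_i e_i*C_i,
# which is exactly the linear combination the verifier accumulates in C_final.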
def pedersen_vector_verifier(C_arr, prf, getTranscript=sha2, rnd_bytes=os.urandom):
(C0, sx, sr) = prf
assert type(C0) == Point
assert type(sx) == type(sr) == Fp
c = Fp(uint256_from_str(getTranscript(ser(C0))))
e = c
C_final = C0
for C_elem in C_arr:
C_final = C_final + e.n*C_elem
e = e * c
assert sx.n*G + sr.n*H == C_final
return True
Optional[pulumi.Input[str]]:
"""
Description of the security group.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="fromPort")
def from_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "from_port")
@from_port.setter
def from_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "from_port", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the rule.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Labels to assign to this security group.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="predefinedTarget")
def predefined_target(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "predefined_target")
@predefined_target.setter
def predefined_target(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "predefined_target", value)
@property
@pulumi.getter(name="securityGroupId")
def security_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "security_group_id")
@security_group_id.setter
def security_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_group_id", value)
@property
@pulumi.getter(name="toPort")
def to_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "to_port")
@to_port.setter
def to_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "to_port", value)
@property
@pulumi.getter(name="v4CidrBlocks")
def v4_cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "v4_cidr_blocks")
@v4_cidr_blocks.setter
def v4_cidr_blocks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "v4_cidr_blocks", value)
@property
@pulumi.getter(name="v6CidrBlocks")
def v6_cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "v6_cidr_blocks")
@v6_cidr_blocks.setter
def v6_cidr_blocks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "v6_cidr_blocks", value)
@pulumi.input_type
class VpcSubnetDhcpOptionsArgs:
def __init__(__self__, *,
domain_name: Optional[pulumi.Input[str]] = None,
domain_name_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ntp_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] domain_name: Domain name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domain_name_servers: Domain name server IP addresses.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ntp_servers: NTP server IP addresses.
"""
if domain_name is not None:
pulumi.set(__self__, "domain_name", domain_name)
if domain_name_servers is not None:
pulumi.set(__self__, "domain_name_servers", domain_name_servers)
if ntp_servers is not None:
pulumi.set(__self__, "ntp_servers", ntp_servers)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> Optional[pulumi.Input[str]]:
"""
Domain name.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="domainNameServers")
def domain_name_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Domain name server IP addresses.
"""
return pulumi.get(self, "domain_name_servers")
@domain_name_servers.setter
def domain_name_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "domain_name_servers", value)
@property
@pulumi.getter(name="ntpServers")
def ntp_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
NTP server IP addresses.
"""
return pulumi.get(self, "ntp_servers")
@ntp_servers.setter
def ntp_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ntp_servers", value)
@pulumi.input_type
class YdbDatabaseDedicatedLocationArgs:
def __init__(__self__, *,
region: Optional[pulumi.Input['YdbDatabaseDedicatedLocationRegionArgs']] = None):
"""
:param pulumi.Input['YdbDatabaseDedicatedLocationRegionArgs'] region: Region for the Yandex Database cluster.
The structure is documented below.
"""
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input['YdbDatabaseDedicatedLocationRegionArgs']]:
"""
Region for the Yandex Database cluster.
The structure is documented below.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input['YdbDatabaseDedicatedLocationRegionArgs']]):
pulumi.set(self, "region", value)
@pulumi.input_type
class YdbDatabaseDedicatedLocationRegionArgs:
def __init__(__self__, *,
id: pulumi.Input[str]):
"""
:param pulumi.Input[str] id: Region ID for the Yandex Database cluster.
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
Region ID for the Yandex Database cluster.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@pulumi.input_type
class YdbDatabaseDedicatedScalePolicyArgs:
def __init__(__self__, *,
fixed_scale: pulumi.Input['YdbDatabaseDedicatedScalePolicyFixedScaleArgs']):
"""
:param pulumi.Input['YdbDatabaseDedicatedScalePolicyFixedScaleArgs'] fixed_scale: Fixed scaling policy for the Yandex Database cluster.
The structure is documented below.
"""
pulumi.set(__self__, "fixed_scale", fixed_scale)
@property
@pulumi.getter(name="fixedScale")
def fixed_scale(self) -> pulumi.Input['YdbDatabaseDedicatedScalePolicyFixedScaleArgs']:
"""
Fixed scaling policy for the Yandex Database cluster.
The structure is documented below.
"""
return pulumi.get(self, "fixed_scale")
@fixed_scale.setter
def fixed_scale(self, value: pulumi.Input['YdbDatabaseDedicatedScalePolicyFixedScaleArgs']):
pulumi.set(self, "fixed_scale", value)
@pulumi.input_type
class YdbDatabaseDedicatedScalePolicyFixedScaleArgs:
def __init__(__self__, *,
size: pulumi.Input[int]):
"""
:param pulumi.Input[int] size: Number of instances for the Yandex Database cluster.
"""
pulumi.set(__self__, "size", size)
@property
@pulumi.getter
def size(self) -> pulumi.Input[int]:
"""
Number of instances for the Yandex Database cluster.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: pulumi.Input[int]):
pulumi.set(self, "size", value)
@pulumi.input_type
class YdbDatabaseDedicatedStorageConfigArgs:
def __init__(__self__, *,
group_count: pulumi.Input[int],
storage_type_id: pulumi.Input[str]):
"""
:param pulumi.Input[int] group_count: Amount of storage groups of selected type for the Yandex Database cluster.
:param pulumi.Input[str] storage_type_id: Storage type ID for the Yandex Database cluster.
Available presets can be obtained via `yc ydb storage-type list` command.
"""
pulumi.set(__self__, "group_count", group_count)
pulumi.set(__self__, "storage_type_id", storage_type_id)
@property
@pulumi.getter(name="groupCount")
def group_count(self) -> pulumi.Input[int]:
"""
Amount of storage groups of selected type for the Yandex Database cluster.
"""
return pulumi.get(self, "group_count")
@group_count.setter
def group_count(self, value: pulumi.Input[int]):
pulumi.set(self, "group_count", value)
@property
@pulumi.getter(name="storageTypeId")
def storage_type_id(self) -> pulumi.Input[str]:
"""
Storage type ID for the Yandex Database cluster.
Available presets can be obtained via `yc ydb storage-type list` command.
"""
return pulumi.get(self, "storage_type_id")
@storage_type_id.setter
def storage_type_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_type_id", value)
@pulumi.input_type
class GetAlbBackendGroupGrpcBackendArgs:
def __init__(__self__, *,
healthcheck: 'GetAlbBackendGroupGrpcBackendHealthcheckArgs',
load_balancing_config: 'GetAlbBackendGroupGrpcBackendLoadBalancingConfigArgs',
name: str,
port: int,
target_group_ids: Sequence[str],
tls: 'GetAlbBackendGroupGrpcBackendTlsArgs',
weight: int):
"""
:param 'GetAlbBackendGroupGrpcBackendHealthcheckArgs' healthcheck: Healthcheck specification that will be used by this backend. Structure is documented below.
:param 'GetAlbBackendGroupGrpcBackendLoadBalancingConfigArgs' load_balancing_config: Load Balancing Config specification that will be used by this backend. Structure is documented below.
:param str name: - Name of the Backend Group.
:param int port: Port for incoming traffic.
:param Sequence[str] target_group_ids: References target groups for the backend.
:param 'GetAlbBackendGroupGrpcBackendTlsArgs' tls: Tls specification that will be used by this backend. Structure is documented below.
:param int weight: Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights.
"""
pulumi.set(__self__, "healthcheck", healthcheck)
pulumi.set(__self__, "load_balancing_config", load_balancing_config)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "target_group_ids", target_group_ids)
pulumi.set(__self__, "tls", tls)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def healthcheck(self) -> 'GetAlbBackendGroupGrpcBackendHealthcheckArgs':
"""
Healthcheck specification that will be used by this backend. Structure is documented below.
"""
return pulumi.get(self, "healthcheck")
@healthcheck.setter
def healthcheck(self, value: 'GetAlbBackendGroupGrpcBackendHealthcheckArgs'):
pulumi.set(self, "healthcheck", value)
@property
@pulumi.getter(name="loadBalancingConfig")
def load_balancing_config(self) -> 'GetAlbBackendGroupGrpcBackendLoadBalancingConfigArgs':
"""
Load Balancing Config specification that will be used by this backend. Structure is documented below.
"""
return pulumi.get(self, "load_balancing_config")
@load_balancing_config.setter
def load_balancing_config(self, value: 'GetAlbBackendGroupGrpcBackendLoadBalancingConfigArgs'):
pulumi.set(self, "load_balancing_config", value)
@property
@pulumi.getter
def name(self) -> str:
"""
- Name of the Backend Group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: str):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def port(self) -> int:
"""
Port for incoming traffic.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: int):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="targetGroupIds")
def target_group_ids(self) -> Sequence[str]:
"""
References target groups for the backend.
"""
return pulumi.get(self, "target_group_ids")
@target_group_ids.setter
def target_group_ids(self, value: Sequence[str]):
pulumi.set(self, "target_group_ids", value)
@property
@pulumi.getter
def tls(self) -> 'GetAlbBackendGroupGrpcBackendTlsArgs':
"""
Tls specification that will be used by this backend. Structure is documented below.
"""
return pulumi.get(self, "tls")
@tls.setter
def tls(self, value: 'GetAlbBackendGroupGrpcBackendTlsArgs'):
pulumi.set(self, "tls", value)
@property
@pulumi.getter
def weight(self) -> int:
"""
Weight of the backend. Traffic will be split between backends of the same BackendGroup according to their weights.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: int):
pulumi.set(self, "weight", value)
@pulumi.input_type
class GetAlbBackendGroupGrpcBackendHealthcheckArgs:
def __init__(__self__, *,
grpc_healthcheck: 'GetAlbBackendGroupGrpcBackendHealthcheckGrpcHealthcheckArgs',
healthcheck_port: int,
healthy_threshold: int,
http_healthcheck: 'GetAlbBackendGroupGrpcBackendHealthcheckHttpHealthcheckArgs',
interval: str,
interval_jitter_percent: float,
stream_healthcheck: 'GetAlbBackendGroupGrpcBackendHealthcheckStreamHealthcheckArgs',
timeout: str,
unhealthy_threshold: int):
"""
:param 'GetAlbBackendGroupGrpcBackendHealthcheckGrpcHealthcheckArgs' grpc_healthcheck: Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below.
:param int healthcheck_port: Optional alternative port for health checking.
:param int healthy_threshold: Number of consecutive successful health checks required to promote endpoint into the healthy state. 0 means 1. Note that during startup, only a single successful health check is required to mark a host healthy.
:param 'GetAlbBackendGroupGrpcBackendHealthcheckHttpHealthcheckArgs' http_healthcheck: Http Healthcheck specification that will be used by this healthcheck. Structure is documented below.
:param str interval: Interval between health checks.
:param float interval_jitter_percent: An optional jitter amount as a percentage of interval. If specified, during every interval value of (interval_ms * interval_jitter_percent / 100) will be added to the wait time.
:param 'GetAlbBackendGroupGrpcBackendHealthcheckStreamHealthcheckArgs' stream_healthcheck: Stream Healthcheck specification that will be used by this healthcheck. Structure is documented below.
:param str timeout: Time to wait for a health check response.
:param int unhealthy_threshold: Number of consecutive failed health checks required to demote endpoint into the unhealthy state. 0 means 1. Note that for HTTP health checks, a single 503 immediately makes endpoint unhealthy.
"""
pulumi.set(__self__, "grpc_healthcheck", grpc_healthcheck)
pulumi.set(__self__, "healthcheck_port", healthcheck_port)
pulumi.set(__self__, "healthy_threshold", healthy_threshold)
pulumi.set(__self__, "http_healthcheck", http_healthcheck)
pulumi.set(__self__, "interval", interval)
pulumi.set(__self__, "interval_jitter_percent", interval_jitter_percent)
pulumi.set(__self__, "stream_healthcheck", stream_healthcheck)
pulumi.set(__self__, "timeout", timeout)
pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)
@property
@pulumi.getter(name="grpcHealthcheck")
def grpc_healthcheck(self) -> 'GetAlbBackendGroupGrpcBackendHealthcheckGrpcHealthcheckArgs':
"""
Grpc Healthcheck specification that will be used by this healthcheck. Structure is documented below.
"""
return pulumi.get(self, "grpc_healthcheck")
@grpc_healthcheck.setter
def grpc_healthcheck(self, value: 'GetAlbBackendGroupGrpcBackendHealthcheckGrpcHealthcheckArgs'):
pulumi.set(self, "grpc_healthcheck", value)
@property
@pulumi.getter(name="healthcheckPort")
def healthcheck_port(self) -> int:
"""
Optional alternative port for health checking.
"""
| |
import numpy as np
import random
import itertools
import sys
from lattice_mc import atom, jump, transitions, cluster
from lattice_mc.error import BlockedLatticeError
from collections import Counter
class Lattice:
"""
Lattice class
"""
def __init__( self, sites, cell_lengths ):
"""
Initialise a Lattice instance.
Args:
sites (List(Site)): List of sites contained in the lattice.
cell_lengths (np.array(x,y,z)): Vector of cell lengths for the simulation cell.
Returns:
None
"""
self.cell_lengths = cell_lengths
self.sites = sites
self.number_of_sites = len( self.sites )
self.site_labels = set( [ site.label for site in self.sites ] )
self.site_populations = Counter( [ site.label for site in self.sites ] )
self.enforce_periodic_boundary_conditions()
self.initialise_site_lookup_table()
self.nn_energy = False
self.cn_energies = False
self.site_energies = False
self.jump_lookup_table = False
for site in self.sites:
site.p_neighbours = [ self.site_with_id( i ) for i in site.neighbours ]
self.reset()
def enforce_periodic_boundary_conditions( self ):
"""
Ensure that all lattice sites are within the central periodic image of the simulation cell.
Sites that are outside the central simulation cell are mapped back into this cell.
Args:
None
Returns:
None
"""
for s in self.sites:
for i in range(3):
if s.r[i] < 0.0:
s.r[i] += self.cell_lengths[i]
if s.r[i] > self.cell_lengths[i]:
s.r[i] -= self.cell_lengths[i]
def reset( self ):
"""
Reset all time-dependent counters for this lattice and its constituent sites
Args:
None
Returns:
None
"""
self.time = 0.0
for site in self.sites:
site.time_occupied = 0.0
def initialise_site_lookup_table( self ):
"""
Create a lookup table allowing sites in this lattice to be queried using `self.site_lookup[n]` where `n` is the identifying site number.
Args:
None
Returns:
None
"""
self.site_lookup = {}
for site in self.sites:
self.site_lookup[ site.number ] = site
def site_with_id( self, number ):
"""
Select the site with a specific id number.
Args:
number (Int): The identifying number for a specific site.
Returns:
(Site): The site with id number equal to `number`
"""
return self.site_lookup[ number ]
def vacant_sites( self ):
"""
The set of sites not occupied by atoms.
Args:
None
Returns:
List(Site): List of sites that are vacant.
"""
return ( site for site in self.sites if not site.is_occupied )
def occupied_sites( self ):
"""
The set of sites occupied by atoms.
Args:
None
Returns:
List(Site): List of sites that are occupied.
"""
return ( site for site in self.sites if site.is_occupied )
def vacant_site_numbers( self ):
"""
List of site id numbers for all sites that are vacant.
Args:
None
Returns:
List(Int): List of site id numbers for vacant sites.
"""
return [ site.number for site in self.sites if not site.is_occupied ]
def occupied_site_numbers( self ):
"""
List of site id numbers for all sites that are occupied.
Args:
None
Returns:
List(Int): List of site id numbers for occupied sites.
"""
return [ site.number for site in self.sites if site.is_occupied ]
def potential_jumps( self ):
"""
All nearest-neighbour jumps not blocked by volume exclusion
(i.e. from occupied to neighbouring unoccupied sites).
Args:
None
Returns:
(List(Jump)): List of possible jumps.
"""
jumps = []
if self.number_of_occupied_sites <= self.number_of_sites / 2:
for occupied_site in self.occupied_sites():
unoccupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in occupied_site.neighbours ] if not site.is_occupied ]
for vacant_site in unoccupied_neighbours:
jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )
else:
for vacant_site in self.vacant_sites():
occupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in vacant_site.neighbours ] if site.is_occupied ]
for occupied_site in occupied_neighbours:
jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )
return jumps
def update( self, jump ):
"""
Update the lattice state by accepting a specific jump
Args:
jump (Jump): The jump that has been accepted.
Returns:
None.
"""
atom = jump.initial_site.atom
dr = jump.dr( self.cell_lengths )
#print( "atom {} jumped from site {} to site {}".format( atom.number, jump.initial_site.number, jump.final_site.number ) )
jump.final_site.occupation = atom.number
jump.final_site.atom = atom
jump.final_site.is_occupied = True
jump.initial_site.occupation = 0
jump.initial_site.atom = None
jump.initial_site.is_occupied = False
# TODO: updating atom counters could be contained in an atom.move_to( site ) method
atom.site = jump.final_site
atom.number_of_hops += 1
atom.dr += dr
atom.summed_dr2 += np.dot( dr, dr )
def populate_sites( self, number_of_atoms, selected_sites=None ):
"""
Populate the lattice sites with a specific number of atoms.
Args:
number_of_atoms (Int): The number of atoms to populate the lattice sites with.
selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None.
Returns:
None
"""
if number_of_atoms > self.number_of_sites:
raise ValueError
if selected_sites:
atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ]
else:
atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ]
self.number_of_occupied_sites = number_of_atoms
return atoms
def jump( self ):
"""
Select a jump at random from all potential jumps, then update the lattice state.
Args:
None
Returns:
None
"""
potential_jumps = self.potential_jumps()
if not potential_jumps:
raise BlockedLatticeError('No moves are possible in this lattice')
all_transitions = transitions.Transitions( potential_jumps )
random_jump = all_transitions.random()
delta_t = all_transitions.time_to_jump()
self.time += delta_t
self.update_site_occupation_times( delta_t )
self.update( random_jump )
return delta_t
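# Illustrative simulation loop (a sketch; building `sites` and `cell_lengths` is done
# elsewhere, e.g. by the lattice_mc setup helpers):
#     lattice = Lattice(sites, cell_lengths=np.array([10.0, 10.0, 10.0]))
#     atoms = lattice.populate_sites(number_of_atoms=20)
#     for _ in range(1000):
#         lattice.jump()                  # one kinetic Monte Carlo step
#     print(lattice.site_occupation_statistics())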
def update_site_occupation_times( self, delta_t ):
"""
Increase the time occupied for all occupied sites by delta t
Args:
delta_t (Float): Timestep.
Returns:
None
"""
for site in self.occupied_sites():
site.time_occupied += delta_t
def site_occupation_statistics( self ):
"""
Average site occupation for each site type
Args:
None
Returns:
(Dict(Str:Float)): Dictionary of occupation statistics, e.g.::
{ 'A' : 2.5, 'B' : 25.3 }
"""
if self.time == 0.0:
return None
occupation_stats = { label : 0.0 for label in self.site_labels }
for site in self.sites:
occupation_stats[ site.label ] += site.time_occupied
for label in self.site_labels:
occupation_stats[ label ] /= self.time
return occupation_stats
def set_site_energies( self, energies ):
"""
Set the energies for every site in the lattice according to the site labels.
Args:
            energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.::
                { 'A' : 1.0, 'B' : 0.0 }
Returns:
None
"""
self.site_energies = energies
for site_label in energies:
for site in self.sites:
if site.label == site_label:
site.energy = energies[ site_label ]
def set_nn_energy( self, delta_E ):
"""
Set the lattice nearest-neighbour energy.
Args:
delta_E (Float): The nearest-neighbour energy E_nn.
Returns:
None
"""
self.nn_energy = delta_E
def set_cn_energies( self, cn_energies ):
"""
Set the coordination number dependent energies for this lattice.
Args:
cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::
{ 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }
Returns:
None
"""
for site in self.sites:
site.set_cn_occupation_energies( cn_energies[ site.label ] )
self.cn_energies = cn_energies
def site_coordination_numbers( self ):
"""
Returns a dictionary of the coordination numbers for each site label. e.g.::
{ 'A' : { 4 }, 'B' : { 2, 4 } }
Args:
            None
Returns:
coordination_numbers (Dict(Str:Set(Int))): dictionary of coordination
numbers for each site label.
"""
coordination_numbers = {}
for l in self.site_labels:
            coordination_numbers[ l ] = set( [ len( site.neighbours ) for site in self.sites if site.label == l ] )
return coordination_numbers
def max_site_coordination_numbers( self ):
"""
Returns a dictionary of the maximum coordination number for each site label.
e.g.::
{ 'A' : 4, 'B' : 4 }
Args:
            None
        Returns:
            max_coordination_numbers (Dict(Str:Int)): dictionary of maximum coordination
number for each site label.
"""
return { l : max( c ) for l, c in self.site_coordination_numbers().items() }
def site_specific_coordination_numbers( self ):
"""
Returns a dictionary of coordination numbers for each site type.
Args:
None
Returns:
(Dict(Str:List(Int))) : Dictionary of coordination numbers for each site type, e.g.::
{ 'A' : [ 2, 4 ], 'B' : [ 2 ] }
"""
specific_coordination_numbers = {}
for site in self.sites:
specific_coordination_numbers[ site.label ] = site.site_specific_neighbours()
return specific_coordination_numbers
def connected_site_pairs( self ):
"""
Returns | |
__author__ = 'calvin'
import configparser
import datetime
import glob
import json
import logging
import os
import re
import shutil
import smtplib
import sys
import time
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from threading import Thread
import jinja2
from api import upload_report, upload_many_reports, HQ_DEFAULT_TIMEOUT, SMTP_DEFAULT_TIMEOUT
from process import CrashReportingProcess
from tools import analyze_traceback, repr as safe_repr
class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
    recursion_depth_limit: Maximum number of tracebacks to record in the case of RuntimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
    obj_ref_regex = re.compile(r"[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
        self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
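    # Note: any extra keyword arguments are stored alongside the required fields, so
    # optional settings can be passed the same way, e.g.
    # setup_smtp(..., timeout=10, **{'from': 'noreply@example.com'})
    # ('from' has to be passed via a dict because it is a Python keyword).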
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
        if self._watcher and self._watcher.is_alive():
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
            self._watcher.daemon = True
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
        overridden = self.__class__.custom_inspection is not CrashReporter.custom_inspection
        if overridden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].items():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.items()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
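    # Sketch of a subclass overriding custom_inspection() to attach extra data to each
    # analyzed frame (illustrative; values under 'Custom Inspection' should be dicts, as
    # assumed by the serialization check in _analyze_traceback above):
    #
    #     class MyReporter(CrashReporter):
    #         def custom_inspection(self, analyzed_traceback):
    #             for tb in analyzed_traceback:
    #                 tb['Custom Inspection'] = {'app_state': {'mode': 'demo'}}
    #             return analyzed_traceback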
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
        :param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
        exception handler (sys.__excepthook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = configparser.ConfigParser()
with open(config, 'r') as _f:
            cfg.read_file(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
            self.offline_report_limit = int(general.get('offline_report_limit', CrashReporter.offline_report_limit))
            self.max_string_length = int(general.get('max_string_length', CrashReporter.max_string_length))
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
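    # Example configuration file layout consumed by load_configuration() (illustrative;
    # section and option names follow the code above, values are placeholders):
    #
    #     [General]
    #     application_name = MyApp
    #     application_version = 1.2.3
    #
    #     [SMTP]
    #     host = smtp.example.com
    #     port = 587
    #     user = reports@example.com
    #     passwd = secret
    #     recipients = dev@example.com,ops@example.com
    #
    #     [HQ]
    #     server = https://hq.example.com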
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline | |
from __future__ import unicode_literals
import datetime
import os
import unittest
from django import get_version
from django.db import models
from django.template import Template, Context
from django.test import SimpleTestCase
from django.test.utils import override_settings
from six.moves import range
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
# Django > 2
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
return klass
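# Illustrative use of the decorator described above: define __str__ returning text and
# let python_2_unicode_compatible supply __unicode__ on Python 2 (here it is a no-op
# fallback), exactly as the test models below do:
#
#     @python_2_unicode_compatible
#     class Tag(models.Model):
#         name = models.CharField(max_length=20)
#
#         def __str__(self):
#             return self.name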
from .models import (
SingleBrick,
ListBrick,
BaseWall,
Criterion,
SORTING_DESC,
SORTING_ASC,
BaseWallFactory,
wall_factory,
)
from djangobricks.exceptions import TemplateNameNotFound
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a noop skipIf for python 2.6
def _skipIf(test, message=''):
def wrapper(method):
if test:
return lambda *args, **kwargs: None
else:
return method
return wrapper
if hasattr(unittest, 'skipIf'):
skipIf = unittest.skipIf
else:
skipIf = _skipIf
def default():
return 1
def callback_filter_a(brick):
return brick.item._meta.model_name == 'testmodela'
def callback_filter_b(brick):
return brick.item._meta.model_name == 'testmodelb'
def callback_filter_always_true(brick):
return True
class TestSingleBrick(SingleBrick):
template_name = 'single_brick.html'
class TestListBrick(ListBrick):
template_name = 'list_brick.html'
class TestNoTemplateSingleBrick(SingleBrick): pass
class NotABrick(object): pass
class TestBrickWall(BaseWall): pass
@python_2_unicode_compatible
class TestModelA(models.Model):
name = models.CharField(max_length=8)
popularity = models.PositiveIntegerField()
pub_date = models.DateTimeField()
is_sticky = models.BooleanField(default=False)
def __str__(self):
return self.name
def callable_popularity(self):
return self.popularity
@python_2_unicode_compatible
class TestModelB(models.Model):
name = models.CharField(max_length=8)
date_add = models.DateTimeField()
popularity = models.PositiveIntegerField()
is_sticky = models.BooleanField(default=False)
def __str__(self):
return self.name
def pub_date(self):
return self.date_add
@python_2_unicode_compatible
class TestModelC(models.Model):
name = models.CharField(max_length=8)
pub_date = models.DateTimeField()
popularity = models.PositiveIntegerField()
is_sticky = models.BooleanField(default=False)
def __str__(self):
return self.name
class TestWallFactory(BaseWallFactory):
def get_content(self):
return (
(TestSingleBrick, TestModelA.objects.all()),
(TestSingleBrick, TestModelB.objects.all()),
)
class TestWallFactoryNoCriteria(BaseWallFactory):
def get_content(self):
return (
(TestSingleBrick, TestModelA.objects.all()),
(TestSingleBrick, TestModelB.objects.all()),
)
class TestWrongContentWallFactory(BaseWallFactory):
def get_content(self):
return (
(NotABrick, TestModelA.objects.all()),
)
@override_settings(TEMPLATE_DIRS=['%s/../tests/templates' % CURRENT_DIR])
class BrickTest(SimpleTestCase):
allow_database_queries = True
def setUp(self):
self.bricks = []
def tearDown(self):
TestModelA.objects.all().delete()
TestModelB.objects.all().delete()
TestModelC.objects.all().delete()
self.bricks = []
def _create_model_a_objects_and_bricks(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
self.brickA1 = SingleBrick(objectA1)
self.brickA2 = SingleBrick(objectA2)
self.brickA3 = SingleBrick(objectA3)
self.brickA4 = SingleBrick(objectA4)
for i in range(1, 5):
self.bricks.append(getattr(self, 'brickA%s' % i))
def _create_model_b_objects_and_bricks(self):
objectB1 = TestModelB.objects.create(name='objectB1', popularity=10,
date_add=datetime.datetime(2006, 1, 1, 12, 0), is_sticky=False)
objectB2 = TestModelB.objects.create(name='objectB2', popularity=9,
date_add=datetime.datetime(2007, 1, 1, 12, 0), is_sticky=False)
objectB3 = TestModelB.objects.create(name='objectB3', popularity=8,
date_add=datetime.datetime(2008, 1, 1, 12, 0), is_sticky=True)
objectB4 = TestModelB.objects.create(name='objectB4', popularity=7,
date_add=datetime.datetime(2009, 1, 1, 12, 0), is_sticky=False)
self.brickB1 = SingleBrick(objectB1)
self.brickB2 = SingleBrick(objectB2)
self.brickB3 = SingleBrick(objectB3)
self.brickB4 = SingleBrick(objectB4)
for i in range(1, 5):
self.bricks.append(getattr(self, 'brickB%s' % i))
def _create_model_c_objects_and_bricks(self):
objectC1 = TestModelC.objects.create(name='objectC1', popularity=20,
pub_date=datetime.datetime(2002, 1, 1, 12, 0), is_sticky=False)
objectC2 = TestModelC.objects.create(name='objectC2', popularity=19,
pub_date=datetime.datetime(2003, 1, 1, 12, 0), is_sticky=False)
objectC3 = TestModelC.objects.create(name='objectC3', popularity=18,
pub_date=datetime.datetime(2004, 1, 1, 12, 0), is_sticky=True)
objectC4 = TestModelC.objects.create(name='objectC4', popularity=17,
pub_date=datetime.datetime(2005, 1, 1, 12, 0), is_sticky=False)
self.brickC1 = ListBrick([objectC1, objectC2])
self.brickC2 = ListBrick([objectC3, objectC4])
for i in range(1, 3):
self.bricks.append(getattr(self, 'brickC%s' % i))
# Slicing, iteration, length
def test_slicing(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks)
self.assertEqual(wall[0], self.brickA1)
self.assertEqual(wall[:1], [self.brickA1])
self.assertEqual(wall[1:3], [self.brickA2, self.brickA3])
def test_iteration(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks)
        for i, brick in enumerate(wall, 1):
            self.assertEqual(brick, getattr(self, 'brickA%s' % i))
def test_length(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks)
self.assertEqual(len(wall), 4)
# Instantiation
def test_single_brick_init(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
bricks = SingleBrick.get_bricks_for_queryset(TestModelA.objects.all())
wall = TestBrickWall(bricks)
self.assertEqual(wall[0].item, objectA1)
self.assertEqual(wall[1].item, objectA2)
self.assertEqual(wall[2].item, objectA3)
self.assertEqual(wall[3].item, objectA4)
def test_list_brick_init(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
bricks = ListBrick.get_bricks_for_queryset(TestModelA.objects.all())
wall = TestBrickWall(bricks)
self.assertEqual(wall[0].items, [objectA1, objectA2, objectA3, objectA4])
# Missing criterion attribute
def test_missing_criterion_attribute(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
criterion = Criterion('i_dont_exist')
self.assertIsNone(criterion.get_value_for_item(objectA1))
# Callable criterion
def test_callable_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
criterion = Criterion('callable_popularity')
self.assertEqual(criterion.get_value_for_item(objectA1), 5)
def test_callable_criterion_in_wall(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('callable_popularity'), SORTING_DESC),
))
expected = [self.brickA1, self.brickA2, self.brickA3, self.brickA4]
self.assertEqual(list(wall), expected)
# Callable default criterion
def test_callable_default_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
criterion = Criterion('i_dont_exist', default=default)
self.assertEqual(criterion.get_value_for_item(objectA1), 1)
def test_callable_default_criterion_in_wall(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('i_dont_exist', default=default), SORTING_DESC),
))
expected = [self.brickA1, self.brickA2, self.brickA3, self.brickA4]
self.assertEqual(list(wall), expected)
# Callback criterion
def test_callback_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
item_list = [objectA1, objectA2, objectA3, objectA4]
criterion = Criterion('popularity', max)
self.assertEqual(criterion.get_value_for_list(item_list), 5)
def test_custom_callback_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=4,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
item_list = [objectA1, objectA2, objectA3, objectA4]
criterion = Criterion('popularity', lambda x:sum(x)/len(x))
self.assertEqual(criterion.get_value_for_list(item_list), 4)
def test_callback_criterion_in_wall(self):
self._create_model_c_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity', callback=min), SORTING_ASC),
))
expected = [self.brickC2, self.brickC1]
self.assertEqual(list(wall), expected)
# Callback default criterion
def test_callback_default_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
item_list = [objectA1, objectA2, objectA3, objectA4]
criterion = Criterion('i_dont_exist', max, default=10)
self.assertEqual(criterion.get_value_for_list(item_list), 10)
def test_callback_default_empty_list_criterion(self):
criterion = Criterion('_', max, default=10)
self.assertEqual(criterion.get_value_for_list([]), 10)
# Callback callable default criterion
def test_callback_callable_default_criterion(self):
objectA1 = TestModelA.objects.create(name='objectA1', popularity=5,
pub_date=datetime.datetime(2010, 1, 1, 12, 0), is_sticky=False)
objectA2 = TestModelA.objects.create(name='objectA2', popularity=4,
pub_date=datetime.datetime(2011, 1, 1, 12, 0), is_sticky=False)
objectA3 = TestModelA.objects.create(name='objectA3', popularity=3,
pub_date=datetime.datetime(2012, 1, 1, 12, 0), is_sticky=True)
objectA4 = TestModelA.objects.create(name='objectA4', popularity=2,
pub_date=datetime.datetime(2013, 1, 1, 12, 0), is_sticky=False)
item_list = [objectA1, objectA2, objectA3, objectA4]
criterion = Criterion('i_dont_exist', max, default=default)
self.assertEqual(criterion.get_value_for_list(item_list), 1)
def test_callback_callable_default_empty_list_criterion(self):
criterion = Criterion('_', max, default=default)
self.assertEqual(criterion.get_value_for_list([]), 1)
# Wrong call
def test_callback_value_error_criterion(self):
criterion = Criterion('_')
with self.assertRaises(ValueError):
criterion.get_value_for_list('im_wrong')
# Single keys - Single bricks- Single models
def test_single_key_desc_sorting_single_bricks_single_models(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickA1, self.brickA2, self.brickA3, self.brickA4]
self.assertEqual(list(wall), expected)
def test_single_key_asc_sorting_single_bricks_single_models(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity'), SORTING_ASC),
))
expected = [self.brickA4, self.brickA3, self.brickA2, self.brickA1]
self.assertEqual(list(wall), expected)
# Multi keys - Single bricks - Single Models
def test_multi_key_1_sorting_single_bricks_single_models(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('is_sticky'), SORTING_DESC),
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickA3, self.brickA1, self.brickA2, self.brickA4]
self.assertEqual(list(wall), expected)
def test_multi_key_2_sorting_single_bricks_single_models(self):
self._create_model_a_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('is_sticky'), SORTING_ASC),
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickA1, self.brickA2, self.brickA4, self.brickA3]
self.assertEqual(list(wall), expected)
# Single keys - Single bricks - Multi models
def test_single_key_desc_sorting_single_bricks_multi_models(self):
self._create_model_a_objects_and_bricks()
self._create_model_b_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickB1, self.brickB2, self.brickB3, self.brickB4,
self.brickA1, self.brickA2, self.brickA3, self.brickA4]
self.assertEqual(list(wall), expected)
def test_single_key_asc_sorting_single_bricks_multi_models(self):
self._create_model_a_objects_and_bricks()
self._create_model_b_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity'), SORTING_ASC),
))
expected = [self.brickA4, self.brickA3, self.brickA2, self.brickA1,
self.brickB4, self.brickB3, self.brickB2, self.brickB1]
self.assertEqual(list(wall), expected)
# Multi keys - Single bricks - Multi models
def test_multi_key_1_sorting_single_bricks_multi_models(self):
self._create_model_a_objects_and_bricks()
self._create_model_b_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('is_sticky'), SORTING_DESC),
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickB3, self.brickA3, self.brickB1, self.brickB2,
self.brickB4, self.brickA1, self.brickA2, self.brickA4]
self.assertEqual(list(wall), expected)
def test_multi_key_2_sorting_single_bricks_multi_models(self):
self._create_model_a_objects_and_bricks()
self._create_model_b_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('is_sticky'), SORTING_ASC),
(Criterion('popularity'), SORTING_DESC),
))
expected = [self.brickB1, self.brickB2, self.brickB4, self.brickA1,
self.brickA2, self.brickA4, self.brickB3, self.brickA3]
self.assertEqual(list(wall), expected)
# Single keys - Multi bricks - Single models
def test_single_key_1_sorting_multi_bricks_single_models(self):
self._create_model_c_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity', callback=max), SORTING_DESC),
))
expected = [self.brickC1, self.brickC2]
self.assertEqual(list(wall), expected)
def test_single_key_2_sorting_multi_bricks_single_models(self):
self._create_model_c_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity', callback=min), SORTING_DESC),
))
expected = [self.brickC1, self.brickC2]
self.assertEqual(list(wall), expected)
def test_single_key_3_sorting_multi_bricks_single_models(self):
self._create_model_c_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity', callback=max), SORTING_ASC),
))
expected = [self.brickC2, self.brickC1]
self.assertEqual(list(wall), expected)
def test_single_key_4_sorting_multi_bricks_single_models(self):
self._create_model_c_objects_and_bricks()
wall = TestBrickWall(self.bricks, criteria=(
(Criterion('popularity', callback=min), SORTING_ASC),
))
expected = [self.brickC2, self.brickC1]
self.assertEqual(list(wall), expected)
# Multi keys - Multi bricks - Single models
def test_multi_key_1_sorting_multi_bricks_single_models(self):
self._create_model_c_objects_and_bricks()
wall = | |
= None
class LaunchTemplateBlockDeviceMapping(BaseModel):
DeviceName: Optional[String] = None
VirtualName: Optional[String] = None
Ebs: Optional[LaunchTemplateEbsBlockDevice] = None
NoDevice: Optional[String] = None
class LaunchTemplateBlockDeviceMappingList(BaseModel):
__root__: list[LaunchTemplateBlockDeviceMapping]
class LaunchTemplateEbsBlockDeviceRequest(BaseModel):
Encrypted: Optional[Boolean] = None
DeleteOnTermination: Optional[Boolean] = None
Iops: Optional[Integer] = None
KmsKeyId: Optional[KmsKeyId] = None
SnapshotId: Optional[SnapshotId4] = None
VolumeSize: Optional[Integer] = None
VolumeType: Optional[VolumeType] = None
Throughput: Optional[Integer] = None
class LaunchTemplateBlockDeviceMappingRequest(BaseModel):
DeviceName: Optional[String] = None
VirtualName: Optional[String] = None
Ebs: Optional[LaunchTemplateEbsBlockDeviceRequest] = None
NoDevice: Optional[String] = None
class LaunchTemplateCapacityReservationSpecificationResponse(BaseModel):
CapacityReservationPreference: Optional[CapacityReservationPreference] = None
CapacityReservationTarget: Optional[CapacityReservationTargetResponse] = None
class LaunchTemplateCpuOptions(BaseModel):
CoreCount: Optional[Integer] = None
ThreadsPerCore: Optional[Integer] = None
class LaunchTemplateElasticInferenceAcceleratorCount(BaseModel):
__root__: conint(ge=1)
class LaunchTemplateElasticInferenceAccelerator(BaseModel):
Type: String
Count: Optional[LaunchTemplateElasticInferenceAcceleratorCount] = None
class LaunchTemplateElasticInferenceAcceleratorResponse(BaseModel):
Type: Optional[String] = None
Count: Optional[Integer] = None
class LaunchTemplateElasticInferenceAcceleratorResponseList(BaseModel):
__root__: list[LaunchTemplateElasticInferenceAcceleratorResponse]
class LaunchTemplateEnclaveOptions(BaseModel):
Enabled: Optional[Boolean] = None
class LaunchTemplateErrorCode(Enum):
launchTemplateIdDoesNotExist = 'launchTemplateIdDoesNotExist'
launchTemplateIdMalformed = 'launchTemplateIdMalformed'
launchTemplateNameDoesNotExist = 'launchTemplateNameDoesNotExist'
launchTemplateNameMalformed = 'launchTemplateNameMalformed'
launchTemplateVersionDoesNotExist = 'launchTemplateVersionDoesNotExist'
unexpectedError = 'unexpectedError'
class LaunchTemplateHibernationOptions(BaseModel):
Configured: Optional[Boolean] = None
class LaunchTemplateHttpTokensState(Enum):
optional = 'optional'
required = 'required'
class LaunchTemplateIamInstanceProfileSpecification(BaseModel):
Arn: Optional[String] = None
Name: Optional[String] = None
class LaunchTemplateInstanceMetadataEndpointState(Enum):
disabled = 'disabled'
enabled = 'enabled'
class LaunchTemplateInstanceMetadataOptionsState(Enum):
pending = 'pending'
applied = 'applied'
class LaunchTemplateInstanceMetadataProtocolIpv6(Enum):
disabled = 'disabled'
enabled = 'enabled'
class LaunchTemplateInstanceMetadataOptions(BaseModel):
State: Optional[LaunchTemplateInstanceMetadataOptionsState] = None
HttpTokens: Optional[LaunchTemplateHttpTokensState] = None
HttpPutResponseHopLimit: Optional[Integer] = None
HttpEndpoint: Optional[LaunchTemplateInstanceMetadataEndpointState] = None
HttpProtocolIpv6: Optional[LaunchTemplateInstanceMetadataProtocolIpv6] = None
class LaunchTemplateInstanceNetworkInterfaceSpecification(BaseModel):
AssociateCarrierIpAddress: Optional[Boolean] = None
AssociatePublicIpAddress: Optional[Boolean] = None
DeleteOnTermination: Optional[Boolean] = None
Description: Optional[String] = None
DeviceIndex: Optional[Integer] = None
Groups: Optional[GroupIdStringList] = None
InterfaceType: Optional[String] = None
Ipv6AddressCount: Optional[Integer] = None
Ipv6Addresses: Optional[InstanceIpv6AddressList] = None
NetworkInterfaceId: Optional[NetworkInterfaceId2] = None
PrivateIpAddress: Optional[String] = None
PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList] = None
SecondaryPrivateIpAddressCount: Optional[Integer] = None
SubnetId: Optional[SubnetId1] = None
NetworkCardIndex: Optional[Integer] = None
Ipv4Prefixes: Optional[Ipv4PrefixListResponse] = None
Ipv4PrefixCount: Optional[Integer] = None
Ipv6Prefixes: Optional[Ipv6PrefixListResponse] = None
Ipv6PrefixCount: Optional[Integer] = None
class LaunchTemplateInstanceNetworkInterfaceSpecificationList(BaseModel):
__root__: list[LaunchTemplateInstanceNetworkInterfaceSpecification]
class LaunchTemplateInstanceNetworkInterfaceSpecificationRequest(BaseModel):
AssociateCarrierIpAddress: Optional[Boolean] = None
AssociatePublicIpAddress: Optional[Boolean] = None
DeleteOnTermination: Optional[Boolean] = None
Description: Optional[String] = None
DeviceIndex: Optional[Integer] = None
Groups: Optional[SecurityGroupIdStringList] = None
InterfaceType: Optional[String] = None
Ipv6AddressCount: Optional[Integer] = None
Ipv6Addresses: Optional[InstanceIpv6AddressListRequest] = None
NetworkInterfaceId: Optional[NetworkInterfaceId2] = None
PrivateIpAddress: Optional[String] = None
PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList] = None
SecondaryPrivateIpAddressCount: Optional[Integer] = None
SubnetId: Optional[SubnetId1] = None
NetworkCardIndex: Optional[Integer] = None
Ipv4Prefixes: Optional[Ipv4PrefixList] = None
Ipv4PrefixCount: Optional[Integer] = None
Ipv6Prefixes: Optional[Ipv6PrefixList] = None
Ipv6PrefixCount: Optional[Integer] = None
class LaunchTemplateLicenseConfiguration(BaseModel):
LicenseConfigurationArn: Optional[String] = None
class LaunchTemplateLicenseConfigurationRequest(BaseModel):
LicenseConfigurationArn: Optional[String] = None
class LaunchTemplateLicenseList(BaseModel):
__root__: list[LaunchTemplateLicenseConfiguration]
class LaunchTemplateOverrides(BaseModel):
InstanceType: Optional[InstanceType] = None
SpotPrice: Optional[String] = None
SubnetId: Optional[String] = None
AvailabilityZone: Optional[String] = None
WeightedCapacity: Optional[Double] = None
Priority: Optional[Double] = None
class LaunchTemplatePlacement(BaseModel):
AvailabilityZone: Optional[String] = None
Affinity: Optional[String] = None
GroupName: Optional[String] = None
HostId: Optional[String] = None
Tenancy: Optional[Tenancy] = None
SpreadDomain: Optional[String] = None
HostResourceGroupArn: Optional[String] = None
PartitionNumber: Optional[Integer] = None
class LaunchTemplateSpecification4(BaseModel):
LaunchTemplateId: Optional[LaunchTemplateId] = None
LaunchTemplateName: Optional[String] = None
Version: Optional[String] = None
class SpotInstanceType(Enum):
one_time = 'one-time'
persistent = 'persistent'
class ResourceType(Enum):
capacity_reservation = 'capacity-reservation'
client_vpn_endpoint = 'client-vpn-endpoint'
customer_gateway = 'customer-gateway'
carrier_gateway = 'carrier-gateway'
dedicated_host = 'dedicated-host'
dhcp_options = 'dhcp-options'
egress_only_internet_gateway = 'egress-only-internet-gateway'
elastic_ip = 'elastic-ip'
elastic_gpu = 'elastic-gpu'
export_image_task = 'export-image-task'
export_instance_task = 'export-instance-task'
fleet = 'fleet'
fpga_image = 'fpga-image'
host_reservation = 'host-reservation'
image = 'image'
import_image_task = 'import-image-task'
import_snapshot_task = 'import-snapshot-task'
instance = 'instance'
instance_event_window = 'instance-event-window'
internet_gateway = 'internet-gateway'
ipv4pool_ec2 = 'ipv4pool-ec2'
ipv6pool_ec2 = 'ipv6pool-ec2'
key_pair = 'key-pair'
launch_template = 'launch-template'
local_gateway = 'local-gateway'
local_gateway_route_table = 'local-gateway-route-table'
local_gateway_virtual_interface = 'local-gateway-virtual-interface'
local_gateway_virtual_interface_group = 'local-gateway-virtual-interface-group'
local_gateway_route_table_vpc_association = (
'local-gateway-route-table-vpc-association'
)
local_gateway_route_table_virtual_interface_group_association = (
'local-gateway-route-table-virtual-interface-group-association'
)
natgateway = 'natgateway'
network_acl = 'network-acl'
network_interface = 'network-interface'
network_insights_analysis = 'network-insights-analysis'
network_insights_path = 'network-insights-path'
placement_group = 'placement-group'
prefix_list = 'prefix-list'
replace_root_volume_task = 'replace-root-volume-task'
reserved_instances = 'reserved-instances'
route_table = 'route-table'
security_group = 'security-group'
security_group_rule = 'security-group-rule'
snapshot = 'snapshot'
spot_fleet_request = 'spot-fleet-request'
spot_instances_request = 'spot-instances-request'
subnet = 'subnet'
traffic_mirror_filter = 'traffic-mirror-filter'
traffic_mirror_session = 'traffic-mirror-session'
traffic_mirror_target = 'traffic-mirror-target'
transit_gateway = 'transit-gateway'
transit_gateway_attachment = 'transit-gateway-attachment'
transit_gateway_connect_peer = 'transit-gateway-connect-peer'
transit_gateway_multicast_domain = 'transit-gateway-multicast-domain'
transit_gateway_route_table = 'transit-gateway-route-table'
volume = 'volume'
vpc = 'vpc'
vpc_endpoint = 'vpc-endpoint'
vpc_endpoint_service = 'vpc-endpoint-service'
vpc_peering_connection = 'vpc-peering-connection'
vpn_connection = 'vpn-connection'
vpn_gateway = 'vpn-gateway'
vpc_flow_log = 'vpc-flow-log'
class LaunchTemplatesMonitoring(BaseModel):
Enabled: Optional[Boolean] = None
class LicenseConfiguration(BaseModel):
LicenseConfigurationArn: Optional[String] = None
class LicenseSpecificationListRequest(BaseModel):
__root__: list[LicenseConfigurationRequest]
class ListingStatus(Enum):
active = 'active'
pending = 'pending'
cancelled = 'cancelled'
closed = 'closed'
class LoadPermission(BaseModel):
UserId: Optional[String] = None
Group: Optional[PermissionGroup] = None
class LoadPermissionRequest(BaseModel):
Group: Optional[PermissionGroup] = None
UserId: Optional[String] = None
class LocalGatewayRouteType(Enum):
static = 'static'
propagated = 'propagated'
class LocalGatewayRouteState(Enum):
pending = 'pending'
active = 'active'
blackhole = 'blackhole'
deleting = 'deleting'
deleted = 'deleted'
class PrefixListState(Enum):
create_in_progress = 'create-in-progress'
create_complete = 'create-complete'
create_failed = 'create-failed'
modify_in_progress = 'modify-in-progress'
modify_complete = 'modify-complete'
modify_failed = 'modify-failed'
restore_in_progress = 'restore-in-progress'
restore_complete = 'restore-complete'
restore_failed = 'restore-failed'
delete_in_progress = 'delete-in-progress'
delete_complete = 'delete-complete'
delete_failed = 'delete-failed'
class MaxIpv4AddrPerInterface(BaseModel):
__root__: int
class MaxIpv6AddrPerInterface(BaseModel):
__root__: int
class MaxNetworkInterfaces(BaseModel):
__root__: int
class MaxResults108(BaseModel):
__root__: int
class MaximumNetworkCards(BaseModel):
__root__: int
class MembershipType(Enum):
static = 'static'
igmp = 'igmp'
class MemorySize(BaseModel):
__root__: int
class ModifyAddressAttributeRequest(BaseModel):
AllocationId: AllocationId1
DomainName: Optional[String] = None
DryRun: Optional[Boolean] = None
class ModifyAvailabilityZoneOptInStatus(Enum):
opted_in = 'opted-in'
not_opted_in = 'not-opted-in'
class ModifyAvailabilityZoneGroupRequest(BaseModel):
GroupName: String
OptInStatus: ModifyAvailabilityZoneOptInStatus
DryRun: Optional[Boolean] = None
class ModifyCapacityReservationRequest(BaseModel):
CapacityReservationId: CapacityReservationId
InstanceCount: Optional[Integer] = None
EndDate: Optional[DateTime] = None
EndDateType: Optional[EndDateType] = None
Accept: Optional[Boolean] = None
DryRun: Optional[Boolean] = None
class ModifyClientVpnEndpointRequest(BaseModel):
ClientVpnEndpointId: ClientVpnEndpointId1
ServerCertificateArn: Optional[String] = None
ConnectionLogOptions: Optional[ConnectionLogOptions1] = None
DnsServers: Optional[DnsServersOptionsModifyStructure] = None
VpnPort: Optional[Integer] = None
Description: Optional[String] = None
SplitTunnel: Optional[Boolean] = None
DryRun: Optional[Boolean] = None
SecurityGroupIds: Optional[ClientVpnSecurityGroupIdSet] = None
VpcId: Optional[VpcId2] = None
SelfServicePortal: Optional[SelfServicePortal] = None
ClientConnectOptions: Optional[ClientConnectOptions] = None
class ModifyDefaultCreditSpecificationRequest(BaseModel):
DryRun: Optional[Boolean] = None
InstanceFamily: UnlimitedSupportedInstanceFamily
CpuCredits: String
class ModifyEbsDefaultKmsKeyIdRequest(BaseModel):
KmsKeyId: KmsKeyId
DryRun: Optional[Boolean] = None
class OperationType(Enum):
add = 'add'
remove = 'remove'
class UserIdStringList(BaseModel):
__root__: list[String]
class UserGroupStringList(BaseModel):
__root__: list[String]
class ProductCodeStringList(BaseModel):
__root__: list[String]
class ModifyHostsRequest(BaseModel):
AutoPlacement: Optional[AutoPlacement] = None
HostIds: RequestHostIdList
HostRecovery: Optional[HostRecovery] = None
InstanceType: Optional[String] = None
InstanceFamily: Optional[String] = None
class ModifyIdFormatRequest(BaseModel):
Resource: String
UseLongIds: Boolean
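# Illustrative construction of one of these generated request models. It assumes
# `String` and `Boolean` are the usual single-value `__root__` wrapper models defined
# earlier in this generated module, so plain Python values are accepted and validated
# on construction:
#
#     req = ModifyIdFormatRequest(Resource='instance', UseLongIds=True)
#     payload = req.dict()   # expected to unwrap the __root__ wrappers on export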
class ModifyIdentityIdFormatRequest(BaseModel):
PrincipalArn: String
Resource: String
UseLongIds: Boolean
class ModifyInstanceCapacityReservationAttributesRequest(BaseModel):
InstanceId: InstanceId
CapacityReservationSpecification: CapacityReservationSpecification
DryRun: Optional[Boolean] = None
class ModifyInstanceCreditSpecificationRequest(BaseModel):
DryRun: Optional[Boolean] = None
ClientToken: Optional[String] = None
InstanceCreditSpecifications: InstanceCreditSpecificationListRequest
class ModifyInstanceEventStartTimeRequest(BaseModel):
DryRun: Optional[Boolean] = None
InstanceId: InstanceId
InstanceEventId: String
NotBefore: DateTime
class ModifyInstanceMetadataOptionsRequest(BaseModel):
InstanceId: InstanceId
HttpTokens: Optional[HttpTokensState] = None
HttpPutResponseHopLimit: Optional[Integer] = None
HttpEndpoint: Optional[InstanceMetadataEndpointState] = None
DryRun: Optional[Boolean] = None
HttpProtocolIpv6: Optional[InstanceMetadataProtocolState] = None
class ModifyInstancePlacementRequest(BaseModel):
Affinity: Optional[Affinity2] = None
GroupName: Optional[PlacementGroupName] = None
HostId: Optional[DedicatedHostId] = None
InstanceId: InstanceId
Tenancy: Optional[HostTenancy] = None
PartitionNumber: Optional[Integer] = None
HostResourceGroupArn: Optional[String] = None
class ModifyLaunchTemplateRequest(BaseModel):
DryRun: Optional[Boolean] = None
ClientToken: Optional[String] = None
LaunchTemplateId: Optional[LaunchTemplateId] = None
LaunchTemplateName: Optional[LaunchTemplateName] = None
DefaultVersion: Optional[String] = None
class RemovePrefixListEntries(BaseModel):
__root__: list[RemovePrefixListEntry] = Field(..., max_items=100, min_items=0)
class ModifyManagedPrefixListRequest(BaseModel):
DryRun: Optional[Boolean] = None
PrefixListId: PrefixListResourceId
CurrentVersion: Optional[Long] = None
PrefixListName: Optional[String] = None
AddEntries: Optional[AddPrefixListEntries] = None
RemoveEntries: Optional[RemovePrefixListEntries] = None
MaxEntries: Optional[Integer] = None
class NetworkInterfaceAttachmentChanges(BaseModel):
AttachmentId: Optional[NetworkInterfaceAttachmentId] = None
DeleteOnTermination: Optional[Boolean] = None
class ModifyNetworkInterfaceAttributeRequest(BaseModel):
Attachment: Optional[NetworkInterfaceAttachmentChanges] = None
Description: Optional[AttributeValue] = None
DryRun: Optional[Boolean] = None
Groups: Optional[SecurityGroupIdStringList] = None
NetworkInterfaceId: NetworkInterfaceId2
SourceDestCheck: Optional[AttributeBooleanValue] = None
class ModifySubnetAttributeRequest(BaseModel):
AssignIpv6AddressOnCreation: Optional[AttributeBooleanValue] = None
MapPublicIpOnLaunch: Optional[AttributeBooleanValue] = None
SubnetId: SubnetId1
MapCustomerOwnedIpOnLaunch: Optional[AttributeBooleanValue] = None
CustomerOwnedIpv4Pool: Optional[CoipPoolId] = None
class TrafficMirrorNetworkServiceList(BaseModel):
__root__: list[TrafficMirrorNetworkService]
class ModifyTrafficMirrorFilterNetworkServicesRequest(BaseModel):
TrafficMirrorFilterId: TrafficMirrorFilterId1
AddNetworkServices: Optional[TrafficMirrorNetworkServiceList] = None
RemoveNetworkServices: Optional[TrafficMirrorNetworkServiceList] = None
DryRun: Optional[Boolean] = None
class TrafficMirrorFilterRuleFieldList(BaseModel):
__root__: list[TrafficMirrorFilterRuleField]
class ModifyTrafficMirrorFilterRuleRequest(BaseModel):
TrafficMirrorFilterRuleId: TrafficMirrorFilterRuleId2
TrafficDirection: Optional[TrafficDirection] = None
RuleNumber: Optional[Integer] = None
RuleAction: Optional[TrafficMirrorRuleAction] = None
DestinationPortRange: Optional[TrafficMirrorPortRangeRequest] = None
SourcePortRange: Optional[TrafficMirrorPortRangeRequest] = None
Protocol: Optional[Integer] = None
DestinationCidrBlock: Optional[String] = None
SourceCidrBlock: Optional[String] = None
Description: Optional[String] = None
RemoveFields: Optional[TrafficMirrorFilterRuleFieldList] = None
DryRun: Optional[Boolean] = None
class TrafficMirrorSessionFieldList(BaseModel):
__root__: list[TrafficMirrorSessionField]
class ModifyTrafficMirrorSessionRequest(BaseModel):
TrafficMirrorSessionId: TrafficMirrorSessionId1
TrafficMirrorTargetId: Optional[TrafficMirrorTargetId1] = None
TrafficMirrorFilterId: Optional[TrafficMirrorFilterId1] = None
PacketLength: Optional[Integer] = None
SessionNumber: Optional[Integer] = None
VirtualNetworkId: Optional[Integer] = None
Description: Optional[String] = None
RemoveFields: Optional[TrafficMirrorSessionFieldList] = None
DryRun: Optional[Boolean] = None
class ModifyTransitGatewayOptions(BaseModel):
AddTransitGatewayCidrBlocks: Optional[TransitGatewayCidrBlockStringList] = None
RemoveTransitGatewayCidrBlocks: Optional[TransitGatewayCidrBlockStringList] = None
VpnEcmpSupport: Optional[VpnEcmpSupportValue] = None
DnsSupport: Optional[DnsSupportValue] = None
AutoAcceptSharedAttachments: Optional[AutoAcceptSharedAttachmentsValue] = None
DefaultRouteTableAssociation: Optional[DefaultRouteTableAssociationValue] = None
AssociationDefaultRouteTableId: Optional[TransitGatewayRouteTableId] = None
DefaultRouteTablePropagation: Optional[DefaultRouteTablePropagationValue] = None
PropagationDefaultRouteTableId: Optional[TransitGatewayRouteTableId] = None
class ModifyTransitGatewayPrefixListReferenceRequest(BaseModel):
TransitGatewayRouteTableId: TransitGatewayRouteTableId
PrefixListId: PrefixListResourceId
TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] = None
Blackhole: Optional[Boolean] = None
DryRun: Optional[Boolean] = None
class ModifyTransitGatewayRequest(BaseModel):
TransitGatewayId: TransitGatewayId
Description: Optional[String] = | |
tuple
Set of parameters.
"""
# Current shape
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
if self.init_params == 'random':
# Dirichlet concentration hyperparameters
at = np.ones((self.K,))*(H*W)/2
# Normal precision-scale hyperparameters
bt = np.ones((self.K,))*(H*W)/2
# Wishart degrees of freedom
nt = np.ones((self.K,))*(H*W)/2
mt = np.zeros((self.K, D))
Wt = np.zeros((D, D, self.K))
for k in range(self.K):
# Hypermeans
mt[k, :] = np.mean(X, axis=0) + rnd.randn(1, D)*.1
# Hyperprecisions
Wt[:, :, k] = np.eye(D)
# Initialize variational posterior responsibilities
rho = np.ones((H, W, self.K)) / self.K
elif self.init_params in ('kmeans', 'k-means'):
# Fit k-means to data and obtain cluster assignment
label = KMeans(n_clusters=self.K, n_init=1).fit(X).labels_
# Set rho based on cluster labels
rho = np.zeros((H*W, self.K))
rho[np.arange(H*W), label] = 1
# Dirichlet concentration hyperparameters
at = np.sum(rho, axis=0)
# Normal precision-scale hyperparameters
bt = np.sum(rho, axis=0)
# Wishart degrees of freedom
nt = np.sum(rho, axis=0)
mt = np.zeros((self.K, D))
Wt = np.zeros((D, D, self.K))
for k in range(self.K):
# Hypermeans
mt[k, :] = np.sum(rho[:, [k]] * X, axis=0) / np.sum(rho[:, k])
# Hyperprecisions
Wt[:, :, k] = np.eye(D)
else:
raise ValueError('Provided method not recognized.')
return (at, bt, nt, mt, Wt), rho
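    # Note: the two initialization schemes trade robustness for speed -- 'random'
    # perturbs the global data mean for every component, while 'kmeans' seeds the
    # responsibilities from a hard k-means labeling, which usually shortens the VB-EM
    # burn-in. Minimal sketch, assuming `model` is an instance of this class and X is
    # an (H, W, D) image array:
    #
    #     thetat, rho = model.initialize_posteriors(X)
    #     at, bt, nt, mt, Wt = thetat   # Dirichlet, scale, dof, hypermeans, Wisharts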
def free_energy(self, X, rho, thetat, report=True):
"""
Compute free energy term to monitor progress.
Parameters
----------
X : array
Observed image (height by width by channels).
        rho : array
            Array of variational parameters (height by width by classes).
        thetat : array
            Parameters of variational posteriors (the priors are read from self.theta0).
        report : bool
            Print value of free energy function.
        Returns
        -------
        F : float
            Value of the variational free energy (evidence lower bound), used to monitor convergence.
"""
# Shapes
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
rho = rho.reshape((H*W, self.K))
# Unpack parameter sets
a0, b0, n0, m0, W0 = self.theta0
at, bt, nt, mt, Wt = thetat
# Preallocate terms for energy function
E1 = 0
E2 = 0
E3 = 0
E4 = 0
E5 = 0
E6 = 0
E7 = 0
# Loop over classes
for k in range(self.K):
''' Convenience variables '''
# Proportion assigned to each component
Nk = np.sum(rho[:, k], axis=0)
# Responsibility-weighted mean
xk = np.sum(rho[:, [k]] * X, axis=0) / Nk
# Reponsibility-weighted variance
Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk
# Mahalanobis distance from hypermean
mWm = (mt[k, :] - m0[k, :]).T @ Wt[:, :, k] @ (mt[k, :] - m0[k, :])
# Mahalanobis distance from responsibility-weighted mean
xWx = (xk - mt[k, :]) @ Wt[:, :, k] @ (xk - mt[k, :]).T
# Entropy-based terms
Elog_pik = digamma(at[k]) - digamma(np.sum(at))
Elog_Lak = (D*np.log(2) +
self.log_det(Wt[:, :, k]) +
self.multivariate_digamma(nt[k], D))
''' Energy function '''
# First term
E1 += Nk/2*(Elog_Lak - D / bt[k] -
nt[k]*(np.trace(Sk @ Wt[:, :, k]) + xWx) -
D*np.log(2*np.pi))
# Second term
E2 += np.sum(rho[:, k] * Elog_pik, axis=0)
# Third term
E3 += (a0[k] - 1)*Elog_pik + (gammaln(np.sum(a0)) -
np.sum(gammaln(a0))) / self.K
# Fourth term
E4 += 1/2*(D*np.log(b0[k] / (2*np.pi)) +
Elog_Lak -
D*b0[k]/bt[k] -
b0[k]*nt[k]*mWm +
(n0[k] - D - 1)*Elog_Lak -
2*self.log_partition_Wishart(Wt[:, :, k], nt[k]) +
nt[k]*np.trace(inv(W0[:, :, k])*Wt[:, :, k]))
# Ignore underflow error from log rho
            with np.errstate(under='ignore', divide='ignore'):
# Set -inf to most negative number
lrho = np.maximum(np.log(rho[:, k]), np.finfo(rho.dtype).min)
# Fifth term
E5 += np.sum(rho[:, k] * lrho, axis=0)
# Sixth term
E6 += (at[k] - 1)*Elog_pik + (gammaln(np.sum(at)) -
np.sum(gammaln(at))) / self.K
# Seventh term
E7 += (Elog_Lak/2 +
D/2*np.log(bt[k] / (2*np.pi)) -
D/2 - self.entropy_Wishart(Wt[:, :, k], nt[k]))
# Compute free energy term
F = E1 + E2 + E3 + E4 - E5 - E6 - E7
# Print free energy
if report:
print('Free energy = ' + str(F))
return F
def expectation_step(self, X, thetat, savefn=''):
"""
Perform expectation step.
Parameters
----------
X : array
Observed image (height by width by channels).
thetat : array
Current iteration of parameters of variational posteriors.
Returns
-------
rho : array
Updated array of variational parameters / responsibilities.
"""
# Shape of variational parameter array
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
# Unpack tuple of hyperparameters
at, bt, nt, mt, Wt = thetat
# Initialize logarithmic rho
log_rho = np.zeros((H*W, self.K))
for k in range(self.K):
# Compute expected log mixing coefficient
E1 = digamma(at[k]) - digamma(np.sum(at))
# Compute exponentiated expected log precision
E2 = (D*np.log(2) + self.log_det(Wt[:, :, k]) +
self.multivariate_digamma(nt[k], D))
# Compute expected hypermean and hyperprecision
E3 = D/bt[k] + self.distW(X - mt[k, :], nt[k]*Wt[:, :, k])
# Update variational parameter at current pixels
log_rho[:, k] = E1 + E2/2 - E3/2
# Subtract largest number from log_rho
log_rho = log_rho - np.max(log_rho, axis=1)[:, np.newaxis]
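        # Subtracting the per-pixel maximum leaves the normalized responsibilities
        # unchanged but prevents np.exp from overflowing (the standard stable-softmax /
        # log-sum-exp trick).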
# Exponentiate and normalize
rho = np.exp(log_rho) / np.sum(np.exp(log_rho), axis=1)[:, np.newaxis]
# Check for underflow problems
if np.any(np.sum(rho, axis=1) == 0.0):
raise RuntimeError('Variational parameter underflow.')
return rho.reshape((H, W, self.K))
def maximization_step(self, X, rho, thetat):
"""
Perform maximization step from variational-Bayes-EM.
Parameters
----------
X : array
Observed image (height by width by channels).
rho : array
Array of variational parameters (height by width by classes).
thetat : array
Current iteration of hyperparameters of posteriors.
Returns
-------
thetat : array
Next iteration of hyperparameters of posteriors.
"""
# Shape of image
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
rho = rho.reshape((H*W, self.K))
# Unpack parameter sets
a0, b0, n0, m0, W0 = self.theta0
at, bt, nt, mt, Wt = thetat
# Iterate over classes
for k in range(self.K):
# Total responsibility for class k
Nk = np.sum(rho[:, k], axis=0)
# Responsibility-weighted mean for class k
xk = np.sum(rho[:, [k]] * X, axis=0) / Nk
# Responsibility-weighted covariance for class k
Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk
# Update alpha
at[k] = a0[k] + Nk
# Update nu
nt[k] = n0[k] + Nk
# Update beta
bt[k] = b0[k] + Nk
# Update hypermean
mt[k, :] = (b0[k]*m0[k, :] + Nk*xk) / (b0[k] + Nk)
# Update hyperprecision
            Wt[:, :, k] = inv(inv(W0[:, :, k]) + Nk*Sk + (b0[k]*Nk) / bt[k] *
                              np.outer(xk - m0[k, :], xk - m0[k, :]))
return at, bt, nt, mt, Wt
def expectation_maximization(self, X):
"""
Perform Variational Bayes Expectation-Maximization.
Parameters
----------
X : array (instances by features)
Data array.
Returns
-------
rho : array (instances by components)
Variational parameters of posterior for label image.
"""
# Get shape of image
H, W, D = X.shape
# Initialize posterior hyperparameters
thetat, rho = self.initialize_posteriors(X)
# Initialize old energy variable
F_ = np.inf
for t in range(self.max_iter):
# Monitor progress every tenth iteration
if t % (self.max_iter/10) == 0:
# Report progress
print('Iteration ' + '{0:03}'.format(t+1) + '/' +
str(self.max_iter) + '\t', end='')
# Compute free energy to monitor progress
F = self.free_energy(X, rho, thetat, report=True)
if np.abs(F - F_) <= self.tol:
print('Step size is below tolerance threshold.')
break
# Update old energy
F_ = F
# Expectation step
rho = self.expectation_step(X, thetat, savefn=('rho_t' + str(t)))
            # Maximization step
thetat = self.maximization_step(X, rho, thetat)
# Return segmentation along with estimated parameters
return rho, thetat
def segment(self, X):
"""
Fit model to data and segment image.
Parameters
----------
X : array.
Observed image (height by width by channels).
Returns
-------
pred : array
Segmentation produced by the model.
post : array
Posterior indicator distributions.
theta : tuple of arrays
Posterior hyperparameters of parameter distributions.
"""
# Check shape of image
H, W, D = X.shape
# Check if dimensionality of given data matches prior dimensionality.
if not self.D == D:
# Report
print('Re-setting priors.')
# Set dimensionality attribute
self.D = D
# Set prior hyperparameters
self.set_prior_hyperparameters(D=D, K=self.K)
# Perform VB-EM for segmenting the image
post, params = self.expectation_maximization(X)
        # Compute most likely
= b'c'
else:
state = b'o'
f.write(b"%s %s %s\n" % (hex(node), state, label))
f.close()
repo.ui.log(
b'branchcache',
b'wrote %s with %d labels and %d nodes\n',
_branchcachedesc(repo),
len(self._entries),
nodecount,
)
except (IOError, OSError, error.Abort) as inst:
# Abort may be raised by read only opener, so log and continue
repo.ui.debug(
b"couldn't write branch cache: %s\n"
% stringutil.forcebytestr(inst)
)
def update(self, repo, revgen):
"""Given a branchhead cache, self, that may have extra nodes or be
missing heads, and a generator of nodes that are strictly a superset of
heads missing, this function updates self to be correct.
"""
starttime = util.timer()
cl = repo.changelog
# collect new branch entries
newbranches = {}
getbranchinfo = repo.revbranchcache().branchinfo
for r in revgen:
branch, closesbranch = getbranchinfo(r)
newbranches.setdefault(branch, []).append(r)
if closesbranch:
self._closednodes.add(cl.node(r))
# new tip revision which we found after iterating items from new
# branches
ntiprev = self.tiprev
# Delay fetching the topological heads until they are needed.
        # A repository without non-continuous branches can skip this part.
topoheads = None
# If a changeset is visible, its parents must be visible too, so
# use the faster unfiltered parent accessor.
parentrevs = repo.unfiltered().changelog.parentrevs
for branch, newheadrevs in pycompat.iteritems(newbranches):
# For every branch, compute the new branchheads.
# A branchhead is a revision such that no descendant is on
# the same branch.
#
# The branchheads are computed iteratively in revision order.
# This ensures topological order, i.e. parents are processed
# before their children. Ancestors are inclusive here, i.e.
# any revision is an ancestor of itself.
#
# Core observations:
# - The current revision is always a branchhead for the
# repository up to that point.
# - It is the first revision of the branch if and only if
# there was no branchhead before. In that case, it is the
# only branchhead as there are no possible ancestors on
# the same branch.
# - If a parent is on the same branch, a branchhead can
# only be an ancestor of that parent, if it is parent
# itself. Otherwise it would have been removed as ancestor
# of that parent before.
# - Therefore, if all parents are on the same branch, they
# can just be removed from the branchhead set.
# - If one parent is on the same branch and the other is not
# and there was exactly one branchhead known, the existing
# branchhead can only be an ancestor if it is the parent.
# Otherwise it would have been removed as ancestor of
# the parent before. The other parent therefore can't have
# a branchhead as ancestor.
# - In all other cases, the parents on different branches
# could have a branchhead as ancestor. Those parents are
# kept in the "uncertain" set. If all branchheads are also
# topological heads, they can't have descendants and further
# checks can be skipped. Otherwise, the ancestors of the
# "uncertain" set are removed from branchheads.
# This computation is heavy and avoided if at all possible.
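            #
            # A tiny worked example of the iteration above: for a linear
            # history r1 -> r2 on one branch, r1 becomes the branchhead when
            # it is processed; when r2 is processed, r1 appears in r2's
            # same-branch parent set, so r1 is removed and r2 is left as the
            # only branchhead.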
bheads = self._entries.setdefault(branch, [])
bheadset = {cl.rev(node) for node in bheads}
uncertain = set()
for newrev in sorted(newheadrevs):
if not bheadset:
bheadset.add(newrev)
continue
parents = [p for p in parentrevs(newrev) if p != nullrev]
samebranch = set()
otherbranch = set()
for p in parents:
if p in bheadset or getbranchinfo(p)[0] == branch:
samebranch.add(p)
else:
otherbranch.add(p)
if otherbranch and not (len(bheadset) == len(samebranch) == 1):
uncertain.update(otherbranch)
bheadset.difference_update(samebranch)
bheadset.add(newrev)
if uncertain:
if topoheads is None:
topoheads = set(cl.headrevs())
if bheadset - topoheads:
floorrev = min(bheadset)
ancestors = set(cl.ancestors(newheadrevs, floorrev))
bheadset -= ancestors
bheadrevs = sorted(bheadset)
self[branch] = [cl.node(rev) for rev in bheadrevs]
tiprev = bheadrevs[-1]
if tiprev > ntiprev:
ntiprev = tiprev
if ntiprev > self.tiprev:
self.tiprev = ntiprev
self.tipnode = cl.node(ntiprev)
if not self.validfor(repo):
# cache key are not valid anymore
self.tipnode = nullid
self.tiprev = nullrev
for heads in self.iterheads():
tiprev = max(cl.rev(node) for node in heads)
if tiprev > self.tiprev:
self.tipnode = cl.node(tiprev)
self.tiprev = tiprev
self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
duration = util.timer() - starttime
repo.ui.log(
b'branchcache',
b'updated %s in %.4f seconds\n',
_branchcachedesc(repo),
duration,
)
self.write(repo)
class remotebranchcache(branchcache):
"""Branchmap info for a remote connection, should not write locally"""
def write(self, repo):
pass
# Revision branch info cache
_rbcversion = b'-v1'
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
class revbranchcache(object):
"""Persistent cache, mapping from revision number to branch name and close.
This is a low level cache, independent of filtering.
Branch names are stored in rbc-names in internal encoding separated by 0.
rbc-names is append-only, and each branch name is only stored once and will
thus have a unique index.
The branch info for each revision is stored in rbc-revs as constant size
records. The whole file is read into memory, but it is only 'parsed' on
demand. The file is usually append-only but will be truncated if repo
modification is detected.
The record for each revision contains the first 4 bytes of the
corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right result
while converging towards full recovery ... assuming no incorrectly matching
node hashes.
The record also contains 4 bytes where 31 bits contains the index of the
branch and the last bit indicate that it is a branch close commit.
The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
and will grow with it but be 1/8th of its size.
"""
def __init__(self, repo, readonly=True):
assert repo.filtername is None
self._repo = repo
self._names = [] # branch names in local encoding with static index
self._rbcrevs = bytearray()
self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
try:
bndata = repo.cachevfs.read(_rbcnames)
self._rbcsnameslen = len(bndata) # for verification before writing
if bndata:
self._names = [
encoding.tolocal(bn) for bn in bndata.split(b'\0')
]
except (IOError, OSError):
if readonly:
# don't try to use cache - fall back to the slow path
self.branchinfo = self._branchinfo
if self._names:
try:
data = repo.cachevfs.read(_rbcrevs)
self._rbcrevs[:] = data
except (IOError, OSError) as inst:
repo.ui.debug(
b"couldn't read revision branch cache: %s\n"
% stringutil.forcebytestr(inst)
)
# remember number of good records on disk
self._rbcrevslen = min(
len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
)
if self._rbcrevslen == 0:
self._names = []
self._rbcnamescount = len(self._names) # number of names read at
# _rbcsnameslen
def _clear(self):
self._rbcsnameslen = 0
del self._names[:]
self._rbcnamescount = 0
self._rbcrevslen = len(self._repo.changelog)
self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
util.clearcachedproperty(self, b'_namesreverse')
@util.propertycache
def _namesreverse(self):
return {b: r for r, b in enumerate(self._names)}
def branchinfo(self, rev):
"""Return branch name and close flag for rev, using and updating
persistent cache."""
changelog = self._repo.changelog
rbcrevidx = rev * _rbcrecsize
# avoid negative index, changelog.read(nullrev) is fast without cache
if rev == nullrev:
return changelog.branchinfo(rev)
# if requested rev isn't allocated, grow and cache the rev info
if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
return self._branchinfo(rev)
# fast path: extract data from cache, use it if node is matching
reponode = changelog.node(rev)[:_rbcnodelen]
cachenode, branchidx = unpack_from(
_rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
)
close = bool(branchidx & _rbccloseflag)
if close:
branchidx &= _rbcbranchidxmask
if cachenode == b'\0\0\0\0':
pass
elif cachenode == reponode:
try:
return self._names[branchidx], close
except IndexError:
# recover from invalid reference to unknown branch
self._repo.ui.debug(
b"referenced branch names not found"
b" - rebuilding revision branch cache from scratch\n"
)
self._clear()
else:
# rev/node map has changed, invalidate the cache from here up
self._repo.ui.debug(
b"history modification detected - truncating "
b"revision branch cache to revision %d\n" % rev
)
truncate = rbcrevidx + _rbcrecsize
del self._rbcrevs[truncate:]
self._rbcrevslen = min(self._rbcrevslen, truncate)
            # fall back to slow path and make sure it will be written
# Repository: lhorne-gavant/OpenPubArchive-Content-Server-1
import re
import sys
from datetime import datetime
from optparse import OptionParser
from configLib.opasCoreConfig import solr_docs, solr_authors, solr_gloss, solr_docs_term_search, solr_authors_term_search
import logging
logger = logging.getLogger(__name__)
from namesparser import HumanNames
rx_space_req = "(\s+|\s*)"
rx_space_opt = "(\s*|\s*)"
rx_space_end_opt = "(\s*|\s*)$"
rx_space_start_opt = "^(\s*|\s*)?"
rx_year = "\(?\s*(?P<yr>(18|19|20)[0-9]{2,2})\s*\)?"
rx_title = ".*?"
rx_space_or_colon = "((\s*\:\s*)|\s+)"
rx_vol = "((?P<vol>([\(]?[12]?[0-9][0-9]?[\)]?))\:)"
# rx_pgrg = f"(pp\.?\s+)|{rx_vol}(?P<pgrg>[1-9][0-9]{0,3}([-][1-9][0-9]{0,3})?)"
rx_pgrg = "(?P<pgrg>[1-9][0-9]{0,3}([-][1-9][0-9]{0,3})?)"
rx_vol_pgrg = "(.*?(\s|,)|^)" + rx_vol + rx_pgrg + ".*"
rx_year_pgrg = rx_space_start_opt + rx_year + rx_space_or_colon + rx_pgrg + rx_space_end_opt
rx_year_vol_pgrg = rx_year + rx_vol_pgrg + rx_space_end_opt
rx_author_name = "(?P<author>[A-Z][a-z]+)(\,\s+(([A-Z]\.?\s?){0,2})\s*)?"
rx_author_connector = "(and|,)"
rx_front_junk = "(\[|\()?[0-9]+(\]|\))?"
# rx_author_and_year = rx_space_start_opt + rx_author_name + rx_space_req + rx_year + rx_space_end_opt
# rx_author_year_pgrg = rx_author_and_year + ".*?" + rx_pgrg
rx_author_name_list = "(?P<author_list>([A-Z][A-z]+\,?\s+?(([A-Z]\.?\s?){0,2})((\,\s+)|(\s*and\s+))?)+)"
# rx_author_name_list_year = rx_author_name_list + rx_space_req + rx_year
rx_author_list_and_year = "(?P<author_list>[A-Z][A-z\s\,\.\-]+?)" + rx_space_req + rx_year
rx_series_of_author_last_names = "(?P<author_list>([A-Z][a-z]+((\,\s+)|(\s*and\s+))?)+)"
rx_doi = "((h.*?://)?(.*?/))?(?P<doi>(10\.[0-9]{4,4}/[A-z0-9\.\-/]+)|(doi.org/[A-z0-9\-\./]+))"
# schema fields must have a _ in them to use.
rx_solr_field = "(?P<schema_field>([a-z]+_[a-z_]{2,13})|text|authors)\:(?P<schema_value>(.*$))"
rx_syntax = "(?P<syntax>^[a-z]{3,9})\:\:(?P<query>.+$)"
rx_pepdoi = "(?P<prefix>PEP\/\.)(?P<locator>[A-Z\-]{2,10}\.[0-9]{3,3}\.[0-9]{4,4}([PN]{1,2}[0-9]{4,4})?)"
pat_prefix_amps = re.compile("^\s*&& ")
def cleanup_solr_query(solrquery):
"""
Clean up whitespace and extra symbols that happen when building up query or solr query filter
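    Example (illustrative, based on the substitutions below):
    >>> cleanup_solr_query("*:* && ( title:psychoanalysis )")
    '(title:psychoanalysis)'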
"""
ret_val = solrquery.strip()
ret_val = ' '.join(ret_val.split()) #solrquery = re.sub("\s+", " ", solrquery)
ret_val = re.sub("\(\s+", "(", ret_val)
ret_val = re.sub("\s+\)", ")", ret_val)
if ret_val is not None:
# no need to start with '*:* && '. Remove it.
ret_val = ret_val.replace("*:* && ", "")
ret_val = ret_val.replace("*:* {", "{") # if it's before a solr join for level 2 queries
ret_val = pat_prefix_amps.sub("", ret_val)
ret_val = re.sub("\s+(AND)\s+", " && ", ret_val, flags=re.IGNORECASE)
ret_val = re.sub("\s+(OR)\s+", " || ", ret_val, flags=re.IGNORECASE)
return ret_val
#-----------------------------------------------------------------------------
def is_value_in_field(value,
field="title",
core="docs",
match_type="exact", # exact, ordered, proximate, or bool
limit=10):
"""
Returns True if the value is found in the field specified in the docs core.
Args:
value (str): String prefix of term to check.
field (str): Where to look for term
match_type (str): exact, ordered, or bool
limit (int, optional): Paging mechanism, return is limited to this number of items.
Returns:
True if the value is in the specified field
Docstring Tests:
>>> is_value_in_field("Object Relations Theories and the Developmental Tilt", "title")
True
>>> is_value_in_field("Contemporary Psychoanalysis", "art_sourcetitlefull")
True
>>> is_value_in_field("Contemporary Psych", "art_sourcetitlefull")
False
>>> is_value_in_field("Contemp. Psychoanal.", "art_sourcetitleabbr")
True
>>> is_value_in_field("Tuckett, D", "title")
False
"""
ret_val = False
cores = {
"docs": solr_docs,
"authors": solr_authors,
}
try:
solr_core = cores[core]
except Exception as e:
logger.warning(f"Core selection: {core}. 'docs' is default {e}")
solr_core = solr_docs
if match_type == "exact":
q = f'{field}:"{value}"'
elif match_type == "ordered":
q = f'{field}:"{value}"~10'
elif match_type == "proximate":
q = f'{field}:"{value}"~25'
else:
q = f'{field}:({value})'
try:
results = solr_core.query(q=q,
fields = f"{field}",
rows = limit,
)
except Exception as e:
logger.warning(f"Solr query: {q} fields {field} {e}")
results = []
if len(results) > 0:
ret_val = True
return ret_val
#-----------------------------------------------------------------------------
def is_term_in_index(term_partial,
term_field="art_authors",
core="docs",
limit=10,
order="index"):
"""
Returns True if the term_partial matches the index specified
Args:
term_partial (str): String prefix of term to check.
term_field (str): Where to look for term
limit (int, optional): Paging mechanism, return is limited to this number of items.
offset (int, optional): Paging mechanism, start with this item in limited return set, 0 is first item.
order (str, optional): Return the list in this order, per Solr documentation. Defaults to "index", which is the Solr determined indexing order.
Returns:
True if the term is in the specified field
Docstring Tests:
>>> is_term_in_index("Tuckett, D.", term_field="art_author_id", core="authors")
True
>>> is_term_in_index("Tuckett", term_field="art_author_id", core="authors")
True
>>> is_term_in_index("Tuckett", limit=5)
True
>>> is_term_in_index("Tucke*")
True
"""
ret_val = False
core_term_indexers = {
"docs": solr_docs_term_search,
"authors": solr_authors_term_search,
}
try:
term_index = core_term_indexers[core]
except:
# error
logger.error("Specified core does not have a term index configured")
else:
if "*" in term_partial or "?" in term_partial: # or "." in term_partial:
# Wildcard expected, not RE
term_partial = term_partial.lower().replace("*", ".*")
results = term_index( terms_fl=term_field,
terms_regex=term_partial,
terms_limit=limit,
terms_sort=order # index or count
)
for n in results.terms[term_field].keys():
m = re.match(term_partial, n)
if m is not None:
ret_val = True
break
else:
results = term_index( terms_fl=term_field,
terms_prefix=term_partial.lower(),
terms_sort=order, # index or count
terms_limit=limit
)
for n in results.terms[term_field].keys():
n_partial = re.split("[\s,]+", n)
term_adj = term_partial.lower()
if term_adj == n or term_adj == n_partial[0]:
ret_val = True
break
return ret_val
def name_id_list(names_mess):
ret_val = []
names = HumanNames(names_mess)
try:
for n in names.human_names:
if n.last != "":
name_id = n.last + f", {n.first[0]}."
ret_val.append(name_id)
else:
ret_val.append(n.first)
except Exception as e:
logger.warning(f"name parse: {names_mess} {e}")
print (e)
return ret_val
def author_name_to_wildcard(author_list_str: str):
    ret_val = re.sub(" and ", " && ", author_list_str, flags=re.IGNORECASE)
ret_val = re.sub(",(\s[A-Z]\.){1,2}([\s,]?)", '* ', ret_val, flags=re.IGNORECASE)
return ret_val
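# Illustrative behavior of author_name_to_wildcard (example input assumed,
# and assuming the case-insensitive matching intended above):
#
#     author_name_to_wildcard("Tuckett, D. and Fonagy, P.")
#     # -> 'Tuckett* && Fonagy* '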
def dict_clean_none_terms(d: dict):
return {
k:v.strip()
for k, v in d.items()
if v is not None
}
def smart_search(smart_search_text):
"""
Function to take an input string and parse out information to do a search against the DOCS core schema.
Some simple syntax is implemented to help identify certain functionality.
schema_field:terms = Solr field from schema, search terms against it. (Will be presented
to solr as field:(terms). Multiple terms are permitted, but currently, only one field
specification is permitted. Field names are converted to lower case automatically.
art_doi:10.1111/j.1745-8315.2012.00606.x
art_authors_text:[tuckett and fonagy]
doi = Just enter a DOI
10.1111/j.1745-8315.2012.00606.x
AuthorName Year = One or more names (initial capital), followed by a year
Tuckett and Fonagy 1972
Tuckett and Fonagy (1972)
>>> smart_search("<NAME>. & <NAME>. (1978)")
{'schema_field': 'art_authors_citation', 'schema_value': "'<NAME>.' && '<NAME>.'"}
>>> smart_search("authors:<NAME>.")
{'schema_field': 'authors', 'schema_value': 'Tuckett, D.'}
>>> smart_search("Tuckett 1982")
{'author_list': 'Tuckett', 'yr': '1982'}
>>> smart_search("solr::art_authors_text:[tuckett and fonagy]")
{'syntax': 'solr', 'query': 'art_authors_text:[tuckett and fonagy]'}
>>> smart_search("009:0015")
>>> smart_search("<NAME>. ( 1938), Some elementary lessons in psycho-analysis, Standard Edition. 23:279-286. pp. London: Hogarth Press, 1964.")
{'author_list': '<NAME>.', 'yr': '1938', 'vol': '23', 'pgrg': '279-286'}
>>> smart_search("Tuckett")
{'author': 'Tuckett'}
>>> tst = []
>>> tst.append("<NAME>. (1836), An essay on nature. In: The Selected Writings of <NAME>, ed. <NAME>. New York: New American Library, 1965, pp. 186-187.")
>>> tst.append("<NAME>. and <NAME>. ( 1959). The Points of View and Assumptions of Metapsychology. Int. J. Psycho-Anal.40:153-162")
>>> tst.append("<NAME>. ( 1938), Some elementary lessons in psycho-analysis, Standard Edition. 23:279-286. pp. London: Hogarth Press, 1964.")
>>> tst.append("<NAME>. ( 1962). Psychoanalysis, Scientific Method, and Philosophy. J. Amer. Psychoanal. Assn.10:617-637")
>>> for n in tst: smart_search(n)
{'author_list': '<NAME>.', 'yr': '1836'}
{'author_list': '<NAME>. and <NAME>.', 'yr': '1959'}
{'author_list': '<NAME>.', 'yr': '1938', 'vol': '23', 'pgrg': '279-286'}
{'author_list': '<NAME>.', 'yr': '1962'}
"""
# recognize Smart Search inputs
ret_val = {}
# get rid of leading spaces and zeros
smart_search_text = smart_search_text.lstrip(" 0")
if re.match("[A-Z\-]{2,9}\.[0-9]{3,3}[A-Z]?\.[0-9]{4,4}[A-Z]?", smart_search_text, flags=re.IGNORECASE):
loc_corrected = smart_search_text.upper()
if is_value_in_field(loc_corrected, "art_id"):
ret_val = {"art_id": loc_corrected}
if ret_val == {}:
patterns1 = {
rx_author_list_and_year : "author_list_and_year",
rx_year_pgrg : "rx_year_pgrg",
".*?" + rx_vol_pgrg : "rx_vol_pgrg",
rx_doi : "rx_doi",
rx_solr_field: "rx_solr_field",
rx_syntax: "rx_syntax",
}
#patterns2 = {
#rx_series_of_author_last_names: "author_list",
#rx_author_name_list: "author_list",
#rx_author_name: "author_list",
#}
for rx_str, label in patterns1.items():
m = re.match(rx_str, smart_search_text)
if m is not None:
ret_val = {**ret_val, **m.groupdict()}
#for rx_str, label in patterns2.items():
#m = re.match(rx_str, smart_search_text)
#if m is not None:
#if ret_val.get(label) is None: # Pass 2 - if not already found
#ret_val = {**ret_val, **m.groupdict()}
if ret_val == {}:
# nothing found yet.
# see if it's a title
words = smart_search_text.split(" ")
word_count = len(words)
words = [re.sub('\"|\\\:', "", n) for n in words]
words = " ".join(words)
words = cleanup_solr_query(words)
if word_count == 1 and len(words) > 3:
# could still be an author name
if is_value_in_field(words, core="authors", field="authors"):
ret_val["schema_field"] = "art_authors_citation"
ret_val["schema_value"] = f"{words}"
elif is_value_in_field(words, "title", match_type="ordered"):
ret_val["title"] = words
            elif is_value_in_field(words, core="docs", field="art_authors_citation"):
                # see if it's a list of names
                ret_val["schema_field"] = "art_authors_citation"
                ret_val["schema_value"] = f"{words}"
            elif is_value_in_field(words, core="docs", field="text", match_type="proximate"):
ret_val["wordsearch"] = re.sub(":", "\:", smart_search_text)
else:
# try to build a list of names, and check them individually
new_q = ""
names = name_id_list(smart_search_text)
for name in names:
np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(truetime)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solutions[k][:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solutions[k][startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
ssa_obj.fragments = fragarray
ssa_obj.fragtimes = fragtimes
ssa_obj.frag_per_traj = fragmentspertraj
ssa_obj.full_frags = truefrags
ssa_obj.all_results = all_results
if probePosition.shape[0] > 1:
for i in range(probePosition.shape[0]):
if i > 0:
autocorr_vec2, mean_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = np.vstack((autocorr_vec,autocorr_vec2))
mean_autocorr = np.vstack((mean_autocorr,mean_autocorr2))
error_autocorr = np.vstack((error_autocorr,error_autocorr2))
dwelltime.append(dwelltime2)
ke_sim.append(ke_sim2)
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_all_autocovariances(intensity_vec,truetime,genelength )
ssa_obj.autocorr_vec = autocorr_vec
ssa_obj.mean_autocorr = mean_autocorr
ssa_obj.error_autocorr = error_autocorr
ssa_obj.autocorr_vec_norm = autocorr_vec_norm
ssa_obj.mean_autocorr_norm = mean_autocorr_norm
ssa_obj.error_autocorr_norm = error_autocorr_norm
ssa_obj.dwelltime = dwelltime
ssa_obj.ke_sim = ke_sim
ssa_obj.ke_true = float(genelength)/np.mean(ssa_obj.ribtimes)
ssa_obj.probe = probePosition
try:
ssa_obj.autocovariance_dict = acov
ssa_obj.autocovariance_norm_dict = nacov
except:
pass
return ssa_obj
def get_negative_intensity(self,solution,gene_length,pv,tvec,ti,stop_frap):
startindex = np.where(tvec >= ti)[0][0]
stop_frap = np.where(tvec >= stop_frap)[0][0]
solution = solution.T
fragmented_trajectories = []
fragtimes = []
endfragtimes = []
maxlen = 0
fragmentspertraj= []
ind = np.array([next(j for j in range(0,solution.shape[0]) if int(solution[j, i]) == 0 or int(solution[j, i]) == -1) for i in range(0, solution.shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
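        # Descriptive note: `ind` counts the occupied ribosome slots per time
        # point, so positive jumps in `changes` (addindexes) correspond to
        # initiation events and negative jumps (subindexes) to terminations,
        # which the fragment stitching below relies on.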
sub = solution[:,1:] - solution[:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solution[:,index]
post = solution[:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([gene_length],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(tvec)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solution[:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solution[startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
if addindexes[m]+1 + len(fragment) > len(tvec):
endfragtimes.append(len(tvec))
else:
endfragtimes.append(addindexes[m]+1 + len(fragment))
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
affected_frags = []
fragindexes = []
for i in range(len(fragtimes)):
if np.sum([fragtimes[i]> np.array([startindex, stop_frap]), endfragtimes[i] > np.array([startindex, stop_frap])]) in [1,2,3]:
affected_frags.append(i)
fragindexes.append([fragtimes[i],endfragtimes[i]])
#affected_frags = np.intersect1d(np.where(np.array(fragtimes) >= startindex), np.where(np.array(fragtimes)<= stop_frap))
if len(fragindexes)> 0:
findexes = np.array(fragindexes)
frange = findexes[:,1]-stop_frap
afterfrapribs = findexes[np.where(frange > 0 )]
relevantfrags = np.array(affected_frags)[np.where(frange > 0 )]
if len(relevantfrags) > 0:
cooked_ribs = 0#(len(affected_frags) - len(relevantfrags))*max(pv)
stopfrapindex = stop_frap - afterfrapribs[:,0]
rfrags = fragarray[relevantfrags]
np.diag(rfrags[:,stopfrapindex])
laglen = afterfrapribs[:,1] - stop_frap
posistions_at_end_of_FRAP = np.diag(rfrags[:,stopfrapindex])
offset = pv[posistions_at_end_of_FRAP.astype(int)]
trailing_intensity = np.zeros((max(laglen)))
for i in range(len(laglen)):
trailing_intensity[:laglen[i]] -= offset[i]
trailing_intensity= trailing_intensity-cooked_ribs
else:
trailing_intensity = np.array([0])
else:
trailing_intensity = np.array([0])
return trailing_intensity
def ssa_solver_append(self, ssa_obj, n=100):
nRepetitions = ssa_obj.n_traj
all_k = ssa_obj.k
no_ribosomes_per_mrna = ssa_obj.no_rib_per_mrna
ribosome_density = ssa_obj.rib_density
ribosome_means = ssa_obj.rib_means
rib_vec = ssa_obj.rib_vec
intensity_vec = ssa_obj.intensity_vec
time_vec_fixed = ssa_obj.time_vec_fixed
non_consider_time = ssa_obj.start_time
evaluating_inhibitor = ssa_obj.evaluating_inhibitor
evaluating_frap = ssa_obj.evaluating_frap
time_inhibit = ssa_obj.time_inhibit
truetime = ssa_obj.time
tstep = len(ssa_obj.time)
npoints = tstep #non_consider_time + tstep
rib_vec = []
solutions = []
pv = ssa_obj.probe
genelength = len(pv[0])-1
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
except:
intime = 0
solutionssave = []
st = time.time()
n_traj = n
force_python = False
try:
if force_python == True:
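                # `st` is a float, so indexing it raises TypeError on purpose
                # and execution falls through to the pure-Python branch below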
st[0]
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_ribtimes = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.float64)
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
k = np.array(all_k)
seeds = np.random.randint(0, 0x7FFFFFF, n_traj)
all_frapresults = np.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=np.int32)
all_collisions = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.int32)
all_nribs = np.zeros((n_traj,1))
all_col_points = []
for i in range(n_traj):
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
ribtimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.float64)
frapresult = np.zeros((len(time_vec_fixed)*N_rib),dtype=np.int32)
coltimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointsx = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointst = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.float64)
nribs = np.array([0],dtype=np.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
all_results[i, :] = result
all_frapresults[i,:] = frapresult
all_ribtimes[i,:] = ribtimes
all_collisions[i,:] = coltimes
all_nribs[i,:] = nribs
endcolrec = np.where(colpointsx == 0)[0][0]
colpoints = np.vstack((colpointsx[:endcolrec],colpointst[:endcolrec]))
all_col_points.append(colpoints.T)
for i in range(n_traj):
soln = all_results[i, :].reshape((N_rib, len(time_vec_fixed)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
collisions = np.array([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = all_nribs[i]
if totalrib > all_collisions.shape[1]:
collisions = np.append(collisions, all_collisions[i][:])
watched_ribs.append(int(all_collisions.shape[1]))
else:
collisions = np.append(collisions, all_collisions[i][:int(totalrib[0])])
watched_ribs.append(int(totalrib[0]))
sttime = time.time() - st
except:
print('C++ library failed, Using Python Implementation')
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
collisions = np.array([[]])
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_col_points = []
watched_ribs = []
for i in range(n_traj):
soln,all_ribtimes,Ncol,col_points = self.SSA(all_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
#soln = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
collisions = np.append(collisions,Ncol)
watched_ribs.append(int(len(collisions)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
all_col_points.append(np.array(col_points))
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
result = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
all_results[i, :] = result
sttime = time.time() - st
#rb = sparse.lil_matrix((len(time_vec_fixed),genelength),dtype=int)
#for j in range(soln.shape[1]):
#if len(np.where(soln[:,j]!=0)[0]) !=0:
#print(np.where(soln[:,j]!=0)[0])
#rb[j,np.where(soln[:,j]!=0)[0]] = 1
#for value in soln[:,j][np.where(soln[:,j]!=0)[0]].astype(int):
#rb[j, value-1] = 1
#rib_vec.append(rb)
no_ribosomes = np.zeros((n_traj, (genelength+1)))
startindex = np.where(truetime >= non_consider_time)[0][0]
#all_results = all_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][np.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.astype(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_means = np.mean(no_ribosomes, axis=0)
ribosome_density = ribosome_means/npoints
no_ribosomes_per_mrna = np.mean(no_ribosomes)
if pv.shape[0] <=1:
I = np.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = np.zeros((int(pv.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = np.zeros((1,tstep+1))
if evaluating_frap == False:
if pv.shape[0] <=1:
                for i
rows become None instead of NaN
# (aggregation sum of int + NaN = float, but we want int, so we use
# int + None = int to stop decimals from appearing in the size sums)
# - re-sort by price based on side
# - bids: high to low
# - asks: low to high
# - Re-index the frame by current sorted positions so the concat joins correctly.
# - 'drop=True' means don't add a new column with the previous index value
# condition dataframe reorganization on the input list existing.
# for some smaller symbols, bids or asks may not get returned
# by the flaky ibkr depth APIs
if t.domBids:
fixedBids = (
pd.DataFrame(t.domBids)
.groupby("price", as_index=False)
.agg({"size": sum, "marketMaker": list})
.convert_dtypes()
.sort_values(by=["price"], ascending=False)
.reset_index(drop=True)
)
# format floats as currency strings with proper cent padding
fixedBids["price"] = fixedBids["price"].apply(lambda x: f"{x:,.2f}")
fixedBids["marketMaker"] = sorted(fixedBids["marketMaker"])
else:
fixedBids = pd.DataFrame()
if t.domAsks:
fixedAsks = (
pd.DataFrame(t.domAsks)
.groupby("price", as_index=False)
.agg({"size": sum, "marketMaker": list})
.convert_dtypes()
.sort_values(by=["price"], ascending=True)
.reset_index(drop=True)
)
fixedAsks["price"] = fixedAsks["price"].apply(lambda x: f"{x:,.2f}")
fixedAsks["marketMaker"] = sorted(fixedAsks["marketMaker"])
else:
fixedAsks = pd.DataFrame()
fmtJoined = {"Bids": fixedBids, "Asks": fixedAsks}
# Create an order book with high bids and low asks first.
# Note: due to the aggregations above, the bids and asks
# may have different row counts. Extra rows will be
# marked as <NA> by pandas (and we can't fill them
# as blank because the cols have been coerced to
# specific data types via 'convert_dtypes()')
both = pd.concat(fmtJoined, axis=1)
printFrame(
both,
f"{contract.symbol} :: {contract.localSymbol} Grouped by Price",
)
# Note: the 't.domTicks' field is just the "update feed"
# which ib_insync merges into domBids/domAsks
# automatically, so we don't need to care about
# the values inside t.domTicks
if i < self.count - 1:
await asyncio.sleep(3)
self.ib.cancelMktDepth(contract, isSmartDepth=useSmart)
del self.depthState[contract]
@dataclass
class IOpRID(IOp):
"""Retrieve ib_insync request ID and server Next Request ID"""
def argmap(self):
# rid has no args!
return []
async def run(self):
logger.info("CLI Request ID: {}", self.ib.client._reqIdSeq)
logger.info(
"Server Next Request ID: {} (see server log)", self.ib.client.reqIds(0)
)
@dataclass
class IOpOrderModify(IOp):
"""Modify an existing order using interactive prompts."""
def argmap(self):
# No args, we just use interactive prompts for now
return []
async def run(self):
# "openTrades" include the contract, order, and order status.
# "openOrders" only includes the order objects with no contract or status.
ords = self.ib.openTrades()
# logger.info("current orderS: {}", pp.pformat(ords))
promptOrder = [
Q(
"Current Order",
choices=[
Choice(
f"{o.order.action:<4} {o.order.totalQuantity:<6} {o.contract.localSymbol or o.contract.symbol:<21} {o.order.orderType} {o.order.tif} lmt:${fmtPrice(o.order.lmtPrice):<7} aux:${fmtPrice(o.order.auxPrice):<7}",
o,
)
for o in sorted(ords, key=tradeOrderCmp)
],
),
Q("New Limit Price"),
Q("New Stop Price"),
Q("New Quantity"),
]
pord = await self.state.qask(promptOrder)
try:
trade = pord["Current Order"]
lmt = pord["New Limit Price"]
stop = pord["New Stop Price"]
qty = pord["New Quantity"]
contract = trade.contract
ordr = trade.order
if not (lmt or stop or qty):
# User didn't provide new data, so stop processing
return None
if lmt:
ordr.lmtPrice = float(lmt)
if stop:
ordr.auxPrice = float(stop)
if qty:
ordr.totalQuantity = float(qty)
except:
return None
trade = self.ib.placeOrder(contract, ordr)
logger.info("Updated: {}", pp.pformat(trade))
@dataclass
class IOpOrder(IOp):
"""Quick order entry with full order described on command line."""
def argmap(self):
# TODO: write a parser for this language instead of requiring fixed orders for each parameter
# allow symbol on command line, optionally
# BUY IWM QTY 500 PRICE 245 ORD LIMIT/LIM/LMT AF AS REL MP AMF AMS MOO MOC
return [
DArg("bs", verify=lambda x: x.lower() in {"b", "s", "buy", "sell"}),
DArg("symbol"),
DArg("q", verify=lambda x: x.lower() in {"q", "qty"}),
DArg("qty", convert=float, verify=lambda x: x != 0),
DArg("p", verify=lambda x: x.lower() in {"p", "price"}),
DArg("price", convert=float, verify=lambda x: x >= 0),
DArg("a", verify=lambda x: x.lower() in {"a", "algo"}),
DArg(
"algo",
convert=lambda x: x.upper(),
verify=lambda x: x in ALGOMAP.keys(),
errmsg=f"Available algos: {pp.pformat(ALGOMAP)}",
),
]
async def run(self):
if " " in self.symbol:
# is spread, so do bag
isSpread = True
            orderReq = self.state.ol.parse(self.symbol)
contract = await self.state.bagForSpread(orderReq)
else:
# else, is symbol
isSpread = False
contract = contractForName(self.symbol)
if contract is None:
logger.error("Not submitting order because contract can't be formatted!")
return None
if not isSpread:
# spreads are qualified when they are initially populated
await self.state.qualify(contract)
# B BUY is Long
# S SELL is Short
isLong = self.bs.lower().startswith("b")
# send the order to IBKR
# Note: negative quantity is parsed as a WHOLE DOLLAR AMOUNT to use,
# then price is irrelevant since it runs a midpoint of spread order.
am = ALGOMAP[self.algo]
placed = await self.state.placeOrderForContract(
self.symbol,
isLong,
contract,
self.qty,
self.price,
am,
)
if not placed:
logger.error("Order can't continue!")
return
# if this is a market order, don't run the algo loop
if {"MOO", "MOC", "MKT"} & set(am.split()):
logger.warning("Not running price algo because this is a market order...")
return
order, trade = placed
quoteKey = lookupKey(contract)
checkedTimes = 0
# while (unfilled quantity) AND (order NOT canceled or rejected or broken)
while (rem := trade.orderStatus.remaining) > 0 or (
"Pending" in trade.orderStatus.status
):
if ("Cancel" in trade.orderStatus.status) or (
trade.orderStatus.status in {"Filled", "Inactive"}
):
logger.error(
"[{} :: {}] Order was canceled or rejected! Status: {}",
trade.orderStatus.status,
trade.contract.localSymbol,
pp.pformat(trade.orderStatus),
)
return
if rem == 0:
logger.warning(
"Quantity Remaining is zero, but status is Pending. Waiting for update..."
)
# sleep 75 ms and check again
await asyncio.sleep(0.075)
continue
logger.info("Quantity remaining: {}", rem)
checkedTimes += 1
# if this is the first check after the order was placed, don't
# run the algo (i.e. give the original limit price a chance to work)
if checkedTimes == 1:
logger.info("Skipping adjust so original limit has a chance to fill...")
continue
# get current qty/value of trade both remaining and already executed
(
remainingAmount,
totalAmount,
currentPrice,
currentQty,
) = self.state.amountForTrade(trade)
# get current quote for order
bidask = self.state.currentQuote(quoteKey)
if bidask:
logger.info("Adjusting price for more aggressive fills...")
bid, ask = bidask
if isLong:
# if is buy, chase the ask
newPrice = round((currentPrice + ask) / 2, 2)
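                    # e.g. currentPrice 10.00 with ask 10.10 gives a new
                    # limit of 10.05 (midpoint, rounded to cents)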
# reduce qty to remain in expected total spend constraint
newQty = amount / newPrice
# only crypto supports fractional values over the API,
# so all non-crypto contracts get floor'd
if not isinstance(trade.contract, Crypto):
newQty = math.floor(newQty)
else:
# else if is sell, chase the bid
newPrice = round((currentPrice + bid) / 2, 2)
newQty = currentQty # don't change quantities on shorts / sells
# TODO: this needs to be aware of CLOSING instead of OPEN SHORT.
# i.e. on OPENING orders we can grow/shrink qty, but on CLOSING
# we DO NOT want to shrink or grow our qty.
logger.info(
"Price changing from {} to {} ({})",
currentPrice,
newPrice,
(newPrice - currentPrice),
)
logger.info(
"Qty changing from {} to {} ({})",
currentQty,
newQty,
(newQty - currentQty),
)
logger.info("Submitting order update...")
order.lmtPrice = newPrice
order.totalQuantity = newQty
self.ib.placeOrder(contract, order)
waitDuration = 3
logger.info(
"[{} :: {}] Waiting for {} seconds to check for new executions...",
trade.orderStatus.orderId,
checkedTimes,
waitDuration,
)
try:
await asyncio.sleep(waitDuration)
except:
# catches CTRL-C during sleep
logger.warning(
"[{}] User canceled automated limit updates! Order still live.",
trade.orderStatus.orderId,
)
break
@dataclass
class IOpOrderFast(IOp):
"""Place a momentum order for scalping using multiple strikes and active price tracking.
For a 'symbol' at total dollar spend of 'amount' and 'direction'.
    For ATR calculation, requires a custom endpoint running with a REST API capable of
returning multiple ATR periods for any symbol going back multiple timeframes.
Maximum strikes attempted are based on the recent ATR for the underlying,
    so we don't try to grab a +$50 OTM strike when the recent historical movement
    was only $2.50. (TODO: if running against 2-4 week out chains, allow higher
    than the ATR maximum because the vol will smile).
The buying algo is:
- use current underlying bid/ask midpoint (requires live quote)
- set price cap to maximum of 3 day or 20 day ATR (requires external API)
    - buy 1
P = _multirice3dfun(r,nu,sig,a)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Location of 1st Rician', 'Spread of 1st Rician', 'Amplitude of 1st Rician',
'Location of 2nd Rician', 'Spread of 2nd Rician', 'Amplitude of 2nd Rician',
'Location of 3rd Rician', 'Spread of 3rd Rician', 'Amplitude of 3rd Rician'),
units = ('nm','nm','','nm','nm','','nm','nm',''),
start = np.asarray([2.5, 0.7, 0.3, 3.5, 0.7, 0.3, 5, 0.7, 0.3]),
lower = np.asarray([1, 0.1, 0, 1, 0.1, 0, 1, 0.1, 0]),
upper = np.asarray([10, 5, 1, 10, 5, 1, 10, 5, 1]))
@docstring()
def dd_rice3(r,param):
r"""
Sum of three 3D-Rice distributions
Notes
-----
**Model:**
:math:`P(r) = a_1 R(r,\nu_1,\sigma_1) + a_2 R(r,\nu_2,\sigma_2) + a_3 R(r,\nu_3,\sigma_3)`
:math:`R(r,\nu,\sigma) = \frac{\nu^{n/2-1}}{\sigma^2}r^{n/2}\exp\left(-\frac{(r^2+\nu^2)}{2\sigma^2}\right)I_{n/2-1}\left(\frac{r\nu}{\sigma^2} \right)`
where :math:`n=3` and :math:`I_{n/2-1}(x)` is the modified Bessel function of the first kind with order :math:`n/2-1`.
This is a three-dimensional non-central chi distribution, the 3D generalization of the 2D Rice distribution.
============== ======================== ============= ============= ============= =======================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ======================== ============= ============= ============= =======================================
``param[0]`` :math:`\nu_1` 2.5 1.0 10 1st Rician location (nm)
``param[1]`` :math:`\sigma_1` 0.7 0.1 5 1st Rician spread (nm)
``param[2]`` :math:`a_1` 0.3 0 1 1st Rician amplitude
``param[3]`` :math:`\nu_2` 4.0 1.0 10 2nd Rician location (nm)
``param[4]`` :math:`\sigma_2` 0.7 0.1 5 2nd Rician spread (nm)
``param[5]`` :math:`a_2` 0.3 0 1 2nd Rician amplitude
``param[6]`` :math:`\nu_3` 5.0 1.0 10 3rd Rician location (nm)
``param[7]`` :math:`\sigma_3` 0.7 0.1 5 3rd Rician spread (nm)
``param[8]`` :math:`a_3` 0.3 0 1 3rd Rician amplitude
============== ======================== ============= ============= ============= =======================================
"""
r,param = _parsparam(r,param,npar=9)
nu = [param[0], param[3], param[6]]
sig = [param[1], param[4], param[7]]
a = [param[2], param[5], param[8]]
P = _multirice3dfun(r,nu,sig,a)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Number of residues','Segment length','Scaling exponent'),
units = ('','nm',''),
start = np.asarray([50, 0.2, 0.602]),
lower = np.asarray([2, 0.1, 0.33 ]),
upper = np.asarray([1000, 0.4, 1 ]))
@docstring()
def dd_randcoil(r,param):
r"""
Random-coil model for an unfolded peptide/protein
Notes
-----
**Model:**
.. image:: ../images/model_scheme_dd_randcoil.png
:width: 25%
:math:`P(r) = \frac{3}{(2\pi\nu_0)^{3/2}}4\pi r^2\exp(-\frac{3 r^2}{\nu_0})`
where :math:`\nu_0 = 3/(12\pi r_0 N \nu)^{3/2}`
============== ============= ============= ============= ============= =======================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ============= ============= ============= ============= =======================================
``param[0]`` :math:`N` 50 2 1000 Number of residues
``param[1]`` :math:`R_0` 0.20 0.10 0.40 Segment length (nm)
``param[2]`` :math:`\nu` 0.602 0.33 1.00 Scaling exponent
============== ============= ============= ============= ============= =======================================
"""
r,param = _parsparam(r,param,npar=3)
    N = param[0]  # number of residues
    R0 = param[1] # segment length
    nu = param[2] # scaling exponent
rsq = 6*(R0*N**nu)**2 # mean square end-to-end distance from radius of gyration
normFact = 3/(2*np.pi*rsq)**(3/2) # normalization prefactor
ShellSurf = 4*np.pi*r**2 # spherical shell surface
Gaussian = np.exp(-3*r**2/(2*rsq))
P = normFact*ShellSurf*Gaussian
P = _normalize(r,P)
return P
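# Illustrative usage (values taken from the documented start values above):
#
#     r = np.linspace(1, 10, 500)           # distance axis in nm
#     P = dd_randcoil(r, [50, 0.2, 0.602])  # N, R0 (nm), scaling exponent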
# =================================================================
# =================================================================
@metadata(
parameters = ('Center','Radius'),
units = ('nm','nm'),
start = np.asarray([3, 0.5]),
lower = np.asarray([1, 0.1]),
upper = np.asarray([20, 5 ]))
@docstring()
def dd_circle(r,param):
r"""
Semicircle distribution model
Notes
-----
**Model:**
This provides a `semi-circle distribution <https://en.wikipedia.org/wiki/Wigner_semicircle_distribution>`_, defined by
    :math:`P(r) = \frac{2}{\pi R^2}\sqrt{R^2 - (r-r_0)^2}` for :math:`r_0-R\le r\le r_0+R` and zero otherwise.
============== ================= ============= ============= ============= =================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ================= ============= ============= ============= =================================
``param[0]`` :math:`r_0` 3.0 1 20 Center (nm)
``param[1]`` :math:`R` 0.5 0.1 5 Radius (nm)
============== ================= ============= ============= ============= =================================
"""
r,param = _parsparam(r,param,npar=2)
r0 = param[0]
R = abs(param[1])
dr = r - r0
idx = abs(dr)<R
P = np.zeros(len(r))
P[idx] = 2/np.pi/R**2*np.sqrt(R**2 - dr[idx]**2)
P = _normalize(r,P)
return P
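# Quick sanity check sketch (assumes _normalize yields a unit-integral
# distribution, as its use throughout this module suggests):
#
#     r = np.linspace(1, 6, 1000)
#     P = dd_circle(r, [3, 0.5])
#     np.trapz(P, r)   # ~= 1.0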
# =================================================================
# =================================================================
@metadata(
parameters = ('Center','FWHM'),
units = ('nm','nm'),
start = np.asarray([3, 0.5]),
lower = np.asarray([1, 0.1]),
upper = np.asarray([20, 5 ]))
@docstring()
def dd_cos(r,param):
r"""
Raised-cosine parametric model
Notes
-----
**Model:**
This provides a `raised-cosine distribution <https://en.wikipedia.org/wiki/Raised_cosine_distribution>`_, defined by
    :math:`P(r) = \frac{1}{2w}\left(1 + \cos\left(\frac{r-r_0}{w}\pi\right)\right)` for :math:`r_0-w \le r \le r_0+w`, and zero otherwise.
============== ================= ============= ============= ============= =================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ================= ============= ============= ============= =================================
``param[0]`` :math:`r_0` 3.0 1 20 Center (nm)
``param[1]`` :math:`w` 0.5 0.1 5 FWHM (nm)
============== ================= ============= ============= ============= =================================
"""
r,param = _parsparam(r,param,npar=2)
r0 = param[0]
fwhm = param[1]
phi = (r-r0)/fwhm*np.pi
P = (1 + np.cos(phi))/2/fwhm
P[(r<(r0-fwhm)) | (r>(r0+fwhm))] = 0
P = _normalize(r,P)
return P
# =================================================================
def _pb(r,R):
# =================================================================
P = np.zeros(len(r))
idx = (r >= 0) & (r <= 2*R)
P[idx] = 3*r[idx]**5/(16*R**6) - 9*r[idx]**3/(4*R**4) + 3*r[idx]**2/(R**3)
return P
# =================================================================
def _pbs(r,R1,R2):
# =================================================================
P = np.zeros(len(r))
# Case1
idx = (r >= 0) & (r < np.minimum(2*R1,R2 - R1))
P[idx] = 12*r[idx]**3*R1**2 - r[idx]**5
# Case2
idx = (r >= R2 - R1) & (r < 2*R1)
P[idx] = 8*r[idx]**2*(R2**3 - R1**3) - 3*r[idx]*(R2**2 - R1**2)**2 - 6*r[idx]**3*(R2 - R1)*(R2 + R1)
# Case3
idx = (r >= 2*R1) & (r < R2 - R1)
P[idx] = 16*r[idx]**2*R1**3
# Case4
idx = (r >= np.maximum(R2 - R1,2*R1)) & (r < R1 + R2)
P[idx] = r[idx]**5 - 6*r[idx]**3*(R2**2 + R1**2) + 8*r[idx]**2*(R2**3 + R1**3) - 3*r[idx]*(R2**2 - R1**2)**2
P = P*3/(16*R1**3*(R2**3 - R1**3))
return P
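# Note on the helpers above: _pb(r, R) implements P_B(r|R) and _pbs(r, R1, R2)
# implements P_BS(r|R1, R2) exactly as written out in the dd_shell notes below;
# P_B(r|R) is the classic distribution of distances between two points drawn
# uniformly from a ball of radius R.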
# =================================================================
# =================================================================
@metadata(
parameters = ('Inner shell radius','Shell thickness'),
units = ('nm','nm'),
lower = np.asarray([0.1, 0.1]),
upper = np.asarray([20, 20 ]),
start = np.asarray([1.5, 0.5]))
@docstring()
def dd_shell(r,param):
r"""
Uniform spherical shell
Notes
-----
**Model:**
.. image:: ../images/model_scheme_dd_shell.png
:width: 25%
    :math:`P(r) = \left(R_2^6 P_\mathrm{B}(r|R_2) - R_1^6 P_\mathrm{B}(r|R_1) - 2(R_2^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_2)\right)/(R_2^3 - R_1^3)^2`
with
    :math:`P_\mathrm{BS}(r|R_i,R_j) = \frac{3}{16R_i^3(R_j^3 - R_i^3)}\begin{cases} 12r^3R_i^2 - r^5 \quad \text{for} \quad 0\leq r < \min(2R_i,R_j - R_i) \\ 8r^2(R_j^3 - R_i^3) - 3r(R_j^2 - R_i^2)^2 - 6r^3(R_j - R_i)(R_j + R_i) \quad \text{for} \quad R_j-R_i \leq r < 2R_i \\ 16r^2R_i^3 \quad \text{for} \quad 2R_i\leq r < R_j - R_i \\ r^5 - 6r^3(R_j^2 + R_i^2) + 8r^2(R_j^3 + R_i^3) - 3r(R_j^2 - R_i^2)^2 \quad \text{for} \quad \max(R_j-R_i,2R_i) \leq r < R_i+R_j \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
:math:`P_\mathrm{B}(r|R_i) = \begin{cases} \frac{3r^5}{16R_i^6} - \frac{9r^3}{4R_i^4} + \frac{3r^2}{R_i^3} \quad \text{for} \quad 0 \leq r < 2R_i \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
and
:math:`R_1 = R`
:math:`R_2 = R + w`
============== ============== ============= ============= ============= =======================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ============== ============= ============= ============= =======================================
``param[0]`` :math:`R` 1.5 0.1 20 Inner shell radius (nm)
``param[1]`` :math:`w` 0.5 0.1 20 Shell thickness (nm)
============== ============== ============= ============= ============= =======================================
References
----------
.. [1] <NAME>, <NAME>,
Analytical distance distributions in systems of spherical symmetry with applications to double electron-electron resonance, JMR, 230, 50-63, 2013
"""
r,param = _parsparam(r,param,npar=2)
R1 = float(param[0])
w = float(param[1])
R2 = R1 + w
P = np.zeros(len(r))
P = R2**6*_pb(r,R2) - R1**6*_pb(r,R1) - 2*(R2**3 - R1**3)*_pbs(r,R1,R2)
P = P/(R2**3 - R1**3)**2
P = _normalize(r,P)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Sphere radius','Distance to point'),
units = ('nm','nm'),
lower = np.asarray([0.1, 0.1]),
upper = np.asarray([20, 20 ]),
start = np.asarray([1.5, 3.5]))
@docstring()
def dd_spherepoint(r,param):
r"""
One particle distanced from particles distributed on a sphere
Notes
-----
**Model:**
.. image:: ../images/model_scheme_dd_spherepoint.png
:width: 25%
:math:`P(r) = \begin{cases} \frac{3r(R^2-(d-r)^2)}{4dR^3} \quad \text{for} \quad d-R \leq r < d+R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
============== ============== ============= ============= ============= =========================
Variable Symbol Start Value Lower bound Upper bound Description
============== ============== ============= ============= ============= =========================
``param[0]`` :math:`R` 1.5 0.1 20 Sphere radius (nm)
``param[1]`` :math:`d` 3.5 0.1 20 Distance to point (nm)
============== ============== ============= ============= ============= =========================
References
----------
.. [1] <NAME>, <NAME>,
Analytical distance distributions in systems of spherical symmetry with applications to double electron-electron resonance, JMR, 230, 50-63, 2013
"""
r,param = _parsparam(r,param,npar=2)
R = float(param[0])
d = float(param[1])
P = np.zeros(len(r))
idx = (r >= d - R) & (r<= d + R)
P[idx] = 3*r[idx]*(R**2 - (d - r[idx])**2)/(4*d*R**3)
P = _normalize(r,P)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Sphere radius',),
units = ('nm',),
lower = np.asarray([0.1]),
upper = np.asarray([20]),
start = np.asarray([2.5]))
@docstring()
def dd_spheresurf(r,param):
r"""
Particles distributed on a sphere's surface
Notes
-----
**Model:**
.. image:: ../images/model_scheme_dd_spheresurf.png
:width: 25%
:math:`P(r) = \begin{cases} \frac{r}{2R^2} \quad \text{for} \quad 0 \leq r < 2R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
============== ============== ============= ============= ============= =========================
Variable Symbol Start Value Lower bound Upper bound Description
============== ============== ============= ============= ============= =========================
``param[0]`` :math:`R` 2.5 0.1 20 Sphere radius (nm)
============== ============== ============= ============= ============= =========================
References
----------
.. [1] <NAME>, <NAME>,
Analytical distance distributions in systems of spherical symmetry with applications to double electron-electron resonance, JMR, 230, 50-63, 2013
"""
r,param = _parsparam(r,param,npar=1)
R = float(param[0])
P = np.zeros(len(r))
idx = (r >= 0) & (r<= 2*R)
P[idx] = r[idx]/R**2
P = _normalize(r,P)
return P
#=================================================================
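# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows how the
# sphere models above are typically evaluated. It assumes numpy is available
# as np (as used throughout this module); the distance axis and parameter
# values below are arbitrary demonstration choices, and the helper name
# _example_sphere_models is hypothetical.
def _example_sphere_models():
    r = np.linspace(0, 10, 500)               # distance axis in nm
    P_point = dd_spherepoint(r, [1.5, 3.5])   # sphere radius R=1.5 nm, distance d=3.5 nm
    P_surf = dd_spheresurf(r, [2.5])          # sphere radius R=2.5 nm
    # both distributions are normalized, so their integrals over r are ~1
    return np.trapz(P_point, r), np.trapz(P_surf, r)
# ----------------------------------------------------------------------------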
# =================================================================
@metadata(
parameters = ('Inner shell radius','1st Shell thickness','2nd Shell thickness'),
units = ('nm','nm','nm'),
lower = np.asarray([0.1, 0.1, 0.1]),
upper = np.asarray([20, 20, 20 ]),
start = np.asarray([1.5, 0.5, 0.5]))
@docstring()
def | |
<reponame>SofiaBadini/estimagic
import functools
import json
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.optimize._numdiff import approx_derivative
from estimagic.decorators import expand_criterion_output
from estimagic.decorators import handle_exceptions
from estimagic.decorators import log_evaluation
from estimagic.decorators import log_gradient
from estimagic.decorators import log_gradient_status
from estimagic.decorators import negative_criterion
from estimagic.decorators import numpy_interface
from estimagic.logging.create_database import prepare_database
from estimagic.optimization.process_constraints import process_constraints
from estimagic.optimization.reparametrize import reparametrize_to_internal
from estimagic.optimization.utilities import propose_algorithms
def transform_problem(
criterion,
params,
algorithm,
criterion_kwargs,
constraints,
general_options,
algo_options,
gradient,
gradient_options,
logging,
log_options,
dashboard,
dash_options,
):
"""Transform the user supplied problem.
The transformed optimization problem is converted from the original problem
which consists of the user supplied criterion, params DataFrame, criterion_kwargs,
constraints and gradient (if supplied).
In addition, the transformed optimization problem provides sophisticated logging
tools if activated by the user.
The transformed problem can be solved by almost any optimizer package:
1. The only constraints are bounds on the parameters.
2. The internal_criterion function takes a one-dimensional np.array as input.
3. The internal criterion function returns a scalar value
(except for the case of the tao_pounders algorithm).
Note that because of the reparametrizations done by estimagic to implement
constraints on behalf of the user, the internal params cannot be interpreted
without reparametrizing them back to the full params DataFrame.
Args:
criterion (callable or list of callables): Python function that takes a pandas
DataFrame with parameters as the first argument. Supported outputs are:
- scalar floating point
- np.ndarray: contributions for the tao Pounders algorithm.
- tuple of a scalar floating point and a pd.DataFrame:
In this case the first output is the criterion value.
The second output are the comparison_plot_data.
See :ref:`comparison_plot`.
.. warning::
This feature is not implemented in the dashboard yet.
params (pd.DataFrame or list of pd.DataFrames): See :ref:`params`.
algorithm (str or list of strings): Name of the optimization algorithm.
See :ref:`list_of_algorithms`.
criterion_kwargs (dict or list of dict): Additional criterion keyword arguments.
constraints (list or list of lists): List with constraint dictionaries.
See :ref:`constraints` for details.
general_options (dict): Additional configurations for the optimization.
Keys can include:
- keep_dashboard_alive (bool): if True and dashboard is True the process
in which the dashboard is run is not terminated when maximize or
minimize finish.
algo_options (dict or list of dicts): Algorithm specific configurations.
gradient (callable or None): Gradient function of the criterion, if available.
gradient_options (dict): Options for the gradient function.
logging (str or pathlib.Path or list thereof): Path to an sqlite3 file which
typically has the file extension ``.db``. If the file does not exist,
it will be created. See :ref:`logging` for details.
log_options (dict or list of dict): Keyword arguments to influence the logging.
See :ref:`logging` for details.
dashboard (bool): Whether to create and show a dashboard, default is False.
See :ref:`dashboard` for details.
dash_options (dict or list of dict, optional): Options passed to the dashboard.
Supported keys are:
- port (int): port where to display the dashboard
- no_browser (bool): whether to display the dashboard in a browser
- rollover (int): how many iterations to keep in the monitoring plots
Returns:
optim_kwargs (dict): Dictionary collecting all arguments that are going to be
passed to _internal_minimize.
database_path (str or pathlib.Path or None): Path to the database.
result_kwargs (dict): Arguments needed to reparametrize back from the internal
parameter array to the params DataFrame of the user supplied problem.
In addition it contains whether the dashboard process should be kept alive
after the optimization(s) terminate(s).
"""
optim_kwargs, params, dash_options, database_path = _pre_process_arguments(
params=params,
algorithm=algorithm,
algo_options=algo_options,
logging=logging,
dashboard=dashboard,
dash_options=dash_options,
)
# harmonize criterion interface
is_maximization = general_options.pop("_maximization", False)
criterion = expand_criterion_output(criterion)
criterion = negative_criterion(criterion) if is_maximization else criterion
# first criterion evaluation for the database and the pounders algorithm
fitness_eval, comparison_plot_data = _evaluate_criterion(
criterion=criterion, params=params, criterion_kwargs=criterion_kwargs
)
general_options = general_options.copy()
general_options["start_criterion_value"] = fitness_eval
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
# transform the user supplied inputs into the internal inputs.
constraints, params = process_constraints(constraints, params)
internal_params = reparametrize_to_internal(params, constraints)
bounds = _get_internal_bounds(params)
# setup the database to pass it to the internal functions for logging
if logging:
database = prepare_database(
path=logging,
params=params,
comparison_plot_data=comparison_plot_data,
dash_options=dash_options,
constraints=constraints,
**log_options,
)
else:
database = False
# transform the user supplied criterion and gradient function into their
# internal counterparts that use internal inputs.
# this must be passed to _create_internal_criterion because the internal
# gradient creates its own internal criterion function whose calls are
# logged differently by the database.
logging_decorator = functools.partial(
log_evaluation,
database=database,
tables=["params_history", "criterion_history", "comparison_plot", "timestamps"],
)
internal_criterion = _create_internal_criterion(
criterion=criterion,
params=params,
constraints=constraints,
criterion_kwargs=criterion_kwargs,
logging_decorator=logging_decorator,
general_options=general_options,
database=database,
)
internal_gradient = _create_internal_gradient(
gradient=gradient,
gradient_options=gradient_options,
criterion=criterion,
params=params,
constraints=constraints,
criterion_kwargs=criterion_kwargs,
general_options=general_options,
database=database,
)
internal_kwargs = {
"internal_criterion": internal_criterion,
"internal_params": internal_params,
"bounds": bounds,
"internal_gradient": internal_gradient,
"database": database,
"general_options": general_options,
}
optim_kwargs.update(internal_kwargs)
result_kwargs = {
"params": params,
"constraints": constraints,
"keep_dashboard_alive": general_options.pop("keep_dashboard_alive", False),
}
return optim_kwargs, database_path, result_kwargs
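# Illustrative sketch (not part of the original module): a minimal call of
# transform_problem with a simple sphere criterion. The algorithm name and all
# option values below are assumptions for demonstration only; to actually run,
# the algorithm string must match an entry registered in algo_dict.json.
def _example_transform_problem():
    def sphere(params):
        return (params["value"] ** 2).sum()

    params = pd.DataFrame({"value": [1.0, 2.0, 3.0]})
    return transform_problem(
        criterion=sphere,
        params=params,
        algorithm="scipy_L-BFGS-B",  # assumed algorithm identifier
        criterion_kwargs={},
        constraints=[],
        general_options={},
        algo_options={},
        gradient=None,
        gradient_options=None,
        logging=False,
        log_options={},
        dashboard=False,
        dash_options={},
    )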
def _pre_process_arguments(
params, algorithm, algo_options, logging, dashboard, dash_options
):
"""Process user supplied arguments without affecting the optimization problem.
Args:
params (pd.DataFrame or list of pd.DataFrames): See :ref:`params`.
algorithm (str or list of strings): Identifier of the optimization algorithm.
See :ref:`list_of_algorithms` for supported values.
algo_options (dict or list of dicts):
algorithm specific configurations for the optimization
dashboard (bool): Whether to create and show a dashboard, default is False.
See :ref:`dashboard` for details.
dash_options (dict or list of dict, optional): Options passed to the dashboard.
Supported keys are:
- port (int): port where to display the dashboard
- no_browser (bool): whether to display the dashboard in a browser
- rollover (int): how many iterations to keep in the monitoring plots
Returns:
optim_kwargs (dict): dictionary collecting the arguments that are going to be
passed to _internal_minimize
params (pd.DataFrame): The expanded params DataFrame with all needed columns.
See :ref:`params`.
database_path (str or pathlib.Path or None): path to the database.
"""
standard_dash_options = {"no_browser": False, "port": None, "rollover": 500}
# dash_options must come last so that user-supplied values overwrite the defaults
dash_options = {**standard_dash_options, **dash_options}
origin, algo_name = _process_algorithm(algorithm)
optim_kwargs = {
"origin": origin,
"algo_name": algo_name,
"algo_options": algo_options,
}
params = _set_params_defaults_if_missing(params)
_check_params(params)
database_path = logging if dashboard else None
return optim_kwargs, params, dash_options, database_path
def _process_algorithm(algorithm):
"""Identify the algorithm from the user-supplied string.
Args:
algorithm (str): Package and name of the algorithm. It should be of the format
{pkg}_{name}.
Returns:
origin (str): Name of the package.
algo_name (str): Name of the algorithm.
"""
current_dir_path = Path(__file__).resolve().parent
with open(current_dir_path / "algo_dict.json") as j:
algos = json.load(j)
origin, algo_name = algorithm.split("_", 1)
try:
assert algo_name in algos[origin], "Invalid algorithm requested: {}".format(
algorithm
)
except (AssertionError, KeyError):
proposals = propose_algorithms(algorithm, algos)
raise NotImplementedError(
f"{algorithm} is not a valid choice. Did you mean one of {proposals}?"
)
return origin, algo_name
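# Worked example (hypothetical algorithm name, assuming it is registered in
# algo_dict.json): _process_algorithm("nlopt_bobyqa") -> ("nlopt", "bobyqa")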
def _set_params_defaults_if_missing(params):
"""Set defaults and run checks on the user-supplied params.
Args:
params (pd.DataFrame): See :ref:`params`.
Returns:
params (pd.DataFrame): With defaults expanded params DataFrame.
"""
params = params.copy()
if "lower" not in params.columns:
params["lower"] = -np.inf
else:
params["lower"].fillna(-np.inf, inplace=True)
if "upper" not in params.columns:
params["upper"] = np.inf
else:
params["upper"].fillna(np.inf, inplace=True)
if "group" not in params.columns:
params["group"] = "All Parameters"
if "name" not in params.columns:
names = [_index_element_to_string(tup) for tup in params.index]
params["name"] = names
return params
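# Worked example (assumed input): a params DataFrame with only a "value"
# column and a default RangeIndex gains the missing columns, e.g.
#   _set_params_defaults_if_missing(pd.DataFrame({"value": [0.5]}))
# returns a DataFrame where lower=-inf, upper=inf, group="All Parameters"
# and name="0" have been added to the single row.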
def _index_element_to_string(element, separator="_"):
if isinstance(element, (tuple, list)):
as_strings = [str(entry).replace("-", "_") for entry in element]
res_string = separator.join(as_strings)
else:
res_string = str(element)
return res_string
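# Worked examples of the helper above:
#   _index_element_to_string(("utility", "age-effect")) -> "utility_age_effect"
#   _index_element_to_string("beta") -> "beta"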
def _check_params(params):
Check that params has a unique index and contains none of the columns that are created internally.
Args:
params (pd.DataFrame or list of pd.DataFrames): See :ref:`params`.
Raises:
AssertionError: The index contains duplicates.
ValueError: The DataFrame contains internal columns.
"""
assert (
not params.index.duplicated().any()
), "No duplicates allowed in the index of params."
invalid_names = [
"_fixed",
"_fixed_value",
"_is_fixed_to_value",
"_is_fixed_to_other",
]
invalid_present_columns = []
for col in params.columns:
if col in invalid_names or col.startswith("_internal"):
invalid_present_columns.append(col)
if len(invalid_present_columns) > 0:
msg = (
"Column names starting with '_internal' and as well as any other of the "
f"following columns are not allowed in params:\n{invalid_names}."
f"This is violated for:\n{invalid_present_columns}."
)
raise ValueError(msg)
def _evaluate_criterion(criterion, params, criterion_kwargs):
"""Evaluate the criterion function for the first time.
The comparison_plot_data output is needed to initialize the database.
The criterion value | |
"unhailed",
"unhaired",
"unhairer",
"unhallow",
"unhalved",
"unhanded",
"unhanged",
"unhatted",
"unhealed",
"unhedged",
"unheeded",
"unhelmed",
"unhelped",
"unheroic",
"unhinges",
"unholier",
"unholily",
"unhooded",
"unhooked",
"unhorsed",
"unhorses",
"unhoused",
"unhouses",
"unhusked",
"unialgal",
"unicolor",
"unideaed",
"unifaces",
"unifiers",
"unifilar",
"unilobed",
"unimbued",
"unionise",
"unionize",
"uniquest",
"unironed",
"unironic",
"unisexes",
"unisonal",
"unitages",
"unitards",
"unitedly",
"unitized",
"unitizer",
"unitizes",
"univalve",
"univocal",
"unjammed",
"unjoined",
"unjoints",
"unjoyful",
"unjudged",
"unkeeled",
"unkenned",
"unkennel",
"unkinder",
"unkindly",
"unkingly",
"unkinked",
"unkissed",
"unkosher",
"unlacing",
"unlading",
"unlashed",
"unlashes",
"unlaying",
"unlearns",
"unlearnt",
"unleased",
"unlethal",
"unletted",
"unlevels",
"unlevied",
"unlicked",
"unlimber",
"unlively",
"unliving",
"unloader",
"unloosed",
"unloosen",
"unlooses",
"unlovely",
"unloving",
"unmailed",
"unmakers",
"unmaking",
"unmanful",
"unmapped",
"unmarred",
"unmasker",
"unmatted",
"unmeetly",
"unmellow",
"unmelted",
"unmended",
"unmeshed",
"unmeshes",
"unmewing",
"unmilled",
"unmingle",
"unmiters",
"unmitred",
"unmitres",
"unmixing",
"unmodish",
"unmolded",
"unmolten",
"unmoored",
"unmoving",
"unmuffle",
"unmuzzle",
"unnailed",
"unnerved",
"unnerves",
"unornate",
"unpacker",
"unpadded",
"unparted",
"unpaying",
"unpeeled",
"unpegged",
"unpenned",
"unpeople",
"unperson",
"unpicked",
"unpiling",
"unpinned",
"unpitied",
"unpitted",
"unplaced",
"unplaits",
"unpliant",
"unplowed",
"unpoetic",
"unpoised",
"unpolite",
"unpolled",
"unposted",
"unpotted",
"unpretty",
"unpriced",
"unprimed",
"unprized",
"unprobed",
"unproved",
"unpruned",
"unpucker",
"unpurely",
"unpurged",
"unpuzzle",
"unquiets",
"unquotes",
"unraised",
"unreally",
"unreason",
"unreeled",
"unreeler",
"unreeved",
"unreeves",
"unrented",
"unrepaid",
"unrepair",
"unrested",
"unretire",
"unrhymed",
"unribbed",
"unriddle",
"unrifled",
"unrigged",
"unrinsed",
"unripely",
"unripest",
"unripped",
"unrobing",
"unrolled",
"unroofed",
"unrooted",
"unrounds",
"unrulier",
"unrushed",
"unrusted",
"unsaddle",
"unsafely",
"unsafety",
"unsaying",
"unscaled",
"unscrews",
"unseamed",
"unseared",
"unseated",
"unseeded",
"unseeing",
"unseized",
"unserved",
"unsettle",
"unsewing",
"unsexing",
"unsexual",
"unshaded",
"unshaken",
"unshamed",
"unshaped",
"unshapen",
"unshared",
"unshells",
"unshifts",
"unshrunk",
"unsicker",
"unsifted",
"unsights",
"unsilent",
"unsinful",
"unslaked",
"unsliced",
"unslings",
"unsmoked",
"unsnarls",
"unsoaked",
"unsocial",
"unsoiled",
"unsolder",
"unsonsie",
"unsought",
"unsoured",
"unspeaks",
"unsphere",
"unspools",
"unsprung",
"unstably",
"unstacks",
"unstates",
"unstayed",
"unsteels",
"unsticks",
"unstitch",
"unstoned",
"unstraps",
"unstress",
"unstring",
"unstuffy",
"unsubtle",
"unsubtly",
"unsurely",
"unswathe",
"unswayed",
"unswears",
"untacked",
"untanned",
"untasted",
"untaught",
"untended",
"untented",
"untether",
"unthawed",
"unthinks",
"unthread",
"unthrone",
"untidied",
"untidier",
"untidies",
"untidily",
"untieing",
"untilled",
"untilted",
"untinged",
"untipped",
"untiring",
"untraced",
"untracks",
"untreads",
"untrendy",
"untruest",
"untrusty",
"untruths",
"untucked",
"untufted",
"untuning",
"untwined",
"untwines",
"untwists",
"ununbium",
"ununited",
"unvalued",
"unvaried",
"unveined",
"unversed",
"unvested",
"unviable",
"unvoiced",
"unvoices",
"unwalled",
"unwaning",
"unwarier",
"unwarily",
"unwarmed",
"unwarned",
"unwarped",
"unwasted",
"unweaned",
"unweaves",
"unwedded",
"unweeded",
"unweight",
"unwelded",
"unwetted",
"unwifely",
"unwilled",
"unwinder",
"unwisdom",
"unwisely",
"unwisest",
"unwished",
"unwishes",
"unwitted",
"unwonted",
"unwooded",
"unworked",
"unyeaned",
"unyoking",
"upbearer",
"upboiled",
"upbraids",
"upbuilds",
"upchucks",
"upclimbs",
"upcoiled",
"upcurled",
"upcurved",
"upcurves",
"updarted",
"updaters",
"updiving",
"updrafts",
"updrying",
"upending",
"upflings",
"upflowed",
"upfolded",
"upgather",
"upgazing",
"upgirded",
"upgrowth",
"upheaped",
"upheaved",
"upheaver",
"upheaves",
"uphoards",
"upholder",
"upleaped",
"uplifter",
"uplights",
"uppiling",
"uppishly",
"upraised",
"upraiser",
"upraises",
"uprating",
"upreared",
"uprisers",
"uprivers",
"uprootal",
"uprooter",
"uproused",
"uprouses",
"uprushed",
"uprushes",
"upscaled",
"upscales",
"upsetter",
"upshifts",
"upshoots",
"upsilons",
"upsizing",
"upsoared",
"upsprang",
"upspring",
"upsprung",
"upstaged",
"upstager",
"upstages",
"upstands",
"upstared",
"upstares",
"upstater",
"upstates",
"upstroke",
"upsurged",
"upsurges",
"upsweeps",
"upswells",
"upswings",
"uptalked",
"uptempos",
"upthrown",
"upthrows",
"upthrust",
"uptilted",
"uptossed",
"uptosses",
"uptowner",
"uptrends",
"upwafted",
"upwelled",
"uraemias",
"uraeuses",
"uralites",
"uralitic",
"uranides",
"uranisms",
"uranites",
"uranitic",
"uraniums",
"uranylic",
"urbanely",
"urbanest",
"urbanise",
"urbanist",
"urbanite",
"urbanity",
"urbanize",
"uredinia",
"ureteric",
"urethans",
"urethrae",
"urethras",
"urgingly",
"uridines",
"urinated",
"urinates",
"urinator",
"urinemia",
"urinemic",
"urochord",
"urodeles",
"uroliths",
"uropodal",
"uropygia",
"uroscopy",
"urostyle",
"ursiform",
"urticant",
"urticate",
"urushiol",
"usaunces",
"usquabae",
"usquebae",
"ustulate",
"usufruct",
"usurious",
"usurpers",
"uteruses",
"utilidor",
"utilizer",
"utopians",
"utopisms",
"utopists",
"utricles",
"utriculi",
"utterers",
"uvularly",
"uvulitis",
"uxorious",
"vacantly",
"vaccinal",
"vaccinas",
"vaccinee",
"vacuumed",
"vagility",
"vaginate",
"vagotomy",
"vagrancy",
"vagrants",
"vainness",
"valanced",
"valences",
"valerate",
"valguses",
"valiance",
"valiancy",
"valiants",
"valleyed",
"valonias",
"valorise",
"valorize",
"valorous",
"valuably",
"valuated",
"valuates",
"valuator",
"valvelet",
"valvulae",
"valvules",
"vambrace",
"vamoosed",
"vamooses",
"vamosing",
"vampiest",
"vampiric",
"vanadate",
"vanadous",
"vandalic",
"vandyked",
"vandykes",
"vanillas",
"vanillic",
"vanillin",
"vanisher",
"vanitied",
"vanitory",
"vanloads",
"vanpools",
"vantages",
"vapidity",
"vaporers",
"vaporing",
"vaporise",
"vaporish",
"vaporize",
"vaporous",
"vapoured",
"vapourer",
"vaqueros",
"varactor",
"variated",
"variates",
"variedly",
"variform",
"variolar",
"variolas",
"varioles",
"variorum",
"varistor",
"varletry",
"varments",
"varmints",
"varnishy",
"varoomed",
"vasculum",
"vaselike",
"vasiform",
"vasotomy",
"vastiest",
"vaticide",
"vaulters",
"vaultier",
"vaunters",
"vauntful",
"vaunting",
"vavasors",
"vavasour",
"vavassor",
"vealiest",
"vectored",
"vedalias",
"vedettes",
"vegetant",
"vegetate",
"vegetist",
"vegetive",
"veiledly",
"veilings",
"veillike",
"veiniest",
"veinings",
"veinless",
"veinlets",
"veinlike",
"veinules",
"veinulet",
"velamina",
"velarium",
"velarize",
"veligers",
"velleity",
"veloutes",
"veluring",
"velveret",
"velveted",
"venality",
"venation",
"vendable",
"vendaces",
"vendeuse",
"vendible",
"vendibly",
"veneered",
"veneerer",
"venenate",
"venenose",
"venerate",
"veneries",
"venially",
"venisons",
"venogram",
"venology",
"venomers",
"venoming",
"venosity",
"venously",
"ventages",
"ventails",
"ventless",
"ventrals",
"venturis",
"venulose",
"venulous",
"verandas",
"veratria",
"veratrin",
"veratrum",
"verbenas",
"verbiles",
"verbless",
"verdancy",
"verderer",
"verderor",
"verditer",
"verdured",
"verdures",
"verecund",
"vergence",
"verismos",
"veristic",
"verities",
"verjuice",
"vermeils",
"vermoulu",
"vermuths",
"vernacle",
"vernally",
"vernicle",
"verniers",
"vernixes",
"verrucae",
"verrucas",
"versants",
"verseman",
"versemen",
"versicle",
"versines",
"vertexes",
"verticil",
"vertigos",
"vervains",
"vesicant",
"vesicate",
"vesicula",
"vesperal",
"vespiary",
"vesseled",
"vestally",
"vestiary",
"vestigia",
"vestings",
"vestless",
"vestlike",
"vestment",
"vestries",
"vestural",
"vestured",
"vestures",
"vesuvian",
"vetivers",
"vetivert",
"vexation",
"vexillar",
"vexillum",
"vexingly",
"viaducts",
"vialling",
"viaticum",
"viatores",
"vibrance",
"vibrants",
"vibrated",
"vibratos",
"vibrioid",
"vibrions",
"vibrissa",
"vibronic",
"vicarate",
"vicarial",
"viceless",
"vicenary",
"viceroys",
"vicinage",
"vicomtes",
"victress",
"victuals",
"vicugnas",
"videotex",
"videttes",
"vidicons",
"viewdata",
"viewiest",
"viewless",
"vigneron",
"vigoroso",
"vilayets",
"vileness",
"vilifier",
"vilifies",
"vilipend",
"villadom",
"villatic",
"villeins",
"vinasses",
"vincible",
"vincibly",
"vinculum",
"vindaloo",
"vinegary",
"vineries",
"vinifera",
"vinified",
"vinifies",
"vinosity",
"vinously",
"vintager",
"violable",
"violably",
"violater",
"violists",
"violones",
"viomycin",
"viperine",
"viperish",
"viperous",
"viragoes",
"virelais",
"virelays",
"viremias",
"virgates",
"virgules",
"viricide",
"viridity",
"virilely",
"virilism",
"virilize",
"virtuosa",
"virtuose",
"virtuosi",
"virucide",
"virusoid",
"viscacha",
"viscidly",
"viscoses",
"viselike",
"visional",
"visioned",
"visitant",
"visiters",
"visoring",
"vitalise",
"vitalism",
"vitalist",
"vitalize",
"vitamers",
"vitellin",
"vitellus",
"vitesses",
"vitiable",
"vitiated",
"vitiates",
"vitiator",
"vitrains",
"vitrines",
"vitriols",
"vittling",
"vituline",
"vivacity",
"vivaries",
"vivarium",
"viverrid",
"vividest",
"vivified",
"vivifier",
"vivifies",
"vivipara",
"vivisect",
"vixenish",
"vizarded",
"vizcacha",
"vizirate",
"vizirial",
"vizoring",
"vocables",
"vocalese",
"vocalics",
"vocalise",
"vocalism",
"vocality",
"vocalize",
"vocative",
"vocoders",
"vogueing",
"voguings",
"voiceful",
"voicings",
"voidable",
"voidance",
"voidness",
"volcanos",
"voleries",
"volitant",
"volitive",
"volleyed",
"volleyer",
"volplane",
"voltaism",
"voluming",
"volutins",
"volution",
"volvoxes",
"volvulus",
"vomerine",
"vomiters",
"vomitive",
"vomitory",
"vomitous",
"voodooed",
"voracity",
"vorlages",
"vortexes",
"vortical",
"votaress",
"votaries",
"votarist",
"voteable",
"voteless",
"votively",
"vouchees",
"vouching",
"voudouns",
"voussoir",
"vouvrays",
"vowelize",
"voyaging",
"vrooming",
"vuggiest",
"vulcanic",
"vulgarer",
"vulgarly",
"vulgates",
"vulguses",
"vulvitis",
"wabblers",
"wabblier",
"wabbling",
"wackiest",
"waddings",
"waddlers",
"waddling",
"waddying",
"wadeable",
"wadmaals",
"wadmolls",
"waesucks",
"wafering",
"wafflers",
"wafflier",
"waffling",
"waftages",
"waftures",
"wageless",
"wagerers",
"wagglier",
"waggling",
"waggoned",
"wagonage",
"wagoners",
"wagoning",
"wagtails",
"wahconda",
"waiflike",
"wailsome",
"wainscot",
"waisters",
"waisting",
"waitered",
"waitings",
"waitrons",
"wakandas",
"wakeless",
"wakeners",
"wakening",
"wakerife",
"walkaway",
"walkings",
"walkouts",
"walkover",
"walkyrie",
"wallaroo",
"walleyed",
"walleyes",
"walloped",
"walloper",
"wallowed",
"wallower",
"walruses",
"waltzers",
"wamblier",
"wambling",
"wamefous",
"wamefuls",
"wammuses",
"wampuses",
"wanderoo",
"wanglers",
"wangling",
"wanigans",
"wannabee",
"wannigan",
"wantages",
"wantoned",
"wantoner",
"wantonly",
"warbling",
"wardenry",
"wardless",
"wardress",
"wardroom",
"wardship",
"wareroom",
"warfares",
"warhorse",
"wariness",
"warisons",
"warmaker",
"warmness",
"warmouth",
"warpages",
"warpaths",
"warplane",
"warpower",
"warpwise",
"warragal",
"warrener",
"warrigal",
"warslers",
"warsling",
"warstled",
"warstler",
"warstles",
"warthogs",
"wartiest",
"wartimes",
"wartless",
"wartlike",
"warworks",
"washbowl",
"washdays",
"washiest",
"washings",
"washouts",
"washrags",
"washtubs",
"waspiest",
"wasplike",
"wassails",
"wastable",
"wastages",
"wastelot",
"wasterie",
"wasteway",
"wastrels",
"wastries",
"watchcry",
"watcheye",
"watchout",
"waterage",
"waterbus",
"waterdog",
"waterers",
"waterhen",
"waterier",
"waterily",
"waterish",
"waterlog",
"watermen",
"waterski",
"wattages",
"wattapes",
"watthour",
"wattless",
"wattling",
"wauchted",
"waughted",
"waveband",
"waveless",
"wavelike",
"waveoffs",
"waverers",
"wavicles",
"waviness",
"waxberry",
"waxbills",
"waxiness",
"waxplant",
"waxweeds",
"waxwings",
"waxworks",
"waxworms",
"waybills",
"waygoing",
"waylayer",
"waysides",
"weakener",
"weakfish",
"weaklier",
"weakling",
"weakside",
"weanling",
"weaponed",
"weariest",
"weariful",
"wearying",
"weasands",
"weaseled",
"weaselly",
"weazands",
"webbiest",
"webbings",
"webworms",
"wedeling",
"wedgiest",
"wedlocks",
"weediest",
"weedless",
"weedlike",
"weeniest",
"weensier",
"weepiest",
"weepings",
"weeviled",
"weevilly",
"weftwise",
"weigelas",
"weigelia",
"weighers",
"weighman",
"weighmen",
"weighter",
"weirdies",
"weirding",
"weirdoes",
"welchers",
"welching",
"welcomer",
"weldable",
"weldless",
"weldment",
"welfares",
"welladay",
"wellaway",
"wellborn",
"wellcurb",
"welldoer",
"wellhole",
"wellsite",
"welshers",
"welshing",
"weltered",
"weltings",
"wenchers",
"wenching",
"wendigos",
"wenniest",
"weregild",
"wergelds",
"wergelts",
"wergilds",
"wessands",
"westered",
"westings",
"westmost",
"wetbacks",
"wetproof",
"wettable",
"wettings",
"wetwares",
"whackers",
"whackier",
"whaleman",
"whalemen",
"whalings",
"whammies",
"whamming",
"whangees",
"whanging",
"whappers",
"whapping",
"wharfage",
"wharfing",
"whatness",
"whatnots",
"whatsits",
"wheatear",
"wheatens",
"wheedled",
"wheedler",
"wheedles",
"wheelies",
"wheelman",
"wheelmen",
"wheeping",
"wheepled",
"wheeples",
"wheezers",
"wheezier",
"wheezily",
"whelkier",
"whelming",
"whelping",
"wherried",
"wherries",
"whetters",
"whetting",
"wheyface",
"wheylike",
"whickers",
"whidding",
"whiffers",
"whiffets",
"whiffing",
"whiffled",
"whiffler",
"whiffles",
"whimbrel",
"whimpers",
"whimseys",
"whimsied",
"whimsies",
"whinchat",
"whingers",
"whinging",
"whiniest",
"whinnied",
"whinnier",
"whinnies",
"whipcord",
"whiplike",
"whippers",
"whippets",
"whippier",
"whiprays",
"whipsawn",
"whipsaws",
"whiptail",
"whipworm",
"whirlers",
"whirlier",
"whirlies",
"whirried",
"whirries",
"whirring",
"whishing",
"whishted",
"whiskery",
"whiskeys",
"whisking",
"whispery",
"whisting",
"whitecap",
"whitefly",
"whitened",
"whiteout",
"whitiest",
"whitings",
"whitlows",
"whitrack",
"whitters",
"whittled",
"whittler",
"whittles",
"whittret",
"whizbang",
"whizzers",
"whizzier",
"whizzing",
"wholisms",
"whomping",
"whoofing",
"whoopees",
"whoopers",
"whoopies",
"whooplas",
"whooshed",
"whooshes",
"whoredom",
"whoreson",
"whortles",
"whosever",
"whosises",
"whumping",
"whupping",
"wickapes",
"wickeder",
"wickings",
"wickiups",
"wickless",
"wickyups",
"wicopies",
"widdling",
"widebody",
"wideners",
"wideness",
"wideouts",
"widgeons",
"widowing",
"widthway",
"wielders",
"wieldier",
"wifedoms",
"wifehood",
"wifeless",
"wifelier",
"wifelike",
"wiftiest",
"wiggiest",
"wiggings",
"wigglers",
"wigglier",
"wigmaker",
"wildered",
"wildings",
"wildling",
"wiliness",
"willable",
"williwau",
"williwaw",
"willowed",
"willower",
"willyard",
"willyart",
"willying",
"willywaw",
"wimbling",
"wimpiest",
"wimpling",
"winchers",
"winching",
"windable",
"windages",
"windbags",
"windbell",
"windburn",
"windflaw",
"windgall",
"windiest",
"windigos",
"windless",
"windling",
"windpipe",
"windrows",
"windsock",
"windways",
"wineless",
"winesaps",
"wineshop",
"wineskin",
"winesops",
"wingback",
"wingbows",
"wingding",
"wingedly",
"wingiest",
"winglets",
"winglike",
"wingover",
"wingtips",
"winkling",
"winnable",
"winnocks",
"winnowed",
"winnower",
"winsomer",
"wintered",
"winterer",
"winterly",
| |
<filename>sportsbetting/user_functions.py
#!/usr/bin/env python3
"""
Main functions of the betting assistant
"""
import colorama
import copy
import inspect
import socket
import sqlite3
import sys
import termcolor
import time
import traceback
import urllib
import urllib.error
import urllib.request
from itertools import combinations, permutations
from pprint import pprint
import numpy as np
import selenium
import selenium.common
import unidecode
import urllib3
from bs4 import BeautifulSoup
from multiprocessing.pool import ThreadPool
import sports_betting.sportsbetting as sportsbetting
from sports_betting.sportsbetting import selenium_init
from sports_betting.sportsbetting.database_functions import (get_id_formatted_competition_name, get_competition_by_id, import_teams_by_url,
import_teams_by_sport, import_teams_by_competition_id_thesportsdb)
from sports_betting.sportsbetting.parser_functions import parse
from sports_betting.sportsbetting.auxiliary_functions import (valid_odds, format_team_names, merge_dict_odds, afficher_mises_combine,
cotes_combine_all_sites, defined_bets, binomial, best_match_base,
filter_dict_dates, get_nb_issues, best_combine_reduit, filter_dict_minimum_odd)
from sports_betting.sportsbetting.basic_functions import (gain2, mises2, gain, mises, mises_freebet, cotes_freebet,
gain_pari_rembourse_si_perdant, gain_freebet2, mises_freebet2,
mises_pari_rembourse_si_perdant, gain_promo_gain_cote, mises_promo_gain_cote,
gain_gains_nets_boostes, mises_gains_nets_boostes, gain3, mises3)
from sports_betting.sportsbetting.lambda_functions import get_best_odds, get_profit
def parse_competition(competition, sport="football", *sites):
"""
Return the odds of a given competition for one or several betting sites.
If no site is specified, parsing is done on all bookmakers recognised by
the ARJEL.
"""
if sportsbetting.ABORT:
raise sportsbetting.AbortException
try:
_id, formatted_name = get_id_formatted_competition_name(competition, sport)
except TypeError:
print("Competition inconnue")
return
print(formatted_name, *sites)
if not sites:
sites = ['betclic', 'betstars', 'bwin', 'france_pari', 'joa', 'netbet',
'parionssport', 'pasinobet', 'pmu', 'unibet', 'winamax', 'zebet']
res_parsing = {}
for site in sites:
if len(sites) > 1:
print(site)
url = get_competition_by_id(_id, site)
try:
if url:
try:
res_parsing[site] = parse(site, url)
except urllib3.exceptions.MaxRetryError:
selenium_init.DRIVER[site].quit()
print("Redémarrage de selenium")
selenium_init.start_selenium(site, timeout=20)
res_parsing[site] = parse(site, url)
except sqlite3.OperationalError:
print("Erreur dans la base de données, redémarrage en cours")
res_parsing[site] = parse(site, url)
except urllib.error.URLError:
print("{} non accessible sur {} (délai écoulé)".format(competition, site))
except KeyboardInterrupt:
res_parsing[site] = {}
except selenium.common.exceptions.TimeoutException:
print("Element non trouvé par selenium ({} sur {})".format(competition, site))
except sportsbetting.UnavailableCompetitionException:
print("{} non disponible sur {}".format(competition, site))
except socket.timeout:
print("{} non accessible sur {} (timeout socket)".format(competition, site))
except selenium.common.exceptions.StaleElementReferenceException:
print("StaleElement non trouvé par selenium ({} sur {})".format(competition, site))
except selenium.common.exceptions.WebDriverException:
print("Connection closed ({} sur {})".format(competition, site))
res = format_team_names(res_parsing, sport, competition)
out = valid_odds(merge_dict_odds(res), sport)
return out
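# Illustrative usage (competition and site names are hypothetical; real
# availability depends on the bookmakers' sites at run time):
#   odds = parse_competition("Ligue 1", "football", "winamax", "betclic")
# The result maps each match name to its date and per-site odds.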
def parse_competitions_site(competitions, sport, site):
list_odds = []
if len(competitions) > 40 and site == "winamax": # to avoid being blocked by winamax
competitions = competitions[:40]
sportsbetting.SITE_PROGRESS[site] = 0
try:
for competition in competitions:
list_odds.append(parse_competition(competition, sport, site))
sportsbetting.PROGRESS += 100 / (len(competitions) * sportsbetting.SUB_PROGRESS_LIMIT)
sportsbetting.SITE_PROGRESS[site] += 100 / len(competitions)
except sportsbetting.UnavailableSiteException:
print("{} non accessible".format(site))
sportsbetting.SITE_PROGRESS[site] = 100
except sportsbetting.AbortException:
print("Interruption", site)
except sqlite3.OperationalError:
print("Database is locked", site)
if site in sportsbetting.SELENIUM_SITES:
selenium_init.DRIVER[site].quit()
return merge_dict_odds(list_odds)
def parse_competitions(competitions, sport="football", *sites):
sites_order = ['bwin', 'parionssport', 'betstars', 'pasinobet', 'joa', 'unibet', 'betclic',
'pmu', 'france_pari', 'netbet', 'winamax', 'zebet']
if not sites:
sites = sites_order
sportsbetting.EXPECTED_TIME = 28 + len(competitions) * 12.5
selenium_sites = sportsbetting.SELENIUM_SITES.intersection(sites)
selenium_required = ((inspect.currentframe().f_back.f_code.co_name
in ["<module>", "parse_thread"]
or 'test' in inspect.currentframe().f_back.f_code.co_name)
and (selenium_sites or not sites))
sportsbetting.SELENIUM_REQUIRED = selenium_required
sites = [site for site in sites_order if site in sites]
sportsbetting.PROGRESS = 0
if selenium_required:
for site in selenium_sites:
while True:
headless = sport != "handball" or site != "bwin"
if sportsbetting.ABORT or selenium_init.start_selenium(site, headless, timeout=15):
break
colorama.init()
print(termcolor.colored('Restarting', 'yellow'))
colorama.Style.RESET_ALL
colorama.deinit()
sportsbetting.PROGRESS += 100/len(selenium_sites)
sportsbetting.PROGRESS = 0
sportsbetting.SUB_PROGRESS_LIMIT = len(sites)
for competition in competitions:
if competition == sport or "Tout le" in competition:
import_teams_by_sport(sport)
else:
id_competition = get_id_formatted_competition_name(competition, sport)[0]
if id_competition < 0:
import_teams_by_competition_id_thesportsdb(id_competition)
else:
import_teams_by_url("http://www.comparateur-de-cotes.fr/comparateur/" + sport
+ "/a-ed" + str(id_competition))
list_odds = []
try:
sportsbetting.IS_PARSING = True
list_odds = ThreadPool(7).map(lambda x: parse_competitions_site(competitions, sport, x), sites)
sportsbetting.ODDS[sport] = merge_dict_odds(list_odds)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
sportsbetting.IS_PARSING = False
if selenium_required:
colorama.init()
print(termcolor.colored('Drivers closed', 'green'))
colorama.Style.RESET_ALL
colorama.deinit()
sportsbetting.ABORT = False
def odds_match(match, sport="football"):
"""
Return the odds of a given match on all ARJEL-licensed betting sites
"""
match = unidecode.unidecode(match)
all_odds = sportsbetting.ODDS[sport]
opponents = match.split('-')
for match_name in all_odds:
if (opponents[0].lower().strip() in unidecode.unidecode(match_name.split("-")[0].lower())
and opponents[1].lower().strip() in unidecode.unidecode(match_name.split("-")[1].lower())):
break
else:
for match_name in all_odds:
if (opponents[0].lower().strip() in unidecode.unidecode(match_name.lower())
and opponents[1].lower().strip() in unidecode.unidecode(match_name.lower())):
break
else:
return None, None
print(match_name)
return match_name, copy.deepcopy(all_odds[match_name])
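# Illustrative usage (assumes parse_competitions was called beforehand so that
# sportsbetting.ODDS["football"] is populated; team names are hypothetical):
#   name, odds = odds_match("Monaco - Lyon")
# name is the full match name as stored in ODDS, and odds contains the date
# and the per-site odds for that match.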
def best_stakes_match(match, site, bet, minimum_odd, sport="football"):
"""
Given a match, a bookmaker, an amount to stake on that bookmaker and a
minimum odd, return the best combination of bets to place.
"""
best_match, all_odds = odds_match(match, sport)
if not all_odds:
print("No match found")
return
pprint(all_odds)
odds_site = all_odds['odds'][site]
best_odds = copy.deepcopy(odds_site)
best_profit = -float("inf")
n = len(all_odds['odds'][site])
best_sites = [site for _ in range(n)]
best_i = 0
best_overall_odds = None
bets = None
sites = None
for odds in all_odds['odds'].items():
for i in range(n):
if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"):
best_odds[i] = odds[1][i]
best_sites[i] = odds[0]
for i in range(n):
if odds_site[i] >= minimum_odd:
odds_to_check = (best_odds[:i] + [odds_site[i]] + best_odds[i + 1:])
profit = gain2(odds_to_check, i, bet)
if profit > best_profit:
best_profit = profit
best_overall_odds = odds_to_check
sites = best_sites[:i] + [site] + best_sites[i + 1:]
bets = mises2(odds_to_check, bet, i)
best_i = i
if best_overall_odds:
mises2(best_overall_odds, bet, best_i, True)
afficher_mises_combine(best_match.split(" / "), [sites], [bets], all_odds["odds"], sport)
else:
print("No match found")
def best_match_under_conditions(site, minimum_odd, bet, sport="football", date_max=None,
time_max=None, date_min=None, time_min=None, one_site=False,
live=False):
"""
Return the best match to bet on when a given amount must be staked at a
given odd. That amount can be placed on a single outcome (one_site=False)
or spread over several outcomes of the same match (one_site=True), in which
case every odd of the match must satisfy the minimum-odd criterion.
"""
odds_function = get_best_odds(one_site)
profit_function = get_profit(bet, one_site)
criteria = lambda odds_to_check, i: ((not one_site and odds_to_check[i] >= minimum_odd)
or (one_site and all(odd >= minimum_odd
for odd in odds_to_check)))
display_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, True) if not one_site
else mises(best_overall_odds, bet,
True))
result_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet,
best_rank, False) if not one_site
else mises(best_overall_odds, bet,
False))
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, one_site=one_site)
def best_match_under_conditions2(site, minimum_odd, stake, sport="football", date_max=None,
time_max=None, date_min=None, time_min=None):
all_odds = filter_dict_dates(sportsbetting.ODDS[sport], date_max, time_max, date_min, time_min)
best_profit = -float("inf")
best_match = None
best_overall_odds = None
sites = None
nb_matches = len(all_odds)
n = get_nb_issues(sport)
for match in all_odds:
sportsbetting.PROGRESS += 100 / nb_matches
if site in all_odds[match]['odds']:
odds_site = all_odds[match]['odds'][site]
best_odds = copy.deepcopy(odds_site)
best_sites = [site for _ in range(n)]
for odds in all_odds[match]['odds'].items():
for i in range(n):
if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"):
best_odds[i] = odds[1][i]
best_sites[i] = odds[0]
for odd_i, site_i in zip(best_odds, best_sites):
if odd_i < 1.1 and site_i != "pmu":
break
else:
profit = gain3(odds_site, best_odds, stake, minimum_odd)
if profit > best_profit:
best_profit = profit
best_odds_site = copy.deepcopy(odds_site)
best_best_odds = copy.deepcopy(best_odds)
best_match = match
stakes, best_indices = mises3(odds_site, best_odds, stake, minimum_odd)
sites = [site if i in best_indices else best_sites[i] for i in range(n)]
if best_match:
print(best_match)
pprint(all_odds[best_match])
mises3(best_odds_site, best_best_odds, stake, minimum_odd, True)
afficher_mises_combine([best_match], [sites], [stakes],
all_odds[best_match]["odds"], sport)
else:
print("No match found")
def best_match_pari_gagnant(site, minimum_odd, bet, sport="football",
date_max=None, time_max=None, date_min=None,
time_min=None, nb_matches_combine=1):
"""
Return the best match to bet on when a bet must be won at a given odd on a
given site.
"""
stakes = []
n = 2 + (sport not in ["tennis", "volleyball", "basketball", "nba"])
for _ in range(n**nb_matches_combine):
stakes.append([bet, site, minimum_odd])
best_match_stakes_to_bet(stakes, nb_matches_combine, sport, date_max, time_max, True)
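# Worked example of the sizing above (illustrative): for football there are
# n = 3 issues per match, so with nb_matches_combine=2 the loop builds
# 3**2 = 9 identical [bet, site, minimum_odd] stakes, one per combined outcome.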
def best_match_freebet(site, freebet, sport="football", live=False, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Return the match that generates the best profit for a single freebet placed
and covered with real money.
"""
fact_live = 1 - 0.2 * live
odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1]
+ best_odds[i + 1:])
profit_function = lambda odds_to_check, i: gain2(odds_to_check, i) + 1
criteria = lambda odds_to_check, i: True
display_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, True)
result_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, False)
best_match_base(odds_function, profit_function, criteria, display_function,
result_function, site, sport, date_max, time_max, date_min,
time_min, freebet=True)
def best_match_freebet2(site, freebet, sport="football", live=False, date_max=None, time_max=None,
date_min=None, time_min=None):
"""
Return the match that generates the best profit for a single freebet placed
and covered with real money.
"""
fact_live = 1 - 0.2 * live
odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1]
+ best_odds[i + 1:])
profit_function = lambda | |
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (0,1,2,0) with multicharge (0,1)
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().residue_sequence()
3-residue sequence (1,0,2,0) with multicharge (0,1)
"""
return self._residue
def an_element(self):
r"""
Return a particular element of ``self``.
EXAMPLES::
sage: RowStandardTableau([[2,3],[1]]).residue_sequence(3).row_standard_tableaux().an_element()
[[2, 3], [1]]
sage: StandardTableau([[1,3],[2]]).residue_sequence(3).row_standard_tableaux().an_element()
[[1, 3], [2]]
sage: RowStandardTableauTuple([[[4]],[[2,3],[1]]]).residue_sequence(3,(0,1)).row_standard_tableaux().an_element()
sage: StandardTableauTuple([[[4]],[[1,3],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux().an_element()
([[4], [3], [1], [2]], [])
"""
try:
return self.unrank(0)
except ValueError:
return None
class RowStandardTableauTuples_residue_shape(RowStandardTableauTuples_residue):
"""
All row standard tableau tuples with a fixed residue and shape.
INPUT:
- ``shape`` -- the shape of the partitions or partition tuples
- ``residue`` -- the residue sequence of the label
EXAMPLES::
sage: res = RowStandardTableauTuple([[[3,6],[1]],[[5,7],[4],[2]]]).residue_sequence(3,(0,0))
sage: tabs = res.row_standard_tableaux([[2,1],[2,1,1]]); tabs
Row standard (2,1|2,1^2)-tableaux with 3-residue sequence (2,1,0,2,0,1,1) and multicharge (0,0)
sage: tabs.shape()
([2, 1], [2, 1, 1])
sage: tabs.level()
2
sage: tabs[:6]
[([[5, 7], [4]], [[3, 6], [1], [2]]),
([[5, 7], [1]], [[3, 6], [4], [2]]),
([[3, 7], [4]], [[5, 6], [1], [2]]),
([[3, 7], [1]], [[5, 6], [4], [2]]),
([[5, 6], [4]], [[3, 7], [1], [2]]),
([[5, 6], [1]], [[3, 7], [4], [2]])]
"""
def __init__(self, residue, shape):
r"""
Initialize ``self``.
.. WARNING::
Input is not checked; please use :class:`RowStandardTableauTuples`
to ensure the options are properly parsed.
TESTS::
sage: res = RowStandardTableauTuple([[[1,3]],[[4,5],[2,6]]]).residue_sequence(3,(0,0))
sage: tabs = res.row_standard_tableaux([[2],[2,2]])
sage: TestSuite(tabs).run()
"""
if residue.size() != shape.size():
raise ValueError('the size of the shape and the length of the residue sequence must coincide!')
super(RowStandardTableauTuples_residue_shape, self).__init__(residue)
self._shape = shape
# The _standard_tableaux attribute below is used to generate the
# tableaux in this class. The key observation is that any row standard
# tableau is standard if we stretch it out to a tableau with one row in
# each component
multicharge = residue.multicharge()
if shape.level() == 1:
standard_shape = [[r] for r in shape]
charge = [multicharge[0] - r for r in range(len(shape))]
else:
standard_shape = [[r] for mu in shape for r in mu]
charge = [multicharge[c] - r for c in range(len(shape))
for r in range(len(shape[c]))]
from sage.combinat.tableau_residues import ResidueSequence
res = ResidueSequence(residue.quantum_characteristic(), charge, residue.residues())
self._standard_tableaux = res.standard_tableaux(standard_shape)
# to convert the tableaux in self._standard_tableaux to row standard
# tableau we use the list _cumulative_lengths, which keeps track of the
# cumulative lengths of each component
if shape.level() == 1:
self._cumulative_lengths = [0, len(shape)]
else:
self._cumulative_lengths = [0]*(shape.level()+1)
for c in range(len(shape)):
self._cumulative_lengths[c+1] = self._cumulative_lengths[c] + len(shape[c])
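# Illustrative example (not from the original source): for the shape
# ([2, 1], [2, 1, 1]) used above, _cumulative_lengths becomes [0, 2, 5], so
# rows 0-1 of the stretched standard tableau belong to the first component
# and rows 2-4 to the second.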
def __contains__(self, t):
"""
Check containment of ``t`` in ``self``.
EXAMPLES::
sage: tabs = RowStandardTableauTuple([[[1,3]],[[4],[2]]]).residue_sequence(3,(0,1)).row_standard_tableaux([[2],[1,1]])
sage: [ [[1,2,3,4]], [[]] ] in tabs
False
sage: ([[1, 3]], [[4], [2]]) in tabs
True
"""
if not isinstance(t, self.element_class):
try:
t = RowStandardTableauTuple(t)
except ValueError:
return False
return (t.shape() == self._shape
and t.residue_sequence(self._quantum_characteristic,self._multicharge)
== self._residue)
def _repr_(self):
"""
Return the string representation of ``self``.
EXAMPLES::
sage: RowStandardTableau([[1,3],[2,4]]).residue_sequence(3).row_standard_tableaux([2,2])
Row standard (2^2)-tableaux with 3-residue sequence (0,2,1,0) and multicharge (0)
"""
return 'Row standard ({})-tableaux with {}'.format(self._shape._repr_compact_high(),
self._residue.__str__('and'))
def __iter__level_one(self):
r"""
Iterate through the row standard tableaux in ``self``.
We construct this sequence of tableaux recursively, as it is easier
(and more useful for applications to graded Specht modules).
EXAMPLES::
sage: RowStandardTableau([[2,4],[1,3]]).residue_sequence(3).row_standard_tableaux([2,2])[:] # indirect doctest
[[[3, 4], [1, 2]], [[2, 4], [1, 3]]]
"""
if self._size == 0:
yield RowStandardTableau([])
for t in self._standard_tableaux:
yield RowStandardTableau([s[0] for s in t])
def __iter__higher_levels(self):
r"""
Iterate through the row standard tableaux in ``self``.
We construct this sequence of tableaux recursively, as it is easier
(and more useful for applications to graded Specht modules).
EXAMPLES::
sage: RowStandardTableauTuple([[[2,4]],[[3,5],[1]]]).residue_sequence(3,[0,1]).row_standard_tableaux([[2],[2,1]])[:] # indirect doctest
[([[2, 4]], [[3, 5], [1]]),
([[1, 4]], [[3, 5], [2]]),
([[2, 3]], [[4, 5], [1]]),
([[1, 3]], [[4, 5], [2]])]
"""
if self._size == 0:
yield self.element_class(self, [[] for l in range(self._level)], check=False) # the empty tableaux
return
for t in self._standard_tableaux:
yield self.element_class(self,
[ [ t[r][0] for r in range(self._cumulative_lengths[c], self._cumulative_lengths[c+1])]
for c in range(self._level)],
check=False)
@lazy_attribute
def __iter__(self):
r"""
Iterate through the row standard tableaux in ``self``.
We construct this sequence of tableaux recursively, as it is easier
(and more useful for applications to graded Specht modules).
EXAMPLES::
sage: RowStandardTableau([[2,4],[1,3]]).residue_sequence(3).row_standard_tableaux([1,1,1,1])[:] # indirect doctest
[[[3], [1], [4], [2]], [[2], [1], [4], [3]]]
sage: RowStandardTableauTuple([[[2,4]],[[3,5],[1]]]).residue_sequence(3,[0,1]).row_standard_tableaux([[3],[1,1]])[:] # indirect doctest
[([[2, 4, 5]], [[3], [1]]),
([[1, 4, 5]], [[3], [2]]),
([[2, 3, 5]], [[4], [1]]),
([[1, 3, 5]], [[4], [2]])]
"""
if self._level == 1:
return self.__iter__level_one
else:
return self.__iter__higher_levels
#--------------------------------------------------
# Standard tableau tuples - parent classes
#--------------------------------------------------
class StandardTableauTuples(RowStandardTableauTuples):
"""
A factory class for the various classes of tuples of standard tableau.
INPUT:
There are three optional arguments:
- ``level`` -- the :meth:`~TableauTuples.level` of the tuples of tableaux
- ``size`` -- the :meth:`~TableauTuples.size` of the tuples of tableaux
- ``shape`` -- a list or a partition tuple specifying the :meth:`shape` of
the standard tableau tuples
It is not necessary to use the keywords. If they are not used then the first
integer argument specifies the :meth:`~TableauTuples.level` and the second
the :meth:`~TableauTuples.size` of the tableau tuples.
OUTPUT:
The appropriate subclass of :class:`StandardTableauTuples`.
A tuple of standard tableaux is a tuple of tableaux whose entries are
positive integers which increase from left to right along the rows, and from
top to bottom down the columns, in each component. The entries do NOT need
to increase from left to right across the components.
.. NOTE::
Sage uses the English convention for (tuples of) partitions and
tableaux: the longer rows are displayed on top. As with
:class:`PartitionTuple`, in sage the cells, or nodes, of partition
tuples are 0-based. For example, the (lexicographically) first cell
in any non-empty partition tuple is `[0,0,0]`.
EXAMPLES::
sage: tabs=StandardTableauTuples([[3],[2,2]]); tabs
Standard tableau tuples of shape ([3], [2, 2])
sage: tabs.cardinality()
70
sage: tabs[10:16]
[([[1, 2, 3]], [[4, 6], [5, 7]]),
([[1, 2, 4]], [[3, 6], [5, 7]]),
([[1, 3, 4]], [[2, 6], [5, 7]]),
([[2, 3, 4]], [[1, 6], [5, 7]]),
([[1, 2, 5]], [[3, 6], [4, 7]]),
([[1, 3, 5]], [[2, 6], [4, 7]])]
sage: tabs=StandardTableauTuples(level=3); tabs
Standard tableau tuples of level 3
sage: tabs[100]
([[1, 2], [3]], [], [[4]])
sage: StandardTableauTuples()[0]
()
TESTS::
sage: TestSuite( StandardTableauTuples() ).run()
sage: TestSuite( StandardTableauTuples(level=1) ).run()
sage: TestSuite( StandardTableauTuples(level=4) ).run()
sage: TestSuite( StandardTableauTuples(size=0) ).run(max_runs=50) # recursion depth exceeded with default max_runs
sage: TestSuite( StandardTableauTuples(size=6) ).run()
sage: TestSuite( StandardTableauTuples(level=1, size=0) ).run()
sage: TestSuite( StandardTableauTuples(level=1, size=0) ).run()
sage: TestSuite( StandardTableauTuples(level=1, size=10) ).run()
sage: TestSuite( StandardTableauTuples(level=4, size=0) ).run()
sage: TestSuite( StandardTableauTuples(level=4, size=0) ).run()
.. SEEALSO::
- :class:`TableauTuples`
- :class:`Tableau`
- :class:`StandardTableau`
- :class:`StandardTableauTuples`
"""
Element = StandardTableauTuple
level_one_parent_class = StandardTableaux_all # used in element_constructor
@staticmethod
def __classcall_private__(cls, *args, **kwargs):
r"""
This is a factory class which returns the appropriate parent based on
arguments.
See the documentation for :class:`StandardTableauTuples`
for more information.
EXAMPLES::
sage: StandardTableauTuples()
Standard tableau tuples
sage: StandardTableauTuples(4)
Standard tableau tuples of level 4
sage: StandardTableauTuples(4,3)
Standard tableau tuples of level 4 and size 3
sage: StandardTableauTuples([ [2,1],[1],[1,1,1],[3,2] ])
Standard tableau tuples of shape ([2, 1], [1], [1, 1, 1], [3, 2])
TESTS::
sage: StandardTableauTuples([ [2,1],[1],[1,1,1],[3,2,3] ])
Traceback (most recent call last):
...
ValueError: the shape must be a partition tuple
sage: P = PartitionTuples()
sage: pt = P([[1]]); pt
([1])
sage: StandardTableauTuples(pt)
Standard tableaux of shape [1]
"""
from sage.combinat.partition_tuple import PartitionTuple
# first check the keyword arguments
level = kwargs.get('level', None)
shape = kwargs.get('shape', None)
size = kwargs.get('size', None)
for key in kwargs:
if key not in ['level','shape','size']:
raise ValueError('%s is not a valid argument for StandardTableauTuples' % key)
# now process the positional arguments
if args:
#the first argument could be either the level or the shape
if isinstance(args[0], (int, Integer)):
if level is not None:
raise ValueError('the level was specified more than once')
else:
level = args[0]
else:
if shape is not None:
raise ValueError('the shape was specified more than once')
else:
shape = args[0] # we check that it is a PartitionTuple below
if len(args) == 2: # both the level and size were specified
if level is not None and size is not None:
raise ValueError('the level or | |
9 * m.b683 <= 0)
m.e918 = Constraint(expr= m.x591 - 9 * m.b684 <= 0)
m.e919 = Constraint(expr= m.x592 + 9 * m.b682 <= 9)
m.e920 = Constraint(expr= m.x593 + 9 * m.b683 <= 9)
m.e921 = Constraint(expr= m.x594 + 9 * m.b684 <= 9)
m.e922 = Constraint(expr= 5 * m.b685 + m.x775 == 0)
m.e923 = Constraint(expr= 4 * m.b686 + m.x776 == 0)
m.e924 = Constraint(expr= 6 * m.b687 + m.x777 == 0)
m.e925 = Constraint(expr= 8 * m.b688 + m.x778 == 0)
m.e926 = Constraint(expr= 7 * m.b689 + m.x779 == 0)
m.e927 = Constraint(expr= 6 * m.b690 + m.x780 == 0)
m.e928 = Constraint(expr= 6 * m.b691 + m.x781 == 0)
m.e929 = Constraint(expr= 9 * m.b692 + m.x782 == 0)
m.e930 = Constraint(expr= 4 * m.b693 + m.x783 == 0)
m.e931 = Constraint(expr= 10 * m.b694 + m.x784 == 0)
m.e932 = Constraint(expr= 9 * m.b695 + m.x785 == 0)
m.e933 = Constraint(expr= 5 * m.b696 + m.x786 == 0)
m.e934 = Constraint(expr= 6 * m.b697 + m.x787 == 0)
m.e935 = Constraint(expr= 10 * m.b698 + m.x788 == 0)
m.e936 = Constraint(expr= 6 * m.b699 + m.x789 == 0)
m.e937 = Constraint(expr= 7 * m.b700 + m.x790 == 0)
m.e938 = Constraint(expr= 7 * m.b701 + m.x791 == 0)
m.e939 = Constraint(expr= 4 * m.b702 + m.x792 == 0)
m.e940 = Constraint(expr= 4 * m.b703 + m.x793 == 0)
m.e941 = Constraint(expr= 3 * m.b704 + m.x794 == 0)
m.e942 = Constraint(expr= 2 * m.b705 + m.x795 == 0)
m.e943 = Constraint(expr= 5 * m.b706 + m.x796 == 0)
m.e944 = Constraint(expr= 6 * m.b707 + m.x797 == 0)
m.e945 = Constraint(expr= 7 * m.b708 + m.x798 == 0)
m.e946 = Constraint(expr= 2 * m.b709 + m.x799 == 0)
m.e947 = Constraint(expr= 5 * m.b710 + m.x800 == 0)
m.e948 = Constraint(expr= 2 * m.b711 + m.x801 == 0)
m.e949 = Constraint(expr= 4 * m.b712 + m.x802 == 0)
m.e950 = Constraint(expr= 7 * m.b713 + m.x803 == 0)
m.e951 = Constraint(expr= 4 * m.b714 + m.x804 == 0)
m.e952 = Constraint(expr= 3 * m.b715 + m.x805 == 0)
m.e953 = Constraint(expr= 9 * m.b716 + m.x806 == 0)
m.e954 = Constraint(expr= 3 * m.b717 + m.x807 == 0)
m.e955 = Constraint(expr= 7 * m.b718 + m.x808 == 0)
m.e956 = Constraint(expr= 2 * m.b719 + m.x809 == 0)
m.e957 = Constraint(expr= 9 * m.b720 + m.x810 == 0)
m.e958 = Constraint(expr= 3 * m.b721 + m.x811 == 0)
m.e959 = Constraint(expr= m.b722 + m.x812 == 0)
m.e960 = Constraint(expr= 9 * m.b723 + m.x813 == 0)
m.e961 = Constraint(expr= 2 * m.b724 + m.x814 == 0)
m.e962 = Constraint(expr= 6 * m.b725 + m.x815 == 0)
m.e963 = Constraint(expr= 3 * m.b726 + m.x816 == 0)
m.e964 = Constraint(expr= 4 * m.b727 + m.x817 == 0)
m.e965 = Constraint(expr= 8 * m.b728 + m.x818 == 0)
m.e966 = Constraint(expr= m.b729 + m.x819 == 0)
m.e967 = Constraint(expr= 2 * m.b730 + m.x820 == 0)
m.e968 = Constraint(expr= 5 * m.b731 + m.x821 == 0)
m.e969 = Constraint(expr= 2 * m.b732 + m.x822 == 0)
m.e970 = Constraint(expr= 3 * m.b733 + m.x823 == 0)
m.e971 = Constraint(expr= 4 * m.b734 + m.x824 == 0)
m.e972 = Constraint(expr= 3 * m.b735 + m.x825 == 0)
m.e973 = Constraint(expr= 5 * m.b736 + m.x826 == 0)
m.e974 = Constraint(expr= 7 * m.b737 + m.x827 == 0)
m.e975 = Constraint(expr= 6 * m.b738 + m.x828 == 0)
m.e976 = Constraint(expr= 2 * m.b739 + m.x829 == 0)
m.e977 = Constraint(expr= 8 * m.b740 + m.x830 == 0)
m.e978 = Constraint(expr= 4 * m.b741 + m.x831 == 0)
m.e979 = Constraint(expr= m.b742 + m.x832 == 0)
m.e980 = Constraint(expr= 4 * m.b743 + m.x833 == 0)
m.e981 = Constraint(expr= m.b744 + m.x834 == 0)
m.e982 = Constraint(expr= 2 * m.b745 + m.x835 == 0)
m.e983 = Constraint(expr= 5 * m.b746 + m.x836 == 0)
m.e984 = Constraint(expr= 2 * m.b747 + m.x837 == 0)
m.e985 = Constraint(expr= 9 * m.b748 + m.x838 == 0)
m.e986 = Constraint(expr= 2 * m.b749 + m.x839 == 0)
m.e987 = Constraint(expr= 9 * m.b750 + m.x840 == 0)
m.e988 = Constraint(expr= 5 * m.b751 + m.x841 == 0)
m.e989 = Constraint(expr= 8 * m.b752 + m.x842 == 0)
m.e990 = Constraint(expr= 4 * m.b753 + m.x843 == 0)
m.e991 = Constraint(expr= 2 * m.b754 + m.x844 == 0)
m.e992 = Constraint(expr= 3 * m.b755 + m.x845 == 0)
m.e993 = Constraint(expr= 8 * m.b756 + m.x846 == 0)
m.e994 = Constraint(expr= 10 * m.b757 + m.x847 == 0)
m.e995 = Constraint(expr= 6 * m.b758 + m.x848 == 0)
m.e996 = Constraint(expr= 3 * m.b759 + m.x849 == 0)
m.e997 = Constraint(expr= 4 * m.b760 + m.x850 == 0)
m.e998 = Constraint(expr= 8 * m.b761 + m.x851 == 0)
m.e999 = Constraint(expr= 7 * m.b762 + m.x852 == 0)
m.e1000 = Constraint(expr= 7 * m.b763 + m.x853 == 0)
m.e1001 = Constraint(expr= 3 * m.b764 + m.x854 == 0)
m.e1002 = Constraint(expr= 9 * m.b765 + m.x855 == 0)
m.e1003 = Constraint(expr= 4 * m.b766 + m.x856 == 0)
m.e1004 = Constraint(expr= 8 * m.b767 + m.x857 == 0)
m.e1005 = Constraint(expr= 6 * m.b768 + m.x858 == 0)
m.e1006 = Constraint(expr= 2 * m.b769 + m.x859 == 0)
m.e1007 = Constraint(expr= m.b770 + m.x860 == 0)
m.e1008 = Constraint(expr= 3 * m.b771 + m.x861 == 0)
m.e1009 = Constraint(expr= 8 * m.b772 + m.x862 == 0)
m.e1010 = Constraint(expr= 3 * m.b773 + m.x863 == 0)
m.e1011 = Constraint(expr= 4 * m.b774 + m.x864 == 0)
m.e1012 = Constraint(expr= m.b595 - m.b596 <= 0)
m.e1013 = Constraint(expr= m.b595 - m.b597 <= 0)
m.e1014 = Constraint(expr= m.b596 - m.b597 <= 0)
m.e1015 = Constraint(expr= m.b598 - m.b599 <= 0)
m.e1016 = Constraint(expr= m.b598 - m.b600 <= 0)
m.e1017 = Constraint(expr= m.b599 - m.b600 <= 0)
m.e1018 = Constraint(expr= m.b601 - m.b602 <= 0)
m.e1019 = Constraint(expr= m.b601 - m.b603 <= 0)
m.e1020 = Constraint(expr= m.b602 - m.b603 <= 0)
m.e1021 = Constraint(expr= m.b604 - m.b605 <= 0)
m.e1022 = Constraint(expr= m.b604 - m.b606 <= 0)
m.e1023 = Constraint(expr= m.b605 - m.b606 <= 0)
m.e1024 = Constraint(expr= m.b607 - m.b608 <= 0)
m.e1025 = Constraint(expr= m.b607 - m.b609 <= 0)
m.e1026 = Constraint(expr= m.b608 - m.b609 <= 0)
m.e1027 = Constraint(expr= m.b610 - m.b611 <= 0)
m.e1028 = Constraint(expr= m.b610 - m.b612 <= 0)
m.e1029 = Constraint(expr= m.b611 - m.b612 <= 0)
m.e1030 = Constraint(expr= m.b613 - m.b614 <= 0)
m.e1031 = Constraint(expr= m.b613 - m.b615 <= 0)
m.e1032 = Constraint(expr= m.b614 - m.b615 <= 0)
m.e1033 = Constraint(expr= m.b616 - m.b617 <= 0)
m.e1034 = Constraint(expr= m.b616 - m.b618 <= 0)
m.e1035 = Constraint(expr= m.b617 - m.b618 <= 0)
m.e1036 = Constraint(expr= m.b619 - m.b620 <= 0)
m.e1037 = Constraint(expr= m.b619 - m.b621 <= 0)
m.e1038 = Constraint(expr= m.b620 - m.b621 <= 0)
m.e1039 = Constraint(expr= m.b622 - m.b623 <= 0)
m.e1040 = Constraint(expr= m.b622 - m.b624 <= 0)
m.e1041 = Constraint(expr= m.b623 - m.b624 <= 0)
m.e1042 = Constraint(expr= m.b625 - m.b626 <= 0)
m.e1043 = Constraint(expr= m.b625 - m.b627 <= 0)
m.e1044 = Constraint(expr= m.b626 - m.b627 <= 0)
m.e1045 = Constraint(expr= m.b628 - m.b629 <= 0)
m.e1046 = Constraint(expr= m.b628 - m.b630 <= 0)
m.e1047 = Constraint(expr= m.b629 - m.b630 <= 0)
m.e1048 = Constraint(expr= m.b631 - m.b632 <= 0)
m.e1049 = Constraint(expr= m.b631 - m.b633 <= 0)
m.e1050 = Constraint(expr= m.b632 - m.b633 <= 0)
m.e1051 = Constraint(expr= m.b634 - m.b635 <= 0)
m.e1052 = Constraint(expr= m.b634 - m.b636 <= 0)
m.e1053 = Constraint(expr= m.b635 - m.b636 <= 0)
m.e1054 = Constraint(expr= m.b637 - m.b638 <= 0)
m.e1055 = Constraint(expr= m.b637 - m.b639 <= 0)
m.e1056 = Constraint(expr= m.b638 - m.b639 <= 0)
m.e1057 = Constraint(expr= m.b640 - m.b641 <= 0)
m.e1058 = Constraint(expr= m.b640 - m.b642 <= 0)
m.e1059 = Constraint(expr= m.b641 - m.b642 <= 0)
m.e1060 = Constraint(expr= m.b643 - m.b644 <= 0)
m.e1061 = Constraint(expr= m.b643 - m.b645 <= 0)
m.e1062 = Constraint(expr= m.b644 - m.b645 <= 0)
m.e1063 = Constraint(expr= m.b646 - m.b647 <= 0)
m.e1064 = Constraint(expr= m.b646 - m.b648 <= 0)
m.e1065 = Constraint(expr= m.b647 - m.b648 <= 0)
m.e1066 = Constraint(expr= m.b649 - m.b650 <= 0)
m.e1067 = Constraint(expr= m.b649 - m.b651 <= 0)
m.e1068 = Constraint(expr= m.b650 - m.b651 <= 0)
m.e1069 = Constraint(expr= m.b652 - m.b653 <= 0)
m.e1070 = Constraint(expr= m.b652 - m.b654 <= 0)
m.e1071 = Constraint(expr= m.b653 - m.b654 <= 0)
m.e1072 = Constraint(expr= m.b655 - m.b656 <= 0)
m.e1073 = Constraint(expr= m.b655 - m.b657 <= 0)
m.e1074 = Constraint(expr= m.b656 - m.b657 <= 0)
encountered an EXE/SGATE overlap error.',
147: 'Formatter Correction Buffer underrun error.',
148: 'Formatter Correction Buffer overrun error.',
149: 'Formatted detected NRZ interface protocol error.',
150: 'Media Manager\'s MX Overrun error.',
151: 'Media Manager\'s NX Overrun error.',
152: 'Media Manager\'s TDT Request error.',
153: 'Media Manager\'s SST Overrun error.',
154: 'Servo PZT calibration failed.',
155: 'Fast I/O- Servo Data Update Timeout error.',
156: 'Fast I/O- First wedge Servo data Timeout error.',
157: 'Fast I/O- Max samples per collection exceeded.',
158: 'CR memory EDC error',
159: 'SP block detected an EDC error',
160: 'Preamp heater open/short fault.',
161: 'RW Channel fault- Memory buffer overflow or underflow or parity error during write.',
162: 'RW Channel fault- Memory buffer overflow or read data path FIFO underflow in legacy NRZ mode.',
163: 'RW Channel fault- Preamp fault during R/W.',
164: 'RW Channel fault- SGATE, RGATE, or WGATE overlap.',
165: 'RW Channel fault- Mismatch in split sector controls or sector size controls.',
166: 'RW Channel fault- Write clock or NRZ clock is not running.',
167: 'RW Channel fault- SGATE, RGATE, or WGATE asserted during calibration.',
168: 'RW Channel fault- RWBI changed during a read or write event.',
169: 'RW Channel fault- Mode overlap flag.',
170: 'RW Channel fault- Inappropriate WPLO or RPLO behavior.',
171: 'RW Channel fault- Write aborted.',
172: 'RW Channel fault- Bit count late.',
173: 'RW Channel fault- Servo overlap error',
174: 'RW Channel fault- Last data fault',
176: 'PES threshold in field is too far from the same value calculated in the factory.',
177: 'Not enough Harmonic Ratio samples were gathered',
178: 'Sigma of Harmonic Ratio samples after all discards exceeded the limit',
179: 'No EBMS contact fault, even at lowest threshold value',
180: 'EBMS fault still detected at highest threshold value',
181: 'Formatter detected BFI error.',
182: 'Formatter FIFO Interface error.',
183: 'Media sequencer- Disc sequencer Data transfer size mismatch.',
184: 'Correction buffer active while disc sequencer timeout error (this error code is used to fix the hardware skip mask read transfer issue).',
185: 'Seagate Iterative Decoder - Channel RSM fault',
186: 'Seagate Iterative Decoder - Channel WSM fault',
187: 'Seagate Iterative Decoder - Channel BCI fault',
188: 'Seagate Iterative Decoder - Channel SRC fault',
189: 'Seagate Iterative Decoder - Channel SAB fault',
190: 'Seagate Iterative Decoder - Channel read gate overflow error',
192: 'Seagate Iterative Decoder - Channel SMB Bus B parity error',
193: 'Seagate Iterative Decoder - Channel SMB buffer error on write',
194: 'Seagate Iterative Decoder - Channel SOB buffer error on write',
195: 'Seagate Iterative Decoder - Channel SOB parity error',
196: 'Seagate Iterative Decoder - Channel SAB buffer error',
197: 'Seagate Iterative Decoder - Channel SAB bend error',
198: 'Seagate Iterative Decoder - Channel LLI buffer sync error',
199: 'Seagate Iterative Decoder - Channel LLI data length error on write',
200: 'Seagate Iterative Decoder - Channel LLI framing error on write',
201: 'Seagate Iterative Decoder - Channel LLI write status error',
202: 'Seagate Iterative Decoder - Channel LLI pipe state error (Bonanza), - Channel RSM Gross Error (Caribou- Luxor)',
203: 'Seagate Iterative Decoder - Channel decoder microcode error',
204: 'Seagate Iterative Decoder - Channel encoder microcode error',
205: 'Seagate Iterative Decoder - Channel NRZ parity error',
206: 'Seagate Iterative Decoder - Symbols per Sector mismatch error',
207: 'Seagate Iterative Decoder - Channel SMB Bus A parity error',
208: 'Seagate Iterative Decoder - Channel SMB NRZ parity error',
209: 'Seagate Iterative Decoder - Channel SOB Buffer error on read',
210: 'Seagate Iterative Decoder - Channel SMB Buffer error on read',
211: 'Seagate Iterative Decoder - Channel LLI data length error on read',
212: 'Seagate Iterative Decoder - Channel LLI framing error on read',
217: 'Seagate Iterative Decoder - Channel WSM Gross error',
218: 'Seagate Iterative Decoder - Channel ERF buffer error',
224: 'Preamp low voltage error',
225: 'Preamp low write data frequency at common point error',
226: 'Preamp write head open error',
227: 'Preamp write head shorted to ground error',
228: 'Preamp TA sensor open error',
229: 'Preamp temperature error',
230: 'Preamp write without heat error',
231: 'Preamp writer off in write error',
232: 'Preamp writer output buffer error',
233: 'Preamp low write data frequency at the head error',
234: 'Preamp FOS error',
235: 'Preamp TA or contact detect error',
236: 'Preamp SWOT error',
237: 'Preamp serial port communication error',
238: 'HSC magnitude overflow error',
240: 'RW Channel - RDATA valid overlap fault',
241: 'RW Channel - RD valid gap fault',
244: 'RW Channel - W Parity not ready',
247: 'RW Channel - Wrong sector length',
248: 'RW Channel - Encoder overflow error',
249: 'RW Channel - Encoder early termination fault',
250: 'RW Channel - Iteration parameter error',
251: 'RW Channel - MXP write fault',
252: 'RW Channel - Symbol count error',
253: 'RW Channel - RD Incomplete error',
254: 'RW Channel - RD Data VGA error',
255: 'RW Channel - RD Data TA error',
'L1': 'Hardware Error',
'L2': 'Internal Target Failure'},
1: {2: 'RW Channel - RFM Wrong sector length',
3: 'RW Channel - RFM FIFO underflow',
4: 'RW Channel - RFM FIFO Overflow',
5: 'RW Channel - Vector flow errors',
32: 'HSC - An error occurred when attempting to open the file to be used for Harmonic Sensor Circuitry data collection.',
33: 'HSC - The Standard Deviation of the VGAS data collected by the Harmonic Sensor Circuitry was zero.',
34: 'HSC - The Standard Deviation of the 3rd Harmonic data collected by the Harmonic Sensor Circuitry was zero.',
35: 'HSC - The Servo Loop Code returned at the completion of Harmonic Sensor Circuitry data collection was not 0.',
36: 'HSC - An invalid write pattern was specified for Harmonic Sensor Circuitry data collection.',
37: 'AR Sensor - The AR Sensor DAC to Target calculation encountered the need to take the square root of a negative value.',
38: 'AR Sensor - The AR Sensor encountered an error when attempting to open the Background Task file.',
39: 'AR Sensor - The AR Sensor encountered an error when attempting to open the General Purpose Task file.',
40: "AR Sensor - The size of the Background Task file is inadequate to satisfy the AR Sensor's requirements.",
41: "AR Sensor - The size of the General Purpose Task file is inadequate to satisfy the AR Sensor's requirements.",
42: 'AR Sensor - The FAFH Parameter File revision is incompatible with the AR Sensor.',
43: 'AR Sensor - The AR Sensor Descriptor in the FAFH Parameter File is invalid.',
44: 'AR Sensor - The Iterative Call Index specified when invoking the AR Sensor exceeds the maximum supported value.',
45: 'AR Sensor - The AR Sensor encountered an error when performing a Track Position request.',
46: 'AR Sensor - The Servo Data Sample Count specified when invoking the AR Sensor exceeds the maximum supported value.',
47: 'AR Sensor - The AR Sensor encountered an error when attempting to set the read channel frequency.',
48: 'AR Sensor - The 3rd Harmonic value measured by the AR Sensor was 0.',
96: 'RW Channel - LOSSLOCKR fault',
97: 'RW Channel - BLICNT fault',
98: 'RW Channel - LLI ABORT fault',
99: 'RW Channel - WG FILLR fault',
100: 'RW Channel - WG FILLW fault',
101: 'RW Channel - CHAN fault',
102: 'RW Channel - FRAG NUM fault',
103: 'RW Channel - WTG fault',
104: 'RW Channel - CTG fault',
105: 'RW Channel - NZRCLR fault',
106: 'RW Channel - Read synthesizer prechange fail fault',
107: 'RW Channel - Servo synthesizer prechange fail fault',
108: 'RW Channel - Servo Error detected prior to halting Calibration Processor',
109: 'RW Channel - Unable to Halt Calibration Processor',
110: 'RW Channel - ADC Calibrations already disabled',
111: 'RW Channel - Calibration Processor Registers have already been saved',
112: 'RW Channel - Address where Calibration Processor Registers are to
can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object, the HTTP status code, and the headers.
If the method is called asynchronously,
returns the request thread.
:rtype: (ResourceListOfProcessedCommand, int, HTTPHeaderDict)
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'from_as_at',
'to_as_at',
'filter',
'page',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio_commands" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 16384): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_portfolio_commands`, length must be less than or equal to `16384`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_portfolio_commands`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_portfolio_commands`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) > 500): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_portfolio_commands`, length must be less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_portfolio_commands`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and not re.search(r'^[a-zA-Z0-9\+\/]*={0,3}$', local_var_params['page']): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `get_portfolio_commands`, must conform to the pattern `/^[a-zA-Z0-9\+\/]*={0,3}$/`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_portfolio_commands`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_portfolio_commands`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'from_as_at' in local_var_params and local_var_params['from_as_at'] is not None: # noqa: E501
query_params.append(('fromAsAt', local_var_params['from_as_at'])) # noqa: E501
if 'to_as_at' in local_var_params and local_var_params['to_as_at'] is not None: # noqa: E501
query_params.append(('toAsAt', local_var_params['to_as_at'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3923'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfProcessedCommand",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/portfolios/{scope}/{code}/commands', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_portfolio_metadata(self, scope, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetPortfolioMetadata: Get access metadata rules for a portfolio # noqa: E501
Pass the scope and portfolio code parameters to retrieve the AccessMetadata associated with a portfolio # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_metadata(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio Access Metadata Rule to retrieve. (required)
:type scope: str
:param code: Portfolio code (required)
:type code: str
:param effective_at: The effectiveAt datetime at which to retrieve the access metadata rule.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the portfolio access metadata.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: dict(str, list[AccessMetadataValue])
"""
kwargs['_return_http_data_only'] = True
return self.get_portfolio_metadata_with_http_info(scope, code, **kwargs) # noqa: E501
def get_portfolio_metadata_with_http_info(self, scope, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetPortfolioMetadata: Get access metadata rules for a portfolio # noqa: E501
Pass the scope and portfolio code parameters to retrieve the AccessMetadata associated with a portfolio # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_metadata_with_http_info(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio Access Metadata Rule to retrieve. (required)
:type scope: str
:param code: Portfolio code (required)
:type code: str
:param effective_at: The effectiveAt datetime at which to retrieve the access metadata rule.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the portfolio access metadata.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object, the HTTP status code, and the headers.
If the method is called asynchronously,
returns the request thread.
:rtype: (dict(str, list[AccessMetadataValue]), int, HTTPHeaderDict)
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'effective_at',
'as_at'
# coding=utf-8
# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Tuple
import numpy as np
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from ...modeling_flax_outputs import (
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxBaseModelOutputWithPooling,
FlaxBaseModelOutputWithPoolingAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxMaskedLMOutput,
FlaxMultipleChoiceModelOutput,
FlaxNextSentencePredictorOutput,
FlaxQuestionAnsweringModelOutput,
FlaxSequenceClassifierOutput,
FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
@flax.struct.dataclass
class FlaxBertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
prediction_logits: jnp.ndarray = None
seq_relationship_logits: jnp.ndarray = None
hidden_states: Optional[Tuple[jnp.ndarray]] = None
attentions: Optional[Tuple[jnp.ndarray]] = None
BERT_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`BertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class FlaxBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
config: BertConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.word_embeddings = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.position_embeddings = nn.Embed(
self.config.max_position_embeddings,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.token_type_embeddings = nn.Embed(
self.config.type_vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
)
self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
# Embed
inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
position_embeds = self.position_embeddings(position_ids.astype("i4"))
token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
# Sum all embeddings
hidden_states = inputs_embeds + token_type_embeddings + position_embeds
# Layer Norm
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
return hidden_states
class FlaxBertSelfAttention(nn.Module):
config: BertConfig
causal: bool = False
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.head_dim = self.config.hidden_size // self.config.num_attention_heads
if self.config.hidden_size % self.config.num_attention_heads != 0:
raise ValueError(
    f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
    f"`config.num_attention_heads`: {self.config.num_attention_heads}"
)
self.query = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.key = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.value = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
@nn.compact
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
layer_head_mask,
key_value_states: Optional[jnp.array] = None,
init_cache: bool = False,
deterministic=True,
output_attentions: bool = False,
):
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.query(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.key(key_value_states)
value_states = self.value(key_value_states)
else:
# self_attention
key_states = self.key(hidden_states)
value_states = self.value(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
<gh_stars>0
import numpy as np
import random
from collections import namedtuple, deque
from dqn.data_structures import SumSegmentTree, MinSegmentTree, MaxPriorityQueue
Experience = namedtuple("Experience", ["state_t", "action_t", "reward_tn", "state_tpn", "gamma_n"])
class Simple:
def __init__(self, capacity):
self._memories = deque(maxlen=capacity)
def store(self, state_t, action_t, reward_tn, state_tpn, gamma_n):
experience = Experience(state_t, action_t, reward_tn, state_tpn, gamma_n)
self._memories.append(experience)
def sample(self, batch_size):
# TODO: Perhaps ensure len(self._memories) >= batch_size
# Note: random.sample does not allow repeats. Do we want to allow them ?
return random.sample(self._memories, batch_size)
def __len__(self):
return len(self._memories)
# TODO: Think about storing the data directly in the segment trees, similar to how we do it for rank based
class Proportional:
def __init__(self, capacity, alpha_scheduler, beta_scheduler, epsilon=1e-5):
self._capacity = capacity # NOTE: The capacity here might be different than the segment trees' (the next power of 2). Does this cause any issues ? (I don't believe so)
self._alpha_scheduler = alpha_scheduler
self._beta_scheduler = beta_scheduler
self._sum_tree = SumSegmentTree(capacity)
self._min_tree = MinSegmentTree(capacity) # Is it more efficient to use a Min Priority Queue with a cap similar to the one in RankBased ?
self._epsilon = epsilon
self._memories = []
self._oldest_index = 0
self._max_priority = 1
def store(self, state_t, action_t, reward_tn, state_tpn, gamma_n):
experience = Experience(state_t, action_t, reward_tn, state_tpn, gamma_n)
if len(self._memories) < self._capacity:
self._memories.append(experience)
else:
self._memories[self._oldest_index] = experience
self._sum_tree[self._oldest_index] = self._max_priority
self._min_tree[self._oldest_index] = self._max_priority
self._oldest_index = (self._oldest_index + 1) % self._capacity
def sample(self, t, batch_size):
if len(self._memories) < batch_size:
raise RuntimeError("Not enough stored memories (" + str(len(self._memories)) + ") for batch_size of size " + str(batch_size))
total_priority_sum = self._sum_tree.sum()
segment_indexes = np.linspace(self._epsilon, total_priority_sum, batch_size + 1) # The smallest possible priority is of size |0| + eps = eps
indexes = []
for i in range(batch_size):
prefix_sum = np.random.uniform(low=segment_indexes[i], high=segment_indexes[i + 1])
indexes.append(self._sum_tree.prefix_sum_index(prefix_sum))
total_sum = self._sum_tree.sum()
sampled_probs = np.zeros(batch_size)
experiences = []
for i, index in enumerate(indexes):
prob = self._sum_tree[index] / total_sum
sampled_probs[i] = prob
experiences.append(self._memories[index])
min_prob = self._min_tree.min() / total_sum
beta = self._beta_scheduler.value(t)
max_weight = np.power(len(self._memories) * min_prob, -beta)
weights = np.power(len(self._memories) * sampled_probs, -beta) / max_weight
states_t, actions_t, rewards_tn, stats_tpn, gammas_n = zip(*experiences)
return states_t, actions_t, rewards_tn, stats_tpn, gammas_n, weights, indexes
def update_priorities(self, t, indexes, priorities):
alpha = self._alpha_scheduler.value(t)
priorities = np.abs(priorities) + self._epsilon # Note: Our implementation may not really be affected by removing an epsilon (uniform sampling from bounds.. unless a priority of 0 is on the end bounds and so prefix_sum never goes that far)
for index, priority in zip(indexes, priorities):
self._sum_tree[index] = priority**alpha
self._min_tree[index] = priority**alpha
self._max_priority = max(priority**alpha, self._max_priority)
def __len__(self):
return len(self._memories)
class RankBased:
# TODO: Precompute segments (requires a known batch_size = num_segments)
# Note the segments change based on N AND alpha
def __init__(self, capacity, alpha_scheduler, beta_scheduler, epsilon=1e-5, num_stores_until_sort=float('inf')):
self._alpha_scheduler = alpha_scheduler
self._beta_scheduler = beta_scheduler
self._priority_queue = MaxPriorityQueue(capacity)
self.num_stores_until_sort = num_stores_until_sort
self._stores_since_sort = 0
def _experience_probs(self, alpha):
# Returns probability of each experience in the priority queue by index (ordered) <-- make this clearer
ranks = np.arange(1, len(self._priority_queue) + 1)
priorities = 1 / ranks
powers = np.power(priorities, alpha)
probabilities = powers / np.sum(powers)
return probabilities
def _segment(self, probs, num_segments):
## TODO: Explain extra segment at end in doc-string (and says N + 1 numbers)
## TODO: Talk about how this algorithm isn't perfect: Note the addition of cdf part (either way) makes it strange
cdf = 0
prob_per_segment = 1 / num_segments
next_prob_boundary = prob_per_segment
segment_starts = [0]
for i in range(len(probs)):
if cdf >= next_prob_boundary:
segment_starts.append(i)
next_prob_boundary += prob_per_segment
cdf += probs[i]
segment_starts.append(len(self._priority_queue))
return segment_starts
def store(self, state_t, action_t, reward_tn, state_tpn, gamma_n):
experience = Experience(state_t, action_t, reward_tn, state_tpn, gamma_n)
max_priority = self._priority_queue.max_priority()
self._priority_queue.insert(max_priority, experience)
if self._stores_since_sort >= self.num_stores_until_sort:
self.sort()
self._stores_since_sort = 0
else:
self._stores_since_sort += 1
def sample(self, t, batch_size):
### TODO: Error when sampling without enough memories in storage
experiences = []
order_ids = []
sampled_probs = np.zeros(batch_size)
alpha = self._alpha_scheduler.value(t)
all_probs = self._experience_probs(alpha)
prob_segments = self._segment(all_probs, batch_size)
# Sample one transition from each segment (with each segment being of nearly equal probability)
for i in range(len(prob_segments) - 1):
index = random.randint(prob_segments[i], prob_segments[i + 1] - 1) # sample in range [start, next_start)
_, order_id, experience = self._priority_queue[index]
experiences.append(experience)
order_ids.append(order_id)
sampled_probs[i] = all_probs[index]
min_prob = all_probs[-1] # Note: This should eventually become a constant.. might be a faster method
beta = self._beta_scheduler.value(t)
max_weight = (len(self._priority_queue) * min_prob)**(-beta)
weights = np.power(len(self._priority_queue) * sampled_probs, -beta) / max_weight
states_t, actions_t, rewards_tn, stats_tpn, gammas_n = zip(*experiences)
return states_t, actions_t, rewards_tn, stats_tpn, gammas_n, weights, order_ids
def update_priorities(self, t, indexes, priorities):
priorities = np.abs(priorities)
self._priority_queue.update_priorities(indexes, priorities)
def sort(self):
self._priority_queue.sort()
def __len__(self):
return len(self._priority_queue)
##if __name__ == '__main__':
## test = Standard(capacity=5)
## test.store(1, 2, 3, 4, 0)
## test.store(4, 5, 6, 7, 0)
## test.store(8, 9, 10, 11, 0)
## test.store(12, 13, 14, 15, 0)
## test.store(16, 17, 18, 19, 0)
## print(test._memories)
## print(test.sample(3))
##if __name__ == '__main__':
## import annealing_schedules
## from data_structures import SumSegmentTree, MinSegmentTree, MaxPriorityQueue
##
## alpha_scheduler = annealing_schedules.Constant(0.7)
## beta_scheduler = annealing_schedules.Constant(0.5)
## test = RankBased(8, alpha_scheduler, beta_scheduler)
#### test = Proportional(8, alpha_scheduler, beta_scheduler)
## test.store(1, 2, 3, 4, 0)
## test.store(4, 5, 6, 7, 0)
## test.store(8, 9, 10, 11, 0)
## samples = test.sample(0, 3)
## test.update_priorities(2, samples[-1], [0.67, 1.23, 0.23])
## print(test.sample(1, 3))
## #print(test._max_priority)
###### The below (and remaining text) are notes on the rank based algorithm implementation and some commented out tests #####
## Notes on algorithm implementation:
## Two paragraphs from the paper: https://arxiv.org/pdf/1511.05952.pdf TODO: Put in name of paper
## For the rank-based variant, we can approximate the cumulative density function with a piecewise
## linear function with k segments of equal probability. The segment boundaries can be precomputed
## (they change only when N or α change). At runtime, we sample a segment, and then sample uniformly
## among the transitions within it. This works particularly well in conjunction with a minibatchbased
## learning algorithm: choose k to be the size of the minibatch, and sample exactly one transition
## from each segment – this is a form of stratified sampling that has the added advantage of balancing
## out the minibatch (there will always be exactly one transition with high magnitude δ, one with
## medium magnitude, etc).
## Our final
## solution was to store transitions in a priority queue implemented with an array-based binary heap.
## The heap array was then directly used as an approximation of a sorted array, which is infrequently
## sorted once every 10^6
## steps to prevent the heap becoming too unbalanced. This is an unconventional
## use of a binary heap, however our tests on smaller environments showed learning was unaffected
## compared to using a perfectly sorted array. This is likely due to the last-seen TD-error only being a
## proxy for the usefulness of a transition and our use of stochastic prioritized sampling. A small improvement
## in running time came from avoiding excessive recalculation of partitions for the sampling
## distribution. We reused the same partition for values of N that are close together and by updating
## α and β infrequently. Our final implementation for rank-based prioritization produced an additional
## 2%-4% increase in running time and negligible additional memory usage. This could be reduced
## further in a number of ways, e.g. with a more efficient heap implementation, but it was good enough
## for our experiments.
# Understanding the first paragraph: TODO: Perhaps put this right under the first paragraph
## The CDF can be used with a uniform distribution for sampling from a discrete "pdf"
# For example: X can be {1, 2, 3}. P(X = x) = [0.1, 0.4, 0.5] (i.e. P(X = 2) = 0.4)
# We sample from a uniform distribution u = U[0, 1]
# We then use the CDF and u to sample from our discrete distribution:
# if u < 0.1: return 1
# if u < 0.1 + 0.4: return 2
# if u < 0.1 + 0.4 + 0.5: return 3
# Note: The CDF is P(X < x) = [0.1, 0.1 + 0.4, 0.1 + 0.4 + 0.5] = [0.1, 0.5, 1]
# "we can approximate the cumulative density | |
which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class GetClusterConfigDownloadUrlResponse(_messages.Message):
r"""Response which contains a source location of the backup's cluster
configuration.
Fields:
signedUrl: Required. signed_url which will be used by the agent to
download cluster config.
"""
signedUrl = _messages.StringField(1)
class GetClusterConfigUploadUrlResponse(_messages.Message):
r"""Response which contains a target location for the backup's cluster
configuration.
Fields:
signedUrl: Required. signed_url which will be used by the agent to upload
cluster config.
"""
signedUrl = _messages.StringField(1)
class GkebackupProjectsLocationsBackupPlansBackupsCreateRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsCreateRequest object.
Fields:
backup: A Backup resource to be passed as the request body.
backupId: The client provided name when a Backup resource is created. The
name MUST satisfy ALL requirements listed below: a. be unique within the
BackupPlan to which the Backup is created. b. be 1-63 characters long c.
comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) . d. matches
the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
parent: Required. Parent BackupPlan container to create Backup in. Format:
projects/{project}/locations/{location}/backupPlans/{backup_plan}
"""
backup = _messages.MessageField('Backup', 1)
backupId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
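# A minimal commented sketch of building the request above (project, location and plan names are
# hypothetical; assumes the usual apitools keyword-argument constructor for _messages.Message):
#
#     request = GkebackupProjectsLocationsBackupPlansBackupsCreateRequest(
#         parent='projects/my-project/locations/us-central1/backupPlans/my-plan',
#         backupId='nightly-backup-001',  # 1-63 chars, matching [a-z]([-a-z0-9]*[a-z0-9])?
#         backup=Backup(),
#     )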
class GkebackupProjectsLocationsBackupPlansBackupsDeleteRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsDeleteRequest object.
Fields:
etag: etag, if provided, it must match the server's etag for the delete to
happen.
force: If set to true, any volumeBackups from this backup will also be
deleted. Otherwise, the request will only succeed if the backup has no
volumeBackup. (Per API guideline https://google.aip.dev/135#cascading-
delete)
name: Required. Name of the Backup resource. Format: projects/{project}/lo
cations/{location}/backupPlans/{backup_plan}/backups/{backup} The
{backup} field can either be the user specified Backup name or the
server generated UID for this backup.
"""
etag = _messages.StringField(1)
force = _messages.BooleanField(2)
name = _messages.StringField(3, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsGetClusterConfigDownloadUrlRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsGetClusterConfigDownloadUr
lRequest object.
Fields:
backup: Required. Full name of the Backup resource. Format: projects/{proj
ect}/locations/{location}/backupPlans/{backup_plan}/backups/{backup} The
{backup} field can either be the user specified Backup name or the
server generated UID for this backup.
"""
backup = _messages.StringField(1, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsGetClusterConfigUploadUrlRequest(_messages.Message):
r"""A
GkebackupProjectsLocationsBackupPlansBackupsGetClusterConfigUploadUrlRequest
object.
Fields:
backup: Required. Full name of the Backup resource. Format: projects/{proj
ect}/locations/{location}/backupPlans/{backup_plan}/backups/{backup} The
{backup} field can either be the user specified Backup name or the
server generated UID for this backup.
"""
backup = _messages.StringField(1, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsGetIamPolicyRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsGetIamPolicyRequest
object.
Fields:
options_requestedPolicyVersion: Optional. The policy format version to be
returned. Valid values are 0, 1, and 3. Requests specifying an invalid
value will be rejected. Requests for policies with any conditional
bindings must specify version 3. Policies without any conditional
bindings may specify any valid value or leave the field unset. To learn
which resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
options_requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
resource = _messages.StringField(2, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsGetRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsGetRequest object.
Fields:
name: Required. Full name of the Backup resource. Format: projects/{projec
t}/locations/{location}/backupPlans/{backup_plan}/backups/{backup} The
{backup} field can either be the user specified Backup name or the
server generated UID for this backup.
"""
name = _messages.StringField(1, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsListRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsListRequest object.
Fields:
filter: List filter.
orderBy: Sort results.
pageSize: If not specified, a default value will be chosen by the service.
Regardless of the page_size value, the response may include a partial
list and a caller should only rely on response's next_page_token to
determine if there are more instances left to be queried.
pageToken: Token returned by previous call to `ListBackupsRequest` which
specifies the position in the list from where to continue listing the
resources.
parent: Required. Parent BackupPlan container. Format:
projects/{project}/locations/{location}/backupPlans/{backup_plan}
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsPatchRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsPatchRequest object.
Fields:
backup: A Backup resource to be passed as the request body.
etag: etag, if provided, it must match the server's etag for the update to
happen.
name: Output only. The fully qualified name of the Backup.
projects/*/locations/*/backupPlans/*/backups/*
updateMask: Fields to be updated.
"""
backup = _messages.MessageField('Backup', 1)
etag = _messages.StringField(2)
name = _messages.StringField(3, required=True)
updateMask = _messages.StringField(4)
class GkebackupProjectsLocationsBackupPlansBackupsSetIamPolicyRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsSetIamPolicyRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class GkebackupProjectsLocationsBackupPlansBackupsSubmitClusterConfigRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsSubmitClusterConfigRequest
object.
Fields:
backup: Required. Full name of the Backup resource. Format: projects/{proj
ect}/locations/{location}/backupPlans/{backup_plan}/backups/{backup} The
{backup} field can either be the user specified Backup name or the
server generated UID for this backup.
submitClusterConfigRequest: A SubmitClusterConfigRequest resource to be
passed as the request body.
"""
backup = _messages.StringField(1, required=True)
submitClusterConfigRequest = _messages.MessageField('SubmitClusterConfigRequest', 2)
class GkebackupProjectsLocationsBackupPlansBackupsTestIamPermissionsRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsCreateRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsCreateRequest
object.
Fields:
parent: Required. The Backup to create VolumeBackup in. Format: projects/{
project}/locations/{location}/backupPlans/{backup_plan}/backups/{backup}
The {backup} field can either be the user specified Backup name or the
server generated UID for this backup.
volumeBackup: A VolumeBackup resource to be passed as the request body.
volumeBackupId: Required. The client provided name when a VolumeBackup
resource is created. The name MUST satisfy ALL requirements listed
below: a. be unique within the Backup to which the VolumeBackup is
created. b. be 1-63 characters long c. comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) . d. matches the regular
expression `[a-z]([-a-z0-9]*[a-z0-9])?`
"""
parent = _messages.StringField(1, required=True)
volumeBackup = _messages.MessageField('VolumeBackup', 2)
volumeBackupId = _messages.StringField(3)
class GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsGetIamPolicyRequest(_messages.Message):
r"""A
GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsGetIamPolicyRequest
object.
Fields:
options_requestedPolicyVersion: Optional. The policy format version to be
returned. Valid values are 0, 1, and 3. Requests specifying an invalid
value will be rejected. Requests for policies with any conditional
bindings must specify version 3. Policies without any conditional
bindings may specify any valid value or leave the field unset. To learn
which resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
options_requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
resource = _messages.StringField(2, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsGetRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsGetRequest
object.
Fields:
name: Required. Full name of the VolumeBackup resource. Format: projects/{
project}/locations/{location}/backupPlans/{backup_plan}/backups/{backup}
/volumeBackups/{volume_backup} The {volume_backup} field can either be
the user specified Backup name or the server generated UID for this
volume backup.
"""
name = _messages.StringField(1, required=True)
class GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsListRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsListRequest
object.
Fields:
filter: List filter.
orderBy: Sort results.
pageSize: If not specified, a default value will be chosen by the service.
Regardless of the page_size value, the response may include a partial
list and a caller should only rely on response's next_page_token to
determine if there are more instances left to be queried.
pageToken: Token returned by previous call to
`ListVolumeBackupsRequest` which specifies the position in the list from
where to continue listing the resources.
parent: Required. Format: projects/{project}/locations/{location}/backupPl
ans/{backup_plan}/backups/{backup} The {backup} field can either be the
user specified Backup name or the server generated UID for this backup.
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
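# Illustrative pagination sketch, not part of the generated module. It
# assumes a hypothetical callable `list_volume_backups(request)` that issues
# the RPC and returns a response exposing `volumeBackups` and
# `nextPageToken`; only the request message defined above is real.
def _iter_volume_backups(list_volume_backups, parent):
  page_token = None
  while True:
    kwargs = {'parent': parent}
    if page_token:
      kwargs['pageToken'] = page_token
    response = list_volume_backups(
        GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsListRequest(**kwargs))
    for volume_backup in getattr(response, 'volumeBackups', None) or []:
      yield volume_backup
    page_token = getattr(response, 'nextPageToken', None)
    if not page_token:
      break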
class GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsPatchRequest(_messages.Message):
r"""A GkebackupProjectsLocationsBackupPlansBackupsVolumeBackupsPatchRequest
object.
Fields:
etag: etag, if provided, it must match the server's etag for the update to
happen.
name: Output only. Full name of the volume backup which should be unique
within the Backup. The name will have a format:
projects/*/locations/*/backupPlans/*/backups/*/volumeBackups/* The last
segment of the name will have a format: 'pvc-'.
updateMask: Optional. Field mask is used to specify the fields to be
overwritten in the volume backup resource by this update. The fields
specified in the update_mask are relative to the resource, not the full
request. A field will be overwritten if it is in the mask. If the user
does not provide a mask then all fields will be overwritten. Fields to
import torch
import torch.nn as nn
class Resnet50_256(nn.Module):
def __init__(self):
super(Resnet50_256, self).__init__()
self.meta = {'mean': [131.0912, 103.8827, 91.4953],
'std': [1, 1, 1],
'imageSize': [224, 224, 3]}
self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=[7, 7], stride=(2, 2), padding=(3, 3), bias=False)
self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv1_relu_7x7_s2 = nn.ReLU(inplace=True)
self.pool1_3x3_s2 = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], padding=(0, 0), dilation=1, ceil_mode=True)
self.conv2_1_1x1_reduce = nn.Conv2d(64, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv2_1_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_1_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_3x3_relu = nn.ReLU(inplace=True)
self.conv2_1_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_1x1_proj = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_proj_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_relu = nn.ReLU(inplace=True)
self.conv2_2_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_2_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv2_2_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_2_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_3x3_relu = nn.ReLU(inplace=True)
self.conv2_2_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_2_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_relu = nn.ReLU(inplace=True)
self.conv2_3_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_3_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv2_3_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_3_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_3x3_relu = nn.ReLU(inplace=True)
self.conv2_3_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_3_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_relu = nn.ReLU(inplace=True)
self.conv3_1_1x1_reduce = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv3_1_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv3_1_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_1_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_3x3_relu = nn.ReLU(inplace=True)
self.conv3_1_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_1_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_1x1_proj = nn.Conv2d(256, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv3_1_1x1_proj_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_relu = nn.ReLU(inplace=True)
self.conv3_2_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_2_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv3_2_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_2_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_3x3_relu = nn.ReLU(inplace=True)
self.conv3_2_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_2_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_relu = nn.ReLU(inplace=True)
self.conv3_3_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_3_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv3_3_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_3_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_3x3_relu = nn.ReLU(inplace=True)
self.conv3_3_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_3_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_relu = nn.ReLU(inplace=True)
self.conv3_4_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_4_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv3_4_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_4_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_3x3_relu = nn.ReLU(inplace=True)
self.conv3_4_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_4_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_relu = nn.ReLU(inplace=True)
self.conv4_1_1x1_reduce = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv4_1_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_1_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_1_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_3x3_relu = nn.ReLU(inplace=True)
self.conv4_1_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_1_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_1x1_proj = nn.Conv2d(512, 1024, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv4_1_1x1_proj_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_relu = nn.ReLU(inplace=True)
self.conv4_2_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_2_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_2_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_2_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_3x3_relu = nn.ReLU(inplace=True)
self.conv4_2_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_2_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_relu = nn.ReLU(inplace=True)
self.conv4_3_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_3_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_3_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_3_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_3x3_relu = nn.ReLU(inplace=True)
self.conv4_3_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_3_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_relu = nn.ReLU(inplace=True)
self.conv4_4_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_4_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_4_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_4_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_3x3_relu = nn.ReLU(inplace=True)
self.conv4_4_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_4_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_relu = nn.ReLU(inplace=True)
self.conv4_5_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_5_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_5_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_5_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_3x3_relu = nn.ReLU(inplace=True)
self.conv4_5_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_5_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_relu = nn.ReLU(inplace=True)
self.conv4_6_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_6_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv4_6_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_6_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_3x3_relu = nn.ReLU(inplace=True)
self.conv4_6_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_6_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_relu = nn.ReLU(inplace=True)
self.conv5_1_1x1_reduce = nn.Conv2d(1024, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv5_1_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv5_1_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_1_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_3x3_relu = nn.ReLU(inplace=True)
self.conv5_1_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_1_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_1x1_proj = nn.Conv2d(1024, 2048, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv5_1_1x1_proj_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_relu = nn.ReLU(inplace=True)
self.conv5_2_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_2_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv5_2_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_2_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_3x3_relu = nn.ReLU(inplace=True)
self.conv5_2_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_2_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_relu = nn.ReLU(inplace=True)
self.conv5_3_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_3_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_1x1_reduce_relu = nn.ReLU(inplace=True)
self.conv5_3_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_3_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_3x3_relu = nn.ReLU(inplace=True)
self.conv5_3_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_3_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_relu = nn.ReLU(inplace=True)
self.pool5_7x7_s1 = nn.AdaptiveAvgPool2d(1)
self.feat_extract = nn.Conv2d(2048, 256, kernel_size=[1, 1], stride=(1, 1))
self.classifier_8631_f = nn.Conv2d(256, 8631, kernel_size=[1, 1], stride=(1, 1))
def forward(self, data):
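        # Each conv{stage}_{block} group below is a ResNet bottleneck unit:
        # 1x1 "reduce" conv -> 3x3 conv -> 1x1 "increase" conv, each followed
        # by batch norm, with the result added to an identity shortcut (or a
        # 1x1-projected shortcut at the first block of each stage) and passed
        # through ReLU.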
conv1_7x7_s2 = self.conv1_7x7_s2(data)
conv1_7x7_s2_bn = self.conv1_7x7_s2_bn(conv1_7x7_s2)
conv1_7x7_s2_bnxx = self.conv1_relu_7x7_s2(conv1_7x7_s2_bn)
pool1_3x3_s2 = self.pool1_3x3_s2(conv1_7x7_s2_bnxx)
conv2_1_1x1_reduce = self.conv2_1_1x1_reduce(pool1_3x3_s2)
conv2_1_1x1_reduce_bn = self.conv2_1_1x1_reduce_bn(conv2_1_1x1_reduce)
conv2_1_1x1_reduce_bnxx = self.conv2_1_1x1_reduce_relu(conv2_1_1x1_reduce_bn)
conv2_1_3x3 = self.conv2_1_3x3(conv2_1_1x1_reduce_bnxx)
conv2_1_3x3_bn = self.conv2_1_3x3_bn(conv2_1_3x3)
conv2_1_3x3_bnxx = self.conv2_1_3x3_relu(conv2_1_3x3_bn)
conv2_1_1x1_increase = self.conv2_1_1x1_increase(conv2_1_3x3_bnxx)
conv2_1_1x1_increase_bn = self.conv2_1_1x1_increase_bn(conv2_1_1x1_increase)
conv2_1_1x1_proj = self.conv2_1_1x1_proj(pool1_3x3_s2)
conv2_1_1x1_proj_bn = self.conv2_1_1x1_proj_bn(conv2_1_1x1_proj)
conv2_1 = torch.add(conv2_1_1x1_proj_bn, conv2_1_1x1_increase_bn)
conv2_1x = self.conv2_1_relu(conv2_1)
conv2_2_1x1_reduce = self.conv2_2_1x1_reduce(conv2_1x)
conv2_2_1x1_reduce_bn = self.conv2_2_1x1_reduce_bn(conv2_2_1x1_reduce)
conv2_2_1x1_reduce_bnxx = self.conv2_2_1x1_reduce_relu(conv2_2_1x1_reduce_bn)
conv2_2_3x3 = self.conv2_2_3x3(conv2_2_1x1_reduce_bnxx)
conv2_2_3x3_bn = self.conv2_2_3x3_bn(conv2_2_3x3)
conv2_2_3x3_bnxx = self.conv2_2_3x3_relu(conv2_2_3x3_bn)
conv2_2_1x1_increase = self.conv2_2_1x1_increase(conv2_2_3x3_bnxx)
conv2_2_1x1_increase_bn = self.conv2_2_1x1_increase_bn(conv2_2_1x1_increase)
conv2_2 = torch.add(conv2_1x, conv2_2_1x1_increase_bn)
conv2_2x = self.conv2_2_relu(conv2_2)
conv2_3_1x1_reduce = self.conv2_3_1x1_reduce(conv2_2x)
conv2_3_1x1_reduce_bn = self.conv2_3_1x1_reduce_bn(conv2_3_1x1_reduce)
conv2_3_1x1_reduce_bnxx = self.conv2_3_1x1_reduce_relu(conv2_3_1x1_reduce_bn)
conv2_3_3x3 = self.conv2_3_3x3(conv2_3_1x1_reduce_bnxx)
conv2_3_3x3_bn = self.conv2_3_3x3_bn(conv2_3_3x3)
conv2_3_3x3_bnxx = self.conv2_3_3x3_relu(conv2_3_3x3_bn)
conv2_3_1x1_increase = self.conv2_3_1x1_increase(conv2_3_3x3_bnxx)
conv2_3_1x1_increase_bn = self.conv2_3_1x1_increase_bn(conv2_3_1x1_increase)
conv2_3 = torch.add(conv2_2x, conv2_3_1x1_increase_bn)
conv2_3x = self.conv2_3_relu(conv2_3)
conv3_1_1x1_reduce = self.conv3_1_1x1_reduce(conv2_3x)
conv3_1_1x1_reduce_bn = self.conv3_1_1x1_reduce_bn(conv3_1_1x1_reduce)
conv3_1_1x1_reduce_bnxx = self.conv3_1_1x1_reduce_relu(conv3_1_1x1_reduce_bn)
conv3_1_3x3 = self.conv3_1_3x3(conv3_1_1x1_reduce_bnxx)
conv3_1_3x3_bn = self.conv3_1_3x3_bn(conv3_1_3x3)
conv3_1_3x3_bnxx = self.conv3_1_3x3_relu(conv3_1_3x3_bn)
conv3_1_1x1_increase = self.conv3_1_1x1_increase(conv3_1_3x3_bnxx)
conv3_1_1x1_increase_bn = self.conv3_1_1x1_increase_bn(conv3_1_1x1_increase)
conv3_1_1x1_proj = self.conv3_1_1x1_proj(conv2_3x)
conv3_1_1x1_proj_bn = self.conv3_1_1x1_proj_bn(conv3_1_1x1_proj)
conv3_1 = torch.add(conv3_1_1x1_proj_bn, conv3_1_1x1_increase_bn)
conv3_1x = self.conv3_1_relu(conv3_1)
conv3_2_1x1_reduce = self.conv3_2_1x1_reduce(conv3_1x)
conv3_2_1x1_reduce_bn = self.conv3_2_1x1_reduce_bn(conv3_2_1x1_reduce)
conv3_2_1x1_reduce_bnxx = self.conv3_2_1x1_reduce_relu(conv3_2_1x1_reduce_bn)
conv3_2_3x3 = self.conv3_2_3x3(conv3_2_1x1_reduce_bnxx)
conv3_2_3x3_bn = self.conv3_2_3x3_bn(conv3_2_3x3)
conv3_2_3x3_bnxx = self.conv3_2_3x3_relu(conv3_2_3x3_bn)
conv3_2_1x1_increase = self.conv3_2_1x1_increase(conv3_2_3x3_bnxx)
conv3_2_1x1_increase_bn = self.conv3_2_1x1_increase_bn(conv3_2_1x1_increase)
conv3_2 = torch.add(conv3_1x, conv3_2_1x1_increase_bn)
conv3_2x = self.conv3_2_relu(conv3_2)
conv3_3_1x1_reduce = self.conv3_3_1x1_reduce(conv3_2x)
conv3_3_1x1_reduce_bn = self.conv3_3_1x1_reduce_bn(conv3_3_1x1_reduce)
conv3_3_1x1_reduce_bnxx = self.conv3_3_1x1_reduce_relu(conv3_3_1x1_reduce_bn)
conv3_3_3x3 = | |
GROUP (ORDER BY mytable.name DESC) "
"OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
"ROWS BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
"AS anon_1 FROM mytable",
)
def test_date_between(self):
import datetime
table = Table("dt", metadata, Column("date", Date))
self.assert_compile(
table.select(
table.c.date.between(
datetime.date(2006, 6, 1), datetime.date(2006, 6, 5)
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
self.assert_compile(
table.select(
sql.between(
table.c.date,
datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5),
)
),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={
"date_1": datetime.date(2006, 6, 1),
"date_2": datetime.date(2006, 6, 5),
},
)
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select([my_str])
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.c,
)
# calling label or as_scalar doesn't compile
# anything.
sel2 = select([func.substr(my_str, 2, 3)]).label("my_substr")
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel2.compile,
dialect=default.DefaultDialect(),
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
sel3.compile,
dialect=default.DefaultDialect(),
)
my_str.name = "foo"
self.assert_compile(sel1, "SELECT foo")
self.assert_compile(
sel2, "(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)"
)
self.assert_compile(sel3, "(SELECT foo)")
def test_naming(self):
# TODO: the part where we check c.keys() are not "compile" tests, they
# belong probably in test_selectable, or some broken up
# version of that suite
f1 = func.hoho(table1.c.name)
s1 = select(
[
table1.c.myid,
table1.c.myid.label("foobar"),
f1,
func.lala(table1.c.name).label("gg"),
]
)
eq_(list(s1.c.keys()), ["myid", "foobar", str(f1), "gg"])
meta = MetaData()
t1 = Table("mytable", meta, Column("col1", Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal("x"),
)
for col, key, expr, lbl in (
(table1.c.name, "name", "mytable.name", None),
(exprs[0], str(exprs[0]), "mytable.myid = :myid_1", "anon_1"),
(exprs[1], str(exprs[1]), "hoho(mytable.myid)", "hoho_1"),
(
exprs[2],
str(exprs[2]),
"CAST(mytable.name AS NUMERIC)",
"anon_1",
),
(t1.c.col1, "col1", "mytable.col1", None),
(
column("some wacky thing"),
"some wacky thing",
'"some wacky thing"',
"",
),
(exprs[3], exprs[3].key, ":param_1", "anon_1"),
):
if getattr(col, "table", None) is not None:
t = col.table
else:
t = table1
s1 = select([col], from_obj=t)
assert list(s1.c.keys()) == [key], list(s1.c.keys())
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" % (expr, lbl)
)
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select([s1])
if lbl:
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s AS %s FROM mytable)"
% (lbl, expr, lbl),
)
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s AS %s FROM mytable)"
% (key, expr, key),
)
else:
self.assert_compile(
s1,
"SELECT %s FROM (SELECT %s FROM mytable)" % (expr, expr),
)
def test_hints(self):
s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s")
s2 = (
select([table1.c.myid])
.with_hint(table1, "index(%(name)s idx)", "oracle")
.with_hint(table1, "WITH HINT INDEX idx", "sybase")
)
a1 = table1.alias()
s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)")
subs4 = (
select([table1, table2])
.select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)
)
.with_hint(table1, "hint1")
)
s4 = (
select([table3])
.select_from(
table3.join(subs4, subs4.c.othername == table3.c.otherstuff)
)
.with_hint(table3, "hint3")
)
t1 = table("QuotedName", column("col1"))
s6 = (
select([t1.c.col1])
.where(t1.c.col1 > 10)
.with_hint(t1, "%(name)s idx1")
)
a2 = t1.alias("SomeName")
s7 = (
select([a2.c.col1])
.where(a2.c.col1 > 10)
.with_hint(a2, "%(name)s idx1")
)
mysql_d, oracle_d, sybase_d = (
mysql.dialect(),
oracle.dialect(),
sybase.dialect(),
)
for stmt, dialect, expected in [
(s, mysql_d, "SELECT mytable.myid FROM mytable test hint mytable"),
(
s,
oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable",
),
(
s,
sybase_d,
"SELECT mytable.myid FROM mytable test hint mytable",
),
(s2, mysql_d, "SELECT mytable.myid FROM mytable"),
(
s2,
oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable",
),
(
s2,
sybase_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx",
),
(
s3,
mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s3,
oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1",
),
(
s3,
sybase_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)",
),
(
s4,
mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 INNER "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff",
),
(
s4,
sybase_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff",
),
(
s4,
oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid,"
" mytable.name, mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable JOIN myothertable ON"
" mytable.myid = myothertable.otherid) ON othername ="
" thirdtable.otherstuff",
),
# TODO: figure out dictionary ordering solution here
# (s5, oracle_d,
# "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, "
# "thirdtable.otherstuff "
# "FROM thirdtable JOIN (SELECT mytable.myid,"
# " mytable.name, mytable.description, myothertable.otherid,"
# " myothertable.othername FROM mytable JOIN myothertable ON"
# " mytable.myid = myothertable.otherid) ON othername ="
# " thirdtable.otherstuff"),
(
s6,
oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1""",
),
(
s7,
oracle_d,
"""SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1""",
),
]:
self.assert_compile(stmt, expected, dialect=dialect)
def test_statement_hints(self):
stmt = (
select([table1.c.myid])
.with_statement_hint("test hint one")
.with_statement_hint("test hint two", "mysql")
)
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable test hint one"
)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable test hint one test hint two",
dialect="mysql",
)
def test_literal_as_text_fromstring(self):
self.assert_compile(and_(text("a"), text("b")), "a AND b")
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError, and_, ("a",), ("b",))
class BindParameterTest(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "default"
def test_binds(self):
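        # Each tuple below is: (statement, expected named-param SQL, expected
        # positional (qmark) SQL, default params as a dict, default params as
        # a list, params to apply for the test, expected test params as a
        # dict, expected test params as a list).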
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list,
) in [
(
select(
[table1, table2],
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam("mytablename"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{"mytablename": None},
[None],
{"mytablename": 5},
{"mytablename": 5},
[5],
),
(
select(
[table1],
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myid"),
),
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
text(
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{"myid": None},
[None, None],
{"myid": 5},
{"myid": 5},
[5, 5],
),
(
select(
[table1],
or_(
table1.c.myid == bindparam("myid", unique=True),
table2.c.otherid == bindparam("myid", unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{"myid_1": None, "myid_2": None},
[None, None],
{"myid_1": 5, "myid_2": 6},
{"myid_1": 5, "myid_2": 6},
[5, 6],
),
(
bindparam("test", type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{"test": None},
[None],
{},
{"test": None},
[None],
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(
[table1],
or_(
table1.c.myid == bindparam("myid"),
table2.c.otherid == bindparam("myotherid"),
),
).params({"myid": 8, "myotherid": 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{"myid": 8, "myotherid": 7},
[8, 7],
{"myid": 5},
{"myid": 5, "myotherid": 7},
[5, 7],
),
(
select(
[table1],
or_(
table1.c.myid
== bindparam("myid", value=7, unique=True),
table2.c.otherid
== bindparam("myid", value=8, unique=True),
),
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, | |
# backend/portal/summoners/views.py
import boto
import json
import random
import string
from boto.sqs.connection import SQSConnection
from boto.sqs.message import RawMessage
from cassiopeia.type.api.exception import APIError
from datetime import datetime
from django.contrib.auth import hashers
from django.core.cache import cache
from django.core.mail import EmailMessage
from django.db.utils import IntegrityError
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from portal.errors import FRIEND_ALREADY_LISTED
from portal.errors import FRIEND_EQUALS_USER
from portal.errors import FRIEND_LIMIT_REACHED
from portal.errors import INTERNAL_PROCESSING_ERROR
from portal.errors import INVALID_CREDENTIALS
from portal.errors import INVALID_REQUEST_FORMAT
from portal.errors import INVALID_RIOT_RESPONSE
from portal.errors import RUNE_PAGE_CODE_NOT_FOUND
from portal.errors import SUMMONER_ALREADY_REGISTERED
from portal.errors import SUMMONER_DOES_NOT_EXIST
from portal.errors import SUMMONER_NOT_IN_DATABASE
from portal.errors import SUMMONER_NOT_RANKED
from portal.errors import SUMMONER_NOT_REGISTERED
from portal.keys import AWS_ACCESS_KEY_ID
from portal.keys import AWS_SECRET_ACCESS_KEY
from portal.riot import format_key
from portal.riot import riot_request
from summoners.models import Summoner
from summoners.models import User
from summoners.serializers import summoner_serializer
@require_POST
def add_friend(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
user_key = data.get("user_key")
friend_key = data.get("friend_key")
# ensure the data is valid
if None in (region, user_key, friend_key):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# ensure proper key format
user_key = format_key(user_key)
friend_key = format_key(friend_key)
# make sure friend is not the user
if user_key == friend_key:
return HttpResponse(json.dumps(FRIEND_EQUALS_USER))
try:
# get the users summoner object
user_o = cache.get(region + user_key + "summoner")
if user_o is None:
user_o = Summoner.objects.get(region=region, key=user_key)
cache.set(region + user_key + "summoner", user_o, None)
Summoner.objects.filter(pk=user_o.pk).update(accessed=datetime.now())
except Summoner.DoesNotExist:
return HttpResponse(json.dumps(SUMMONER_NOT_IN_DATABASE))
# check if user is at friend limit or if friend is already listed
if user_o.friends is not None:
friends = user_o.friends.split(",")
if len(friends) >= 20:
return HttpResponse(json.dumps(FRIEND_LIMIT_REACHED))
for friend in friends:
if friend == friend_key:
return HttpResponse(json.dumps(FRIEND_ALREADY_LISTED))
try:
# get the friends summoner object
friend_o = cache.get(region + friend_key + "summoner")
if friend_o is None:
friend_o = Summoner.objects.get(region=region, key=friend_key)
cache.set(region + friend_key + "summoner", friend_o, None)
Summoner.objects.filter(pk=friend_o.pk).update(accessed=datetime.now())
except Summoner.DoesNotExist:
try:
# summoner not in database, request summoner data from riot
args = {"request": 1, "key": friend_key}
riot_response = riot_request(region, args)
except APIError as e:
if e.error_code == 404:
return HttpResponse(json.dumps(SUMMONER_DOES_NOT_EXIST))
else:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# extract the summoner
friend = riot_response.get(friend_key)
except AttributeError:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# use the summoner id to get the friends league information
args = {"request": 4, "summoner_ids": friend.id}
riot_response = riot_request(region, args)
except APIError as e:
if e.error_code == 404:
return HttpResponse(json.dumps(SUMMONER_NOT_RANKED))
else:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# extract the league data
leagues = riot_response.get(str(friend.id))
# iterate over the leagues looking for the dynamic queue league
league = None
for item in leagues:
if item.queue == "RANKED_SOLO_5x5":
league = item
# ensure the dynamic queue league was found
if league is None:
return HttpResponse(json.dumps(SUMMONER_NOT_RANKED))
# iterate over the league entries to get more detailed information
division, lp, wins, losses, series = None, None, None, None, ""
for entry in league.entries:
if entry.playerOrTeamId == str(friend.id):
division = entry.division
lp = entry.leaguePoints
wins = entry.wins
losses = entry.losses
if entry.miniSeries is not None:
series = entry.miniSeries.progress
except AttributeError:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# use the gathered information to create a summoner object
friend_o = Summoner.objects.create(
region=region,
key=friend_key,
name=friend.name,
summoner_id=friend.id,
tier=league.tier,
division=division,
lp=lp,
wins=wins,
losses=losses,
series=series,
profile_icon=friend.profileIconId)
except IntegrityError:
return HttpResponse(json.dumps(INTERNAL_PROCESSING_ERROR))
# update the newly created summoner
conn = SQSConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
queue = conn.get_queue("portal")
message = RawMessage()
message.set_body(json.dumps({"region": region, "keys": [friend_key]}))
queue.write(message)
# add the friends key to the users friend list
if user_o.friends != "":
user_o.friends += "," + friend_key
else:
user_o.friends = friend_key
Summoner.objects.filter(pk=user_o.pk).update(friends=user_o.friends)
cache.set(region + user_key + "summoner", user_o, None)
# return the friends summoner object
return HttpResponse(summoner_serializer(friend_o, None, False))
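# Illustrative refactoring sketch, not part of the original module: the
# cache-then-database summoner lookup repeated throughout these views could
# be factored into a helper like this one. The helper name is an assumption;
# it only uses names already imported in this file, and it lets
# Summoner.DoesNotExist propagate to the caller just like the inline version.
def get_summoner_cached(region, key):
    summoner_o = cache.get(region + key + "summoner")
    if summoner_o is None:
        summoner_o = Summoner.objects.get(region=region, key=key)
        cache.set(region + key + "summoner", summoner_o, None)
    Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
    return summoner_o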
@require_POST
def add_friend_1_1(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
key = data.get("key")
# ensure the data is valid
if None in (region, key):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# ensure proper key format
key = format_key(key)
try:
# get the summoner object
summoner_o = cache.get(region + key + "summoner")
if summoner_o is None:
summoner_o = Summoner.objects.get(region=region, key=key)
cache.set(region + key + "summoner", summoner_o, None)
Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
# return the summoner object
return HttpResponse(summoner_serializer(summoner_o, None, False))
except Summoner.DoesNotExist:
pass
try:
# summoner not in database, request summoner data from riot
args = {"request": 1, "key": key}
riot_response = riot_request(region, args)
except APIError as e:
if e.error_code == 404:
return HttpResponse(json.dumps(SUMMONER_DOES_NOT_EXIST))
else:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# extract the summoner
summoner = riot_response.get(key)
# extract summoner fields
summoner_id = summoner.id
name = summoner.name
profile_icon = summoner.profileIconId
except AttributeError:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# use summoner id to get league information
args = {"request": 4, "summoner_ids": summoner_id}
riot_response = riot_request(region, args)
except APIError as e:
if e.error_code == 404:
return HttpResponse(json.dumps(SUMMONER_NOT_RANKED))
else:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# extract the league data
leagues = riot_response.get(str(summoner_id))
# iterate over the leagues looking for the dynamic queue league
league = None
for item in leagues:
if item.queue == "RANKED_SOLO_5x5":
league = item
# ensure the dynamic queue league was found
if league is None:
return HttpResponse(json.dumps(SUMMONER_NOT_RANKED))
# iterate over the league entries to get more detailed information
division, lp, wins, losses, series = None, None, None, None, ""
for entry in league.entries:
if entry.playerOrTeamId == str(summoner_id):
division = entry.division
lp = entry.leaguePoints
wins = entry.wins
losses = entry.losses
if entry.miniSeries is not None:
series = entry.miniSeries.progress
# extract the tier information
tier = league.tier
except AttributeError:
return HttpResponse(json.dumps(INVALID_RIOT_RESPONSE))
try:
# use the gathered information to create a summoner object
summoner_o = Summoner.objects.create(
region=region,
key=key,
name=name,
summoner_id=summoner_id,
tier=tier,
division=division,
lp=lp,
wins=wins,
losses=losses,
series=series,
profile_icon=profile_icon)
except IntegrityError:
return HttpResponse(json.dumps(INTERNAL_PROCESSING_ERROR))
# update the newly created summoner
conn = SQSConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
queue = conn.get_queue("portal")
message = RawMessage()
message.set_body(json.dumps({"region": region, "keys": [key]}))
queue.write(message)
# return the summoner object
return HttpResponse(summoner_serializer(summoner_o, None, False))
@require_POST
def change_email(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
key = data.get("key")
password = data.get("password")
new_email = data.get("new_email")
# ensure the data is valid
if None in (region, key, password, new_email):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# ensure proper key format
key = format_key(key)
try:
# get summoner object
summoner_o = cache.get(region + key + "summoner")
if summoner_o is None:
summoner_o = Summoner.objects.get(region=region, key=key)
cache.set(region + key + "summoner", summoner_o, None)
Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
except Summoner.DoesNotExist:
return HttpResponse(json.dumps(SUMMONER_NOT_IN_DATABASE))
# make sure user object exists
if summoner_o.user is None:
return HttpResponse(json.dumps(SUMMONER_NOT_REGISTERED))
# ensure password is correct
if not hashers.check_password(password, summoner_o.user.password):
return HttpResponse(json.dumps(INVALID_CREDENTIALS))
# change email
User.objects.filter(pk=summoner_o.user.pk).update(email=new_email)
# return the users summoner object with the email included
return HttpResponse(summoner_serializer(summoner_o, new_email, False))
@require_POST
def change_password(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
key = data.get("key")
current_password = data.get("current_password")
new_password = data.get("new_password")
# ensure the data is valid
if None in (region, key, current_password, new_password):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# ensure proper key format
key = format_key(key)
try:
# get summoner object
summoner_o = cache.get(region + key + "summoner")
if summoner_o is None:
summoner_o = Summoner.objects.get(region=region, key=key)
cache.set(region + key + "summoner", summoner_o, None)
Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
except Summoner.DoesNotExist:
return HttpResponse(json.dumps(SUMMONER_NOT_IN_DATABASE))
# make sure user object exists
if summoner_o.user is None:
return HttpResponse(json.dumps(SUMMONER_NOT_REGISTERED))
# make sure entered password is correct password
if not hashers.check_password(current_password, summoner_o.user.password):
return HttpResponse(json.dumps(INVALID_CREDENTIALS))
# change password
User.objects.filter(pk=summoner_o.user.pk).update(password=hashers.make_password(new_password))
# return the users summoner object
return HttpResponse(summoner_serializer(summoner_o, None, False))
@require_POST
def get_summoners(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
keys = data.get("keys")
# ensure the data is valid
if None in (region, keys):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# initialize empty list for requested summoner objects
summoners_o = []
# iterate over each key
for key in keys:
# ensure proper key format
key = format_key(key)
try:
# get summoner object
summoner_o = cache.get(region + key + "summoner")
if summoner_o is None:
summoner_o = Summoner.objects.get(region=region, key=key)
cache.set(region + key + "summoner", summoner_o, None)
Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
# append summoner object to list
summoners_o.append(summoner_o)
except Summoner.DoesNotExist:
return HttpResponse(json.dumps(SUMMONER_NOT_IN_DATABASE))
# remove duplicates
summoners_o = set(summoners_o)
# return the requested summoner objects
return HttpResponse(summoner_serializer(summoners_o, None, True))
@require_POST
def login_user(request):
# extract data
data = json.loads(request.body.decode('utf-8'))
region = data.get("region")
key = data.get("key")
password = data.get("password")
# ensure the data is valid
if None in (region, key, password):
return HttpResponse(json.dumps(INVALID_REQUEST_FORMAT))
# ensure proper key format
key = format_key(key)
try:
# get the summoner object
summoner_o = cache.get(region + key + "summoner")
if summoner_o is None:
summoner_o = Summoner.objects.get(region=region, key=key)
cache.set(region + key + "summoner", summoner_o, None)
Summoner.objects.filter(pk=summoner_o.pk).update(accessed=datetime.now())
except Summoner.DoesNotExist:
return HttpResponse(json.dumps(SUMMONER_NOT_IN_DATABASE))
# make sure user object exists
if summoner_o.user | |
Adv.'
],
ylim: (0, 100),
xlim: (0, 0.05),
is_log: False,
}
robust_non_adaptive2 = {ylabel: "Test Accuracy (%)",
file_name: "distortion_robust_net_non_adaptive2",
title: "C&W L$_2$ non-adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: ['PlainNet', 'RobustNet', 'FC', 'BandLimit'],
xlim: (-0.05, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False}
robust_adaptive2 = {ylabel: "Test Accuracy (%)",
file_name: "distortion_robust_net2",
title: "C&W L$_2$ adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 6,
legend_cols: 1,
# labels: ['plain', 'robust\n0.2 0.1', 'fft 50%'],
labels: ['PlainNet', 'RobustNet', 'FC'],
xlim: (-0.05, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False}
robust_non_adaptive3 = {ylabel: "Test Accuracy (%)",
file_name: "distortion_robust_net_non_adaptive3",
title: "C&W L$_2$ non-adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: ['PlainNet', 'RobustNet', 'FC', 'BandLimit'],
xlim: (-0.05, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False}
robust_adaptive3 = {ylabel: "Test Accuracy (%)",
file_name: "distortion_robust_net3",
title: "C&W L$_2$ adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 8,
legend_cols: 1,
# labels: ['plain', 'robust\n0.2 0.1', 'fft 50%'],
labels: ['PlainNet', 'RobustNet', 'FC', 'BandLimit'],
xlim: (-0.05, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False}
train_vs_inference = {
ylabel: "Test Accuracy (%)",
file_name: "train_vs_test_perturbation2",
# title: "C&W L$_2$ adaptive",
title: "ParamNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: ['test 0.01', 'train 0.01',
'test 0.02', 'train 0.02'],
# xlim: (0, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False,
# legend_title: 'ParamNet:',
}
train_vs_inference3 = {
ylabel: "Test Accuracy (%)",
file_name: "train_vs_test_perturbation3",
# title: "C&W L$_2$ adaptive",
title: "ParamNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 1,
labels: ['test 0.01', 'train 0.01',
'test 0.02', 'train 0.02',
'test 0.07', 'train 0.07'],
# xlim: (0, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False,
# legend_title: 'ParamNet:',
}
robust_layers_dp = {
ylabel: "Test Accuracy (%)",
file_name: "distortion_robust_net_layers",
# title: "C&W L$_2$ adaptive",
title: "RobustNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: ['0.0 0.0', '0.2 0.1', '0.2 0.0',
'0.3 0.0'],
xlim: (-0.05, 1.15),
ylim: (0, 100),
xlabel: '$L_2$ distortion',
is_log: False,
# legend_title: 'RobustNet:',
}
four_cw_c_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: 'C&W c parameter',
file_name: "distortion_cw_c_40_iters_pgd_adv_train",
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
four_cw_l2_distance_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: 'L$_2$ distortion',
file_name: "distortion_cw_l2_distance_40_iters_pgd_adv_train",
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
four_pgd_many_iters_attack_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: '# of PGD iterations',
file_name: "distortion_pgd_many_iters_attack_train_40_iters_pgd_adv_train",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
four_pgd_linf_distance_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: '$L_\infty$ distortion x $10^{-6}$',
file_name: "distortion_pgd_linf_distance_40_iters_pgd_adv_train",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
svhn_cw_c = {
ylabel: "Test Accuracy (%)",
xlabel: 'C&W c parameter',
file_name: 'svhn_cw_c',
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
svhn_cw_dist = {
ylabel: "Test Accuracy (%)",
xlabel: 'L$_2$ distortion',
file_name: 'svhn_cw_dist',
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "upper right",
# legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
svhn_pgd_iters = {
ylabel: "Test Accuracy (%)",
xlabel: '# of PGD iterations',
file_name: "svhn_pgd_iters",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
# legend_pos: "lower left",
legend_pos: "center right",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
svhn_pgd_dist = {
ylabel: "Test Accuracy (%)",
xlabel: '$L_\infty$ distortion x $10^{-6}$',
file_name: "svhn_pgd_dist",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# legend_pos: "center",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
cw_c_40_iters_pgd_adv = {
ylabel: "Test Accuracy (%)",
xlabel: 'C&W c parameter',
file_name: "distortion_cw_c_40_iters_pgd_adv_train2",
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
cw_dist_40_iters_pgd_adv = {
ylabel: "Test Accuracy (%)",
xlabel: 'L$_2$ distortion',
file_name: "distortion_cw_l2_distance_40_iters_pgd_adv_train2",
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
pgd_iters_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: '# of PGD iterations',
file_name: "distortion_pgd_many_iters_attack_train_40_iters_pgd_adv_train2",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
pgd_dist_40_iters_pgd_adv_train = {
ylabel: "Test Accuracy (%)",
xlabel: '$L_\infty$ distortion x $10^{-6}$',
file_name: "distortion_pgd_linf_distance_40_iters_pgd_adv_train2",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
svhn_cw_c2 = {
ylabel: "Test Accuracy (%)",
xlabel: 'C&W c parameter',
file_name: 'svhn_cw_c4',
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: True,
}
svhn_cw_dist2 = {
ylabel: "Test Accuracy (%)",
xlabel: 'L$_2$ distortion',
file_name: 'svhn_cw_dist4',
# title: "PGD L$_{\infty}$ adaptive",
title: "CW L$_2$ adaptive",
legend_pos: "upper right",
# legend_pos: "lower left",
# legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
}
svhn_pgd_iters2 = {
ylabel: "Test Accuracy (%)",
xlabel: '# of PGD iterations',
file_name: "svhn_pgd_iters4",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
# legend_pos: "lower left",
# legend_pos: "center right",
legend_pos: "upper right",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
is_log: False,
is_symlog: True,
}
svhn_pgd_dist2 = {
ylabel: "Test Accuracy (%)",
xlabel: '$L_\infty$ distortion',
file_name: "svhn_pgd_dist4",
# title: "PGD L$_{\infty}$ adaptive",
title: "PGD L$_\infty$ adaptive",
# legend_pos: "lower left",
legend_pos: "upper right",
# legend_pos: "center",
bbox: (-1.0, 0.0),
column_nr: 8,
legend_cols: 1,
labels: [
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
xlim: (0, 0.05),
is_symlog: False,
}
boundary_attack_linf = {
ylabel: "Test Accuracy (%)",
xlabel: '$L_\infty$ distortion',
file_name: "boundary_attack1",
# title: "PGD L$_{\infty}$ adaptive",
title: "Boundary (25K iters)",
# legend_pos: "lower left",
legend_pos: "center right",
# legend_pos: "center",
bbox: (-1.0, 0.0),
column_nr: 10,
legend_cols: 1,
labels: [
'Plain',
'Adv. Train',
'PNI-W Adv.',
'RobustNet',
'RobustNet Adv.'
],
ylim: (0, 100),
xlim: (0, 1.0),
is_symlog: False,
}
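# Illustrative sketch, not part of the original script: one way a plotting
# routine could consume the config dicts above. It assumes the bare names
# used as dict keys (ylabel, xlabel, title, labels, legend_pos, legend_cols,
# ylim, xlim, is_log, is_symlog, file_name) are string constants defined
# earlier in the file, and that each entry of `series` is an (x_values,
# y_values) pair matching the corresponding label in config[labels].
def plot_from_config(config, series):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    for label, (x_values, y_values) in zip(config[labels], series):
        ax.plot(x_values, y_values, label=label)
    ax.set_xlabel(config[xlabel])
    ax.set_ylabel(config[ylabel])
    ax.set_title(config[title])
    if config.get(is_log):
        ax.set_xscale('log')
    if config.get(is_symlog):
        ax.set_xscale('symlog')
    if xlim in config:
        ax.set_xlim(config[xlim])
    if ylim in config:
        ax.set_ylim(config[ylim])
    ax.legend(loc=config[legend_pos], ncol=config[legend_cols])
    fig.savefig(config[file_name] + ".pdf", bbox_inches='tight')
    plt.close(fig)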
boundary_attack_L2 = {
ylabel: "Test Accuracy (%)",
xlabel: 'max $L_2$ distortion',
file_name: "boundary_attack_L2_25K_iters2",
# title: "PGD L$_{\infty}$ adaptive",
title: "Boundary (25K iters)",
# legend_pos: "lower left",
legend_pos: "center right",
# legend_pos: "center",
bbox: (-1.0, 0.0),
column_nr: 10,
legend_cols: 1,
| |
"""
Unit and regression test for the reference_handler package.
"""
# Import package, test suite, and other packages as needed
import reference_handler # noqa: F401
from reference_handler import decode_latex
from reference_handler import encode_latex
import sys
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def test_latex_utf8_imported():
"""Sample test, will always pass so long as import statement worked"""
assert 'reference_handler' in sys.modules
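# Each accent test below follows the same round-trip pattern: build a LaTeX
# string that applies one accent command to every letter (plus the dotless
# \i and \j), decode it to the expected Unicode form, re-encode the result,
# and assert that decoding produced `answer` and re-encoding reproduced the
# original LaTeX `text`.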
def test_latex_umlauts():
"""LaTex command for an umlaut on a character, e.g. \"{a}"""
answer = 'ÄB̈C̈D̈ËF̈G̈ḦÏJ̈K̈L̈M̈N̈ÖP̈Q̈R̈S̈T̈ÜV̈ẄẌŸZ̈äb̈c̈d̈ëf̈g̈ḧïj̈k̈l̈m̈n̈öp̈q̈r̈s̈ẗüv̈ẅẍÿz̈ı̈ȷ̈' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\"{' + char + '}'
text += r'\"{\i}' # dotless i
text += r'\"{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_acute_accents():
"""LaTex command for an acute accent character, e.g. \'{a}"""
answer = 'ÁB́ĆD́ÉF́ǴH́ÍJ́ḰĹḾŃÓṔQ́ŔŚT́ÚV́ẂX́ÝŹáb́ćd́éf́ǵh́íj́ḱĺḿńóṕq́ŕśt́úv́ẃx́ýźı́ȷ́' # noqa: E501
text = ''
for char in list(alphabet):
text += r"\'{" + char + '}'
text += r"\'{\i}" # dotless i
text += r"\'{\j}" # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_dot_above():
r"""LaTex command for a dot above the character, e.g. \.{a}"""
answer = 'ȦḂĊḊĖḞĠḢİJ̇K̇L̇ṀṄȮṖQ̇ṘṠṪU̇V̇ẆẊẎŻȧḃċḋėḟġḣi̇j̇k̇l̇ṁṅȯṗq̇ṙṡṫu̇v̇ẇẋẏżı̇ȷ̇' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\.{' + char + '}'
text += r'\.{\i}' # dotless i
text += r'\.{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_macron_above():
r"""LaTex command for a macron above the character, e.g. \={a}"""
answer = 'ĀB̄C̄D̄ĒF̄ḠH̄ĪJ̄K̄L̄M̄N̄ŌP̄Q̄R̄S̄T̄ŪV̄W̄X̄ȲZ̄āb̄c̄d̄ēf̄ḡh̄īj̄k̄l̄m̄n̄ōp̄q̄r̄s̄t̄ūv̄w̄x̄ȳz̄ı̄ȷ̄' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\={' + char + '}'
text += r'\={\i}' # dotless i
text += r'\={\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_circumflex_above():
r"""LaTex command for a circumflex above the character, e.g. \^{a}"""
answer = 'ÂB̂ĈD̂ÊF̂ĜĤÎĴK̂L̂M̂N̂ÔP̂Q̂R̂ŜT̂ÛV̂ŴX̂ŶẐâb̂ĉd̂êf̂ĝĥîĵk̂l̂m̂n̂ôp̂q̂r̂ŝt̂ûv̂ŵx̂ŷẑı̂ȷ̂' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\^{' + char + '}'
text += r'\^{\i}' # dotless i
text += r'\^{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_grave_accent():
r"""LaTex command for a grave accent on the character, e.g. \`{a}"""
answer = 'ÀB̀C̀D̀ÈF̀G̀H̀ÌJ̀K̀L̀M̀ǸÒP̀Q̀R̀S̀T̀ÙV̀ẀX̀ỲZ̀àb̀c̀d̀èf̀g̀h̀ìj̀k̀l̀m̀ǹòp̀q̀r̀s̀t̀ùv̀ẁx̀ỳz̀ı̀ȷ̀' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\`{' + char + '}'
text += r'\`{\i}' # dotless i
text += r'\`{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_vertical_line_above():
r"""LaTex command for a vertical line above the character, e.g. \|{a}"""
answer = 'A̍B̍C̍D̍E̍F̍G̍H̍I̍J̍K̍L̍M̍N̍O̍P̍Q̍R̍S̍T̍U̍V̍W̍X̍Y̍Z̍a̍b̍c̍d̍e̍f̍g̍h̍i̍j̍k̍l̍m̍n̍o̍p̍q̍r̍s̍t̍u̍v̍w̍x̍y̍z̍ı̍ȷ̍' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\|{' + char + '}'
text += r'\|{\i}' # dotless i
text += r'\|{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_tilde_above():
r"""LaTex command for a tilde above the character, e.g. \~{a}"""
answer = 'ÃB̃C̃D̃ẼF̃G̃H̃ĨJ̃K̃L̃M̃ÑÕP̃Q̃R̃S̃T̃ŨṼW̃X̃ỸZ̃ãb̃c̃d̃ẽf̃g̃h̃ĩj̃k̃l̃m̃ñõp̃q̃r̃s̃t̃ũṽw̃x̃ỹz̃ı̃ȷ̃' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\~{' + char + '}'
text += r'\~{\i}' # dotless i
text += r'\~{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_macron_below():
r"""LaTex command for a macron below the character, e.g. \b{a}"""
answer = 'A̱ḆC̱ḎE̱F̱G̱H̱I̱J̱ḴḺM̱ṈO̱P̱Q̱ṞS̱ṮU̱V̱W̱X̱Y̱Ẕa̱ḇc̱ḏe̱f̱g̱ẖi̱j̱ḵḻm̱ṉo̱p̱q̱ṟs̱ṯu̱v̱w̱x̱y̱ẕı̱ȷ̱' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\b{' + char + '}'
text += r'\b{\i}' # dotless i
text += r'\b{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_cedilla():
r"""LaTex command for a cedilla on the character, e.g. \c{a}"""
answer = 'A̧B̧ÇḐȨF̧ĢḨI̧J̧ĶĻM̧ŅO̧P̧Q̧ŖŞŢU̧V̧W̧X̧Y̧Z̧a̧b̧çḑȩf̧ģḩi̧j̧ķļm̧ņo̧p̧q̧ŗşţu̧v̧w̧x̧y̧z̧ı̧ȷ̧' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\c{' + char + '}'
text += r'\c{\i}' # dotless i
text += r'\c{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_double_grave_accent():
r"""LaTex command for a double grave accent on the character, e.g. \C{a}"""
answer = 'ȀB̏C̏D̏ȄF̏G̏H̏ȈJ̏K̏L̏M̏N̏ȌP̏Q̏ȐS̏T̏ȔV̏W̏X̏Y̏Z̏ȁb̏c̏d̏ȅf̏g̏h̏ȉj̏k̏l̏m̏n̏ȍp̏q̏ȑs̏t̏ȕv̏w̏x̏y̏z̏ı̏ȷ̏' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\C{' + char + '}'
text += r'\C{\i}' # dotless i
text += r'\C{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_dot_below():
r"""LaTex command for a dot below the character, e.g. \d{a}"""
answer = 'ẠḄC̣ḌẸF̣G̣ḤỊJ̣ḲḶṂṆỌP̣Q̣ṚṢṬỤṾẈX̣ỴẒạḅc̣ḍẹf̣g̣ḥịj̣ḳḷṃṇọp̣q̣ṛṣṭụṿẉx̣ỵẓı̣ȷ̣' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\d{' + char + '}'
text += r'\d{\i}' # dotless i
text += r'\d{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_inverted_breve():
r"""LaTex command for an inverted breve above the character, e.g. \f{a}"""
answer = 'ȂB̑C̑D̑ȆF̑G̑H̑ȊJ̑K̑L̑M̑N̑ȎP̑Q̑ȒS̑T̑ȖV̑W̑X̑Y̑Z̑ȃb̑c̑d̑ȇf̑g̑h̑ȋj̑k̑l̑m̑n̑ȏp̑q̑ȓs̑t̑ȗv̑w̑x̑y̑z̑ı̑ȷ̑' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\f{' + char + '}'
text += r'\f{\i}' # dotless i
text += r'\f{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_hook_above():
r"""LaTex command for a hook above the character, e.g. \h{a}"""
answer = 'ẢB̉C̉D̉ẺF̉G̉H̉ỈJ̉K̉L̉M̉N̉ỎP̉Q̉R̉S̉T̉ỦV̉W̉X̉ỶZ̉ảb̉c̉d̉ẻf̉g̉h̉ỉj̉k̉l̉m̉n̉ỏp̉q̉r̉s̉t̉ủv̉w̉x̉ỷz̉ı̉ȷ̉' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\h{' + char + '}'
text += r'\h{\i}' # dotless i
text += r'\h{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_double_acute_accent():
r"""LaTex command for a double acute accent on the character, e.g. \H{a}"""
answer = 'A̋B̋C̋D̋E̋F̋G̋H̋I̋J̋K̋L̋M̋N̋ŐP̋Q̋R̋S̋T̋ŰV̋W̋X̋Y̋Z̋a̋b̋c̋d̋e̋f̋g̋h̋i̋j̋k̋l̋m̋n̋őp̋q̋r̋s̋t̋űv̋w̋x̋y̋z̋ı̋ȷ̋' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\H{' + char + '}'
text += r'\H{\i}' # dotless i
text += r'\H{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_ogonek():
r"""LaTex command for a ogonek on the character, e.g. \k{a}"""
answer = 'ĄB̨C̨D̨ĘF̨G̨H̨ĮJ̨K̨L̨M̨N̨ǪP̨Q̨R̨S̨T̨ŲV̨W̨X̨Y̨Z̨ąb̨c̨d̨ęf̨g̨h̨įj̨k̨l̨m̨n̨ǫp̨q̨r̨s̨t̨ųv̨w̨x̨y̨z̨ı̨ȷ̨' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\k{' + char + '}'
text += r'\k{\i}' # dotless i
text += r'\k{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_ring_above():
r"""LaTex command for a ring above the character, e.g. \r{a}"""
answer = 'ÅB̊C̊D̊E̊F̊G̊H̊I̊J̊K̊L̊M̊N̊O̊P̊Q̊R̊S̊T̊ŮV̊W̊X̊Y̊Z̊åb̊c̊d̊e̊f̊g̊h̊i̊j̊k̊l̊m̊n̊o̊p̊q̊r̊s̊t̊ův̊ẘx̊ẙz̊ı̊ȷ̊' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\r{' + char + '}'
text += r'\r{\i}' # dotless i
text += r'\r{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_double_inverted_breve():
r"""LaTex command for a double inverted breve on the character, e.g.
\t{a}"""
answer = 'A͡AB͡BC͡CD͡DE͡EF͡FG͡GH͡HI͡IJ͡JK͡KL͡LM͡MN͡NO͡OP͡PQ͡QR͡RS͡ST͡TU͡UV͡VW͡WX͡XY͡YZ͡Za͡ab͡bc͡cd͡de͡ef͡fg͡gh͡hi͡ij͡jk͡kl͡lm͡mn͡no͡op͡pq͡qr͡rs͡st͡tu͡uv͡vw͡wx͡xy͡yz͡zı͡ıȷ͡ȷ' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\t{' + char + '}' + char
text += r'\t{\i}\i' # dotless i
text += r'\t{\j}\j' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_breve():
r"""LaTex command for a breve on the character, e.g. \u{a}"""
answer = 'ĂB̆C̆D̆ĔF̆ĞH̆ĬJ̆K̆L̆M̆N̆ŎP̆Q̆R̆S̆T̆ŬV̆W̆X̆Y̆Z̆ăb̆c̆d̆ĕf̆ğh̆ĭj̆k̆l̆m̆n̆ŏp̆q̆r̆s̆t̆ŭv̆w̆x̆y̆z̆ı̆ȷ̆' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\u{' + char + '}'
text += r'\u{\i}' # dotless i
text += r'\u{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_double_vertical_line_above():
r"""LaTex command for a double vertical line above the character,
e.g. \b{a}"""
answer = 'A̎B̎C̎D̎E̎F̎G̎H̎I̎J̎K̎L̎M̎N̎O̎P̎Q̎R̎S̎T̎U̎V̎W̎X̎Y̎Z̎a̎b̎c̎d̎e̎f̎g̎h̎i̎j̎k̎l̎m̎n̎o̎p̎q̎r̎s̎t̎u̎v̎w̎x̎y̎z̎ı̎ȷ̎' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\U{' + char + '}'
text += r'\U{\i}' # dotless i
text += r'\U{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_caron():
r"""LaTex command for a caron on the character, e.g. \v{a}"""
answer = 'ǍB̌ČĎĚF̌ǦȞǏJ̌ǨĽM̌ŇǑP̌Q̌ŘŠŤǓV̌W̌X̌Y̌Žǎb̌čďěf̌ǧȟǐǰǩľm̌ňǒp̌q̌řšťǔv̌w̌x̌y̌žı̌ȷ̌' # noqa: E501
text = ''
for char in list(alphabet):
text += r'\v{' + char + '}'
text += r'\v{\i}' # dotless i
text += r'\v{\j}' # dotless j
result = decode_latex(text)
check = encode_latex(result)
assert (result == answer and check == text)
def test_latex_symbols():
r"""LaTex command for symbols, e.g. \i"""
answer = 'ıȷłŁøØ' # | |
cert_info = getattr(classified_domain, 'cert_info', None)
if cert_info is None:
domains_certificate_status[
classified_domain.domain] = (
"create_in_progress")
continue
else:
edge_host_name = (
classified_domain.cert_info.
get_edge_host_name())
domain_access_url = service_obj.provider_details[
self.driver.provider_name
].get_domain_access_url(classified_domain.domain)
old_operator_url = (
None if domain_access_url is None else
domain_access_url.get('old_operator_url', None)
)
domains_certificate_status[
classified_domain.domain] = (
classified_domain.cert_info.get_cert_status())
if edge_host_name is None:
continue
provider_access_url = self._get_provider_access_url(
classified_domain, dp, edge_host_name)
links.append({
'href': provider_access_url,
'rel': 'access_url',
'domain': dp,
'certificate': classified_domain.certificate,
'old_operator_url': old_operator_url
})
except Exception:
LOG.exception("Failed to Update Service - {0}".
format(provider_service_id))
return self.responder.failed("failed to update service")
# check to see if a domain was upgraded from http -> https+san
# and keep the policy if it was an upgrade
try:
for policy in policies:
is_upgrade = False
for link_id in ids:
if (
link_id['policy_name'] == policy['policy_name'] and
link_id['protocol'] == 'https' and
policy['protocol'] == 'http'
):
is_upgrade = True
configuration_number = self._get_configuration_number(
util.dict2obj(policy))
# when an upgrade is detected, keep track of the
# old http policy. the old http policy will be deleted
# later.
if is_upgrade is True:
LOG.info(
"{0} was upgraded from http to https san. "
"Queuing old http policy for delete.".format(
policy['policy_name']))
self.http_policy_queue.enqueue_http_policy(
json.dumps({
'configuration_number': configuration_number,
'policy_name': policy['policy_name'],
'project_id': service_obj.project_id
})
)
continue
LOG.info('Starting to delete old policy %s' %
policy['policy_name'])
resp = self.policy_api_client.delete(
self.policy_api_base_url.format(
configuration_number=configuration_number,
policy_name=policy['policy_name']))
LOG.info('akamai response code: %s' % resp.status_code)
LOG.info('akamai response text: %s' % resp.text)
if resp.status_code != 200:
raise RuntimeError(resp.text)
LOG.info('Delete old policy %s complete' %
policy['policy_name'])
except Exception:
LOG.exception("Failed to Update Service - {0}".
format(provider_service_id))
return self.responder.failed("failed to update service")
LOG.info("ids : {0} for service_id {1}".format(json.dumps(ids),
service_obj.service_id))
LOG.info("links : {0} for service_id {1}".format(links,
service_obj.service_id))
LOG.info("domain certificate status : {0} "
"for service_id {1}".format(domains_certificate_status,
service_obj.service_id))
return self.responder.updated(
json.dumps(ids), links,
domains_certificate_status=domains_certificate_status)
except Exception as e:
LOG.exception("Failed to Update Service - {0}".
format(provider_service_id))
LOG.exception('Updating policy failed: %s', traceback.format_exc())
return self.responder.failed(
"failed to update service - %s" % str(e))
def delete(self, project_id, provider_service_id):
# delete needs to provide a list of policy id/domains
# then delete them accordingly
try:
policies = json.loads(provider_service_id)
except Exception:
# raise a more meaningful error for debugging info
try:
msg = 'Mal-formed Akamai ' \
'policy ids: {0}'.format(provider_service_id)
LOG.exception(msg)
raise RuntimeError(msg)
except Exception as e:
return self.responder.failed(str(e))
try:
for policy in policies:
LOG.info('Starting to delete policy %s' % policy)
# TODO(tonytan4ever): needs to look at if service
# domain is an https domain, if it is then a different
# base url is needed
configuration_number = self._get_configuration_number(
util.dict2obj(policy))
resp = self.policy_api_client.delete(
self.policy_api_base_url.format(
configuration_number=configuration_number,
policy_name=policy['policy_name']))
LOG.info('akamai response code: %s' % resp.status_code)
LOG.info('akamai response text: %s' % resp.text)
if resp.status_code != 200:
raise RuntimeError(resp.text)
except Exception as e:
LOG.exception("Failed to Delete Service - {0}".
format(provider_service_id))
return self.responder.failed(str(e))
else:
LOG.info("Successfully Deleted Service - {0}".
format(provider_service_id))
return self.responder.deleted(provider_service_id)
def purge(self, provider_service_id, service_obj, hard=True,
purge_url='/*', network='production'):
try:
if not purge_url.startswith('/'):
purge_url = ('/' + purge_url)
if not hard:
return self._policy(provider_service_id, service_obj,
invalidate=True, invalidate_url=purge_url)
if purge_url == '/*':
raise RuntimeError('Akamai purge-all functionality has not'
' been implemented')
purge_type = 'delete'
try:
policies = json.loads(provider_service_id)
except Exception:
# raise a more meaningful error for debugging info
msg = 'Mal-formed Akamai ' \
'policy ids: {0}'.format(provider_service_id)
LOG.exception(msg)
raise RuntimeError(msg)
for policy in policies:
url_scheme = None
if policy['protocol'] == 'http':
url_scheme = 'http://'
elif policy['protocol'] == 'https':
url_scheme = 'https://'
actual_purge_url = ''.join([url_scheme,
policy['policy_name'],
purge_url])
data = {
'objects': [
actual_purge_url
]
}
resp = self.ccu_api_client.post(self.ccu_api_base_url.format(
purge_type=purge_type, network=network),
data=json.dumps(data),
headers=(
self.request_header
))
if resp.status_code != 201:
raise RuntimeError(resp.text)
LOG.info("purge response: %s for project id: %s, "
"on: %s, purge_url: %s"
% (resp.text, service_obj.project_id,
provider_service_id, actual_purge_url))
return self.responder.purged(provider_service_id,
purge_url=purge_url)
except Exception as e:
LOG.exception("Failed to Purge/Invalidate Service - {0}".
format(provider_service_id))
return self.responder.failed(str(e))
def get_subcustomer_id(self, project_id, domain):
# subcustomer_id now just set for project_id
return str(project_id)
def _get_subcustomer_id_region(self, configuration_number,
project_id, domain):
LOG.info("Starting to get "
"Sub-Customer ID "
"region for domain: {0}".format(domain))
resp = self.subcustomer_api_client.get(
self.akamai_subcustomer_api_base_url.format(
configuration_number=configuration_number,
subcustomer_id=self.get_subcustomer_id(project_id,
domain)))
if resp.ok:
region = json.loads(resp.content)["geo"]
LOG.info("Sub-Customer ID region: {0} for domain: {1}".format(
region, domain))
return (region, self.get_subcustomer_id(project_id,
domain))
else:
LOG.info("Sub-Customer ID region retrieval for "
"domain: {0} failed!".format(domain))
LOG.info("Response Code: {0}".format(resp.status_code))
msg = "Response Text: {0}".format(resp.text)
LOG.info(msg)
raise RuntimeError(msg)
def _put_subcustomer_id_region(self, configuration_number,
project_id, domain, region='US'):
LOG.info("Starting to put Sub-Customer ID "
"region for domain: {0}".format(domain))
resp = self.subcustomer_api_client.put(
self.akamai_subcustomer_api_base_url.format(
configuration_number=configuration_number,
subcustomer_id=self.get_subcustomer_id(project_id, domain)),
data=json.dumps({"geo": region}))
if resp.ok:
LOG.info("Sub-Customer ID region "
"set to : {0} "
"for domain: {1}".format(region,
domain))
else:
msg = "Setting Sub-Customer ID region for " \
"domain: {0} failed!".format(domain)
LOG.info(msg)
raise RuntimeError(msg)
def _delete_subcustomer_id_region(self, configuration_number,
project_id, domain):
LOG.info("Starting to delete "
"Sub-Customer ID for "
"domain: {0}".format(domain))
resp = self.subcustomer_api_client.delete(
self.akamai_subcustomer_api_base_url.format(
configuration_number=configuration_number,
subcustomer_id=self.get_subcustomer_id(project_id, domain)))
if resp.ok:
LOG.info("Sub-Customer ID deleted for domain: {0}".format(
domain))
else:
msg = "Deleting Sub-Customer ID for " \
"domain: {0} failed!".format(domain)
LOG.info(msg)
raise RuntimeError(msg)
@decorators.lazy_property(write=False)
def current_customer(self):
return None
def _classify_domains(self, domains_list):
# classify domains into different categories based on first two level
# of domains, group them together
# for right now we just use the whole domain as the digital property
return domains_list
def _process_new_origin(self, origin, rules_list):
# NOTE(TheSriram): ensure that request_url starts with a '/'
for rule in origin.rules:
if rule.request_url:
if not rule.request_url.startswith('/'):
rule.request_url = ('/' + rule.request_url)
rule_dict_template = {
'matches': [],
'behaviors': []
}
origin_behavior_dict = {
'name': 'origin',
'value': '-',
'params': {
# missing digitalProperty(domain) for now
'originDomain': '',
'hostHeaderType': 'digital_property',
'cacheKeyType': 'digital_property',
'hostHeaderValue': '-',
'cacheKeyValue': '-'
}
}
if origin.hostheadertype == 'custom':
origin_behavior_dict['params']['hostHeaderType'] = 'fixed'
origin_behavior_dict['params']['hostHeaderValue'] = \
origin.hostheadervalue
elif origin.hostheadertype == 'origin':
origin_behavior_dict['params']['hostHeaderType'] = 'origin'
origin_behavior_dict['params']['hostHeaderValue'] = \
origin.hostheadervalue
wildcards = []
# this is the global 'url-wildcard' rule
if origin.rules == []:
wildcards.append("/*")
else:
for rule in origin.rules:
wildcards.append(rule.request_url)
if len(wildcards) > 0:
match_rule = {
'name': 'url-wildcard',
'value': " ".join(wildcards)
}
rule_dict_template['matches'].append(
match_rule)
origin_behavior_dict['params']['originDomain'] = (
origin.origin
)
rule_dict_template['behaviors'].append(
origin_behavior_dict
)
# Append the new generated rules
rules_list.append(rule_dict_template)
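# For reference, a single origin with no rules and hostheadertype 'origin'
# yields a rule entry of roughly this shape (domain values are illustrative;
# 'digitalProperty' is added later by _process_new_domain):
#
#   {'matches': [{'name': 'url-wildcard', 'value': '/*'}],
#    'behaviors': [{'name': 'origin', 'value': '-',
#                   'params': {'originDomain': 'origin.example.com',
#                              'hostHeaderType': 'origin',
#                              'hostHeaderValue': 'origin.example.com',
#                              'cacheKeyType': 'digital_property',
#                              'cacheKeyValue': '-'}}]}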
def _process_new_domain(self, domain, rules_list):
dp = domain.domain
for rule in rules_list:
for behavior in rule['behaviors']:
if 'params' in behavior:
behavior['params']['digitalProperty'] = dp
return dp
def _process_restriction_rules(self, restriction_rules, rules_list):
# NOTE(TheSriram): ensure that request_url starts with a '/'
for restriction_rule in restriction_rules:
for rule_entry in restriction_rule.rules:
if rule_entry.request_url:
if not rule_entry.request_url.startswith('/'):
rule_entry.request_url = (
'/' + rule_entry.request_url)
# restriction implementation for akamai
# for each restriction rule
# restriction entities include: referrer, geography, client_ip
restriction_entities = ['referrer', 'geography', 'client_ip']
class entityRequestUrlMappingList(dict):
"""A dictionary with a name attribute"""
def __init__(self, name, orig_dict):
self.name = name
self.update(orig_dict)
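# The two mappings built below are keyed entity -> request_url -> [rules],
# e.g. (illustrative) white_list_entities['referrer']['/img'] = [rule, ...],
# so that each (entity, request_url) pair later collapses into a single
# Akamai whitelist/blacklist behavior.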
# classify restriction/rules based on their white/black-list
white_list_entities = entityRequestUrlMappingList(
'whitelist',
{entity: {} for entity
in restriction_entities})
black_list_entities = entityRequestUrlMappingList(
'blacklist',
{entity: {} for entity
in restriction_entities})
for restriction_rule in restriction_rules:
entity_rule_mapping = {}
if restriction_rule.access == 'whitelist':
entity_rule_mapping = white_list_entities
elif restriction_rule.access == 'blacklist':
entity_rule_mapping = black_list_entities
for rule_entry in restriction_rule.rules:
# classify rules based on their entities, then request_urls
if getattr(rule_entry, "referrer", None) is not None:
if (rule_entry.request_url not in
entity_rule_mapping['referrer']):
entity_rule_mapping['referrer'][rule_entry.request_url]\
= [rule_entry]
else:
entity_rule_mapping['referrer'][rule_entry.request_url]\
.append(rule_entry)
elif getattr(rule_entry, "client_ip", None) is not None:
if (rule_entry.request_url not in
entity_rule_mapping['client_ip']):
entity_rule_mapping['client_ip'][rule_entry.request_url]\
= [rule_entry]
else:
entity_rule_mapping['client_ip'][rule_entry.request_url]\
.append(rule_entry)
elif getattr(rule_entry, "geography", None) is not None:
if (rule_entry.request_url not in
entity_rule_mapping['geography']):
entity_rule_mapping['geography'][rule_entry.request_url]\
= [rule_entry]
else:
entity_rule_mapping['geography'][rule_entry.request_url]\
.append(rule_entry)
for entity_request_url_rule_mapping in [white_list_entities,
black_list_entities]:
for entity in entity_request_url_rule_mapping:
for request_url in entity_request_url_rule_mapping[entity]:
found_match = False
# need to write up a function gets the value of behavior
behavior_name = self._get_behavior_name(
entity, entity_request_url_rule_mapping.name)
behavior_value = self._get_behavior_value(
entity,
entity_request_url_rule_mapping[entity][request_url])
behavior_dict = {
'name': behavior_name,
'value': behavior_value
}
if entity == 'geography':
behavior_dict['type'] = 'country'
# if we have a matches rule already
for rule in rules_list:
for match in rule['matches']:
if request_url == match['value']:
# we found an existing matching rule.
# add the whitelist/blacklist behavior to it
found_match = True
rule['behaviors'].append(behavior_dict)
# if there is no matches entry yet for this rule
if not found_match:
# create an akamai rule
rule_dict_template = {
'matches': [],
'behaviors': []
}
# add the match and behavior to this new rule
if rule_entry.request_url is not | |
message['status'] = 'READY'
message['error'] = None
message['timestamp'] = time.time()
message['sla_id'] = self.services[serv_id]['sla_id']
message['policy_id'] = self.services[serv_id]['policy_id']
message['nsr'] = self.services[serv_id]['nsr']
message['vnfrs'] = []
for function in self.services[serv_id]['function']:
message['vnfrs'].append(function['vnfr'])
LOG.debug("Payload of message " + str(message))
orig_corr_id = self.services[serv_id]['original_corr_id']
self.manoconn.notify(t.GK_CREATE,
yaml.dump(message),
correlation_id=orig_corr_id)
def inform_gk(self, serv_id):
"""
This method informs the gatekeeper.
"""
topic = self.services[serv_id]['topic']
LOG.info("Service " + serv_id + ": Reporting result on " + topic)
message = {}
message['status'] = 'READY'
message['workflow'] = self.services[serv_id]['current_workflow']
message['error'] = None
message['timestamp'] = time.time()
message['nsr'] = self.services[serv_id]['nsr']
message['vnfrs'] = []
for function in self.services[serv_id]['function']:
message['vnfrs'].append(function['vnfr'])
if 'start_time' in self.services[serv_id]:
start_time = self.services[serv_id]['start_time']
message['duration'] = time.time() - start_time
LOG.debug("Payload of message " + str(message))
orig_corr_id = self.services[serv_id]['original_corr_id']
self.manoconn.notify(topic,
yaml.dump(message),
correlation_id=orig_corr_id)
###########
# SLM tasks
###########
def add_service_to_ledger(self, payload, corr_id):
"""
This method adds new services with their specifics to the ledger,
so other functions can use this information.
:param payload: the payload of the received message
:param corr_id: the correlation id of the received message
"""
# Generate an instance uuid for the service
serv_id = str(uuid.uuid4())
# Add the service to the ledger and add instance ids
self.services[serv_id] = {}
self.services[serv_id]['nsd'] = payload['NSD']
self.services[serv_id]['id'] = serv_id
self.services[serv_id]['cnf'] = False
msg = ": NSD uuid is " + str(payload['NSD']['uuid'])
LOG.info("Service " + serv_id + msg)
msg = ": NSD name is " + str(payload['NSD']['name'])
LOG.info("Service " + serv_id + msg)
nsd = self.services[serv_id]['nsd']
# Adjust for flavour
self.services[serv_id]['flavour'] = None
if 'flavor' in payload.keys():
if payload['flavor']:
self.services[serv_id]['flavour'] = payload['flavor']
elif 'flavour' in payload.keys():
if payload['flavour']:
self.services[serv_id]['flavour'] = payload['flavour']
msg = str(self.services[serv_id]['flavour'])
LOG.info("The selected flavour is: " + msg)
if self.services[serv_id]['flavour']:
flavour_dict = {}
for flavour in nsd['deployment_flavours']:
if flavour['name'] == self.services[serv_id]['flavour']:
flavour_dict = flavour
break
for key in flavour_dict.keys():
if key != 'name':
nsd[key] = flavour_dict[key]
if 'deployment_flavours' in nsd:
del nsd['deployment_flavours']
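# Illustrative example of the flavour merge above (the field name is
# hypothetical): with flavour 'gold' and an NSD whose deployment_flavours
# contain {'name': 'gold', 'monitoring_parameters': [...]}, the
# 'monitoring_parameters' entry is copied onto the NSD root and the
# 'deployment_flavours' list itself is removed afterwards.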
self.services[serv_id]['function'] = []
for key in payload.keys():
if key[:4] == 'VNFD':
vnf_id = str(uuid.uuid4())
msg = "VNFD instance id generated: " + vnf_id
LOG.info("Service " + serv_id + msg)
vnfd = payload[key]
vnf_base = {'start': {'trigger': True, 'payload': {}},
'stop': {'trigger': True, 'payload': {}},
'configure': {'trigger': True, 'payload': {}},
'scale': {'trigger': True, 'payload': {}},
'vnfd': vnfd,
'id': vnf_id,
'flavour': None}
for vnf_nsd in nsd['network_functions']:
if vnf_nsd['vnf_name'] == vnfd['name'] and \
vnf_nsd['vnf_vendor'] == vnfd['vendor'] and \
vnf_nsd['vnf_version'] == vnfd['version']:
if vnf_nsd.get('vnf_flavour'):
vnf_base['flavour'] = vnf_nsd['vnf_flavour']
flavour_dict = {}
for flavour in vnfd['deployment_flavours']:
if flavour['name'] == vnf_base['flavour']:
flavour_dict = flavour
break
for key in flavour_dict.keys():
if key != 'name':
vnfd[key] = flavour_dict[key]
if 'virtual_deployment_units' in vnf_base['vnfd'].keys():
for vdu in vnf_base['vnfd']['virtual_deployment_units']:
vdu['id'] = vdu['id'] + '-' + vnf_id
if 'cloudnative_deployment_units' in vnf_base['vnfd'].keys():
self.services[serv_id]['cnf'] = True
for vdu in vnf_base['vnfd']['cloudnative_deployment_units']:
vdu['id'] = vdu['id'] + '-' + vnf_id
self.services[serv_id]['function'].append(vnf_base)
# Add the correlation id to the ledger
self.services[serv_id]['original_corr_id'] = corr_id
# Add payload to the ledger
self.services[serv_id]['payload'] = payload
self.services[serv_id]['infrastructure'] = {}
# Create the service schedule
self.services[serv_id]['schedule'] = []
# Create the SSM dict if SSMs are defined in NSD
ssm_dict = tools.get_sm_from_descriptor(payload['NSD'])
self.services[serv_id]['ssm'] = ssm_dict
LOG.debug("Service " + serv_id + ": SSM dict: " + str(ssm_dict))
# Create counter for vnfs
self.services[serv_id]['vnfs_to_resp'] = 0
self.services[serv_id]['vims_to_resp'] = 0
# Create the chain pause and kill flag
self.services[serv_id]['pause_chain'] = False
self.services[serv_id]['kill_chain'] = False
# Create IP Mapping
self.services[serv_id]['ip_mapping'] = []
# Add ingress and egress fields
self.services[serv_id]['ingress'] = None
self.services[serv_id]['egress'] = None
if 'ingresses' in payload.keys():
if payload['ingresses']:
if payload['ingresses'] != '[]':
self.services[serv_id]['ingress'] = payload['ingresses']
if 'egresses' in payload.keys():
if payload['egresses']:
if payload['egresses'] != '[]':
self.services[serv_id]['egress'] = payload['egresses']
# Add params
self.services[serv_id]['params'] = payload.get('params')
# Add user data to ledger
self.services[serv_id]['user_data'] = payload['user_data']
# Add user defined mapping input to ledger
self.services[serv_id]['input_mapping'] = {'vnfs': [], 'vls': []}
int_map = self.services[serv_id]['input_mapping']
if 'mapping' in payload.keys():
ext_map = payload['mapping']
if 'network_functions' in ext_map.keys():
int_map['vnfs'] = ext_map['network_functions']
if 'virtual_links' in ext_map.keys():
int_map['vls'] = ext_map['virtual_links']
# Add keys to ledger
try:
keys = payload['user_data']['customer']['keys']
self.services[serv_id]['public_key'] = keys['public']
self.services[serv_id]['private_key'] = keys['private']
except Exception:
msg = ": extracting keys failed " + str(payload['user_data'])
LOG.info("Service " + serv_id + msg)
self.services[serv_id]['public_key'] = None
self.services[serv_id]['private_key'] = None
LOG.info("Public key: " + str(self.services[serv_id]['public_key']))
# Add customer constraints to ledger
if 'policies' in payload['user_data']['customer'].keys():
policies = payload['user_data']['customer']['policies']
self.services[serv_id]['customer_policies'] = policies
else:
self.services[serv_id]['customer_policies'] = {}
# Add policy and sla id
self.services[serv_id]['sla_id'] = None
self.services[serv_id]['policy_id'] = None
customer = payload['user_data']['customer']
if 'sla_id' in customer.keys():
if customer['sla_id'] != '':
self.services[serv_id]['sla_id'] = customer['sla_id']
if 'policy_id' in customer.keys():
if customer['policy_id'] != '':
self.services[serv_id]['policy_id'] = customer['policy_id']
return serv_id
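# For orientation, a minimal payload this method can digest looks roughly
# like the sketch below (key names are the ones accessed above; values and
# the number of VNFDs are illustrative):
#
#   {'NSD': {'uuid': ..., 'name': ..., 'network_functions': [...],
#            'deployment_flavours': [...]},
#    'VNFD0': {...},
#    'user_data': {'customer': {'keys': {'public': ..., 'private': ...},
#                               'policies': {}, 'sla_id': '', 'policy_id': ''},
#                  'developer': {}},
#    'ingresses': [...], 'egresses': [...], 'params': {...},
#    'mapping': {'network_functions': [...], 'virtual_links': [...]},
#    'flavour': 'gold'}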
def recreate_ledger(self, corr_id, serv_id):
"""
This method recreates an entry in the ledger for a service
based on the service instance id.
:param corr_id: the correlation id of the received message
:param serv_id: the service instance id
"""
def request_returned_with_error(request, file_type):
code = str(request['error'])
err = str(request['content'])
msg = "Retrieving of " + file_type + ": " + code + " " + err
LOG.info("Service " + serv_id + ': ' + msg)
self.services[serv_id]['error'] = msg
# base of the ledger
self.services[serv_id] = {}
self.services[serv_id]['original_corr_id'] = corr_id
self.services[serv_id]['schedule'] = []
self.services[serv_id]['kill_chain'] = False
self.services[serv_id]['infrastructure'] = {}
self.services[serv_id]['vnfs_to_resp'] = 0
self.services[serv_id]['vims_to_resp'] = 0
self.services[serv_id]['pause_chain'] = False
self.services[serv_id]['error'] = None
self.services[serv_id]['ip_mapping'] = []
self.services[serv_id]['ingress'] = None
self.services[serv_id]['egress'] = None
self.services[serv_id]['public_key'] = None
self.services[serv_id]['private_key'] = None
self.services[serv_id]['cnf'] = False
self.services[serv_id]['user_data'] = {}
self.services[serv_id]['user_data']['customer'] = {}
self.services[serv_id]['user_data']['developer'] = {}
self.services[serv_id]['user_data']['customer']['email'] = None
self.services[serv_id]['user_data']['customer']['phone'] = None
self.services[serv_id]['user_data']['developer']['email'] = None
self.services[serv_id]['user_data']['developer']['phone'] = None
self.services[serv_id]['customer_policies'] = {}
self.services[serv_id]['input_mapping'] = {'vnfs': [], 'vls': []}
# Retrieve the service record based on the service instance id
base = t.nsr_path + "/"
LOG.info("Requesting NSR for: " + str(base) + str(serv_id))
request = tools.getRestData(base, serv_id)
if request['error'] is not None:
request_returned_with_error(request, 'NSR')
return None
self.services[serv_id]['nsr'] = request['content']
del self.services[serv_id]['nsr']["uuid"]
del self.services[serv_id]['nsr']["updated_at"]
del self.services[serv_id]['nsr']["created_at"]
LOG.info("Service " + serv_id + ": Recreating ledger: NSR retrieved.")
# Retrieve the NSD
nsr = self.services[serv_id]['nsr']
self.services[serv_id]['nsr']['id'] = serv_id
nsd_uuid = nsr['descriptor_reference']
head = {'content-type': 'application/x-yaml'}
LOG.info("Request NSD for: " + str(t.nsd_path + '/') + str(nsd_uuid))
request = tools.getRestData(t.nsd_path + '/', nsd_uuid, header=head)
if request['error'] is not None:
request_returned_with_error(request, 'NSD')
return None
self.services[serv_id]['nsd'] = request['content']['nsd']
self.services[serv_id]['nsd']['uuid'] = nsd_uuid
LOG.info("Service " + serv_id + ": Recreating ledger: NSD retrieved.")
nsd = self.services[serv_id]['nsd']
# adjust for flavour
self.services[serv_id]['flavour'] = nsr.get('flavour')
if self.services[serv_id]['flavour']:
flavour_dict = {}
for flavour in nsd['deployment_flavours']:
if flavour['name'] == self.services[serv_id]['flavour']:
flavour_dict = flavour
break
for key in flavour_dict.keys():
if key != 'name':
nsd[key] = flavour_dict[key]
if 'deployment_flavours' in nsd:
del nsd['deployment_flavours']
# Retrieve the function records based on the service record
self.services[serv_id]['function'] = []
nsr = self.services[serv_id]['nsr']
for vnf in nsr['network_functions']:
base = t.vnfr_path + "/"
request = tools.getRestData(base, vnf['vnfr_id'])
if request['error'] is not None:
request_returned_with_error(request, 'VNFR')
return None
if 'virtual_deployment_units' in request['content'].keys():
vdu = request['content']['virtual_deployment_units'][0]
vim_id = vdu['vnfc_instance'][0]['vim_id']
if 'cloudnative_deployment_units' in request['content'].keys():
self.services[serv_id]['cnf'] = True
vdu = request['content']['cloudnative_deployment_units'][0]
vim_id = vdu['vim_id']
new_function = {'id': vnf['vnfr_id'],
'start': {'trigger': False, 'payload': {}},
'stop': {'trigger': True, 'payload': {}},
'configure': {'trigger': True, 'payload': {}},
'scale': {'trigger': True, 'payload': {}},
'vnfr': request['content'],
'vim_uuid': vim_id,
'flavour': request['content'].get('flavour')}
del new_function['vnfr']['updated_at']
del new_function['vnfr']['created_at']
del new_function['vnfr']['uuid']
new_function['vnfr']['id'] = vnf['vnfr_id']
self.services[serv_id]['function'].append(new_function)
msg = ": Recreating ledger: VNFR retrieved."
LOG.info("Service " + serv_id + msg)
# Retrieve the VNFDS based on the function records
for vnf in self.services[serv_id]['function']:
vnfd_id = vnf['vnfr']['descriptor_reference']
req = tools.getRestData(t.vnfd_path + '/', vnfd_id, header=head)
if req['error'] is not None:
request_returned_with_error(req, 'VNFD')
return None
vnf['vnfd'] = req['content']['vnfd']
vnf['vnfd']['uuid'] = vnfd_id
if 'virtual_deployment_units' in vnf['vnfd'].keys():
for vdu in vnf['vnfd']['virtual_deployment_units']:
vdu['id'] = vdu['id'] + '-' + vnf['id']
if 'cloudnative_deployment_units' in vnf['vnfd'].keys():
for vdu in vnf['vnfd']['cloudnative_deployment_units']:
vdu['id'] = vdu['id'] + '-' + vnf['id']
LOG.info("Service " + serv_id + ": Recreate: VNFD retrieved.")
if vnf['flavour']:
flavour_dict = {}
for flavour in vnf['vnfd']['deployment_flavours']:
if flavour['name'] == vnf['flavour']:
flavour_dict = flavour
break
for key in flavour_dict.keys():
if key != 'name':
vnf['vnfd'][key] = flavour_dict[key]
LOG.info("Serice " +
serv_id + ": Recreating ledger: VNFDs retrieved.")
# Retrieve the params
self.services[serv_id]['params'] = nsr.get('params')
# Retrieve the deployed SSMs based on the NSD
nsd = self.services[serv_id]['nsd']
ssm_dict = tools.get_sm_from_descriptor(nsd)
self.services[serv_id]['ssm'] = ssm_dict
# Recreate connection | |
import re
import os
import numpy as np
import tensorflow as tf
import json
from tqdm import *
from math import sqrt
from PIL import Image
from time import sleep
from keras import backend as K
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.generic_utils import Progbar
from keras.models import model_from_json
import img_utils
def fit_flow_from_directory(config, fit_sample_size, directory, max_samples,
target_size=None, color_mode='grayscale',
batch_size=32, shuffle=True, seed=None,
follow_links=False, nb_windows=25,
sample_shape=(255, 340, 1)):
stats_file_path = "statistics.txt"
drone_data_gen = DroneDataGenerator()
fit_drone_data_gen = DroneDataGenerator(**config)
print("[*] Generating statistically representative samples...")
batches = drone_data_gen.flow_from_directory(directory, max_samples, target_size,
color_mode, batch_size, shuffle,
seed, follow_links, nb_windows)
print("[*] Fitting the generator on the samples...")
for i in tqdm(range(int(batches.samples/batch_size))):
imgs, labels = next(batches)
if fit_sample_size < 1:
index = np.random.choice(imgs.shape[0], int(batch_size*fit_sample_size),
replace=False)
fit_drone_data_gen.fit(imgs[index])
else:
fit_drone_data_gen.fit(imgs[:])
del drone_data_gen
del batches
print("[*] Done! Mean: {} - STD: {}".format(fit_drone_data_gen.mean,
fit_drone_data_gen.std))
with open(os.path.join(directory, stats_file_path), "w") as stats_file:
stats_file.write("mean: {}".format(fit_drone_data_gen.mean))
stats_file.write("std: {}".format(fit_drone_data_gen.std))
print("[*] Saved mean and std as {}".format(stats_file_path))
return fit_drone_data_gen.flow_from_directory(directory, max_samples,
target_size, color_mode,
batch_size, shuffle, seed,
follow_links, nb_windows), fit_drone_data_gen.mean, fit_drone_data_gen.std
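# A hedged usage sketch for fit_flow_from_directory (illustrative only: the
# directory path, target size, augmentation strengths and featurewise options
# below are assumptions, not values required by this module):
def _example_fit_flow_usage():
    config = {
        'channel_shift_range': 0.2,       # consumed by DroneDataGenerator
        'shading_factor': 0.3,            # consumed by DroneDataGenerator
        'salt_and_pepper_factor': 0.01,   # consumed by DroneDataGenerator
        'featurewise_center': True,       # regular ImageDataGenerator options
        'featurewise_std_normalization': True,
    }
    batches, mean, std = fit_flow_from_directory(
        config, fit_sample_size=0.25, directory='data/training',
        max_samples=None, target_size=(200, 300), color_mode='rgb',
        batch_size=32, nb_windows=25)
    batch_x, batch_y = next(batches)
    return batch_x.shape, batch_y.shape, mean, std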
class DroneDataGenerator(ImageDataGenerator):
"""
Generate minibatches of images and labels with real-time augmentation.
The only function that changes w.r.t. the parent class is the flow that
generates data, which needed adapting to a different directory structure
and label format. All the remaining functions remain unchanged.
For an example usage, see the evaluate.py script.
"""
def __init__(self, *args, **kwargs):
if 'channel_shift_range' in kwargs:
self.channelShiftFactor = kwargs['channel_shift_range']
del kwargs['channel_shift_range']
else:
self.channelShiftFactor = 0
if 'shading_factor' in kwargs:
self.shadingFactor = kwargs['shading_factor']
del kwargs['shading_factor']
else:
self.shadingFactor = 0
if 'salt_and_pepper_factor' in kwargs:
self.saltAndPepperFactor = kwargs['salt_and_pepper_factor']
del kwargs['salt_and_pepper_factor']
else:
self.saltAndPepperFactor = 0
super(DroneDataGenerator, self).__init__(*args, **kwargs)
def flow_from_directory(self, directory, max_samples, target_size=None,
color_mode='grayscale', batch_size=32,
shuffle=True, seed=None, follow_links=False, nb_windows=25,
mean=None, std=None):
return DroneDirectoryIterator(
directory, max_samples, self,
target_size=target_size, color_mode=color_mode,
batch_size=batch_size, shuffle=shuffle, seed=seed,
follow_links=follow_links, nb_windows=nb_windows, mean=None,
std=None)
class DroneDirectoryIterator(Iterator):
'''
Class for managing data loading of images and labels.
The assumed folder structure is:
root_folder/
dataset_1/
images/
img_01.png
...
annotations.csv
dataset_2/
images/
img_01.png
...
annotations.csv
...
# Arguments
directory: Path to the root directory to read data from.
image_data_generator: Image Generator.
target_size: tuple of integers, dimensions to resize input images to.
max_samples: Integer or None, cap on the number of images loaded per annotated folder.
nb_windows: Integer (a square number), number of grid windows used for the gate localization labels.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
batch_size: The desired batch size
shuffle: Whether to shuffle data or not
seed : numpy seed to shuffle data
follow_links: Bool, whether to follow symbolic links or not
# TODO: Add functionality to save images to have a look at the augmentation
'''
def __init__(self, directory, max_samples, image_data_generator,
target_size=None, color_mode='grayscale',
batch_size=32, shuffle=True, seed=None, follow_links=False,
nb_windows=25, mean=None, std=None):
self.samples = 0
self.max_samples = max_samples
self.formats = {'png', 'jpg'}
self.directory = directory
self.image_data_generator = image_data_generator
# self.target_size = tuple(target_size)
self.nb_windows = nb_windows
self.follow_links = follow_links
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
if self.color_mode == 'rgb':
self.image_shape = target_size + (3,)
else:
self.image_shape = target_size + (1,)
# Idea = associate each filename with a corresponding steering or label
self.filenames = []
self.ground_truth_loc = dict()
self.gt_coord = dict()
self.ground_truth_rot = []
# For featurewise standardization
self.mean = mean
self.std = std
self.saved_transforms = 0
if self.image_data_generator.channelShiftFactor > 0:
print("[*] Saving transformed images to {}".format(os.path.join(self.directory,
"img_transforms")))
if not os.path.isdir(os.path.join(self.directory, "img_transforms")):
os.mkdir(os.path.join(self.directory, "img_transforms"))
self._walk_dir(directory)
# Conversion of list into array
# self.ground_truth_loc = np.array(self.ground_truth_loc, dtype = K.floatx())
self.ground_truth_rot = np.array(self.ground_truth_rot, dtype = K.floatx())
assert self.samples > 0, "Empty dataset!"
super(DroneDirectoryIterator, self).__init__(self.samples,
batch_size, shuffle, seed)
def _walk_dir(self, path):
for root, dirs, files in os.walk(path):
if "annotations.csv" in files:
sub_dirs = os.path.relpath(root, path).split('/')
sub_dirs = ''.join(sub_dirs)
self._parse_dir(root, sub_dirs)
def _parse_dir(self, path, sub_dirs):
annotations_path = os.path.join(path, "annotations.csv")
images_path = os.path.join(path, "images")
rot_annotations = []
with open(annotations_path, 'r') as annotations_file:
annotations_file.readline() # Skip the header
for line in annotations_file:
line = line.split(',')
frame_no = int(line[0].split('.')[0])
key = "{}_{}".format(sub_dirs, frame_no)
gate_center = [int(line[1]), int(line[2])]
on_screen = (0 <= gate_center[0] <= self.image_shape[0] and
             0 <= gate_center[1] <= self.image_shape[1])
self.ground_truth_loc[key] =\
self._compute_location_labels(line[1:3], on_screen)
# self._compute_location_labels(line[1:3], bool(int(float(line[-1]))))
self.gt_coord[key] = "{}x{}".format(line[1], line[2])
rot_annotations.append(line[3])
if len(self.ground_truth_loc) == 0 or len(rot_annotations) == 0:
print("[!] Annotations could not be loaded!")
raise Exception("Annotations not found")
n = 0
for filename in sorted(os.listdir(images_path)):
if self.max_samples and n == self.max_samples:
break
is_valid = False
for extension in self.formats:
if filename.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.filenames.append(os.path.relpath(os.path.join(images_path,
filename),
self.directory))
self.samples += 1
n += 1
def _compute_location_labels(self, coordinates, visible):
'''
Computes the gate location window from the given pixel coordinates, and
returns a list of binary labels corresponding to the N + 1 windows (+1
because a special window is defined for the case where the gate is not
visible).
'''
# TODO: Use keras.utils.to_categorical(y, num_classes=None, dtype='float32')
# which does this automatically!
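# Worked example (sizes and coordinates are illustrative): with
# nb_windows=25 and image_shape (200, 300, ...), sqrt_win is 5,
# windows_width = [40, 80, 120, 160, 200] and
# windows_height = [60, 120, 180, 240, 300]. For visible coordinates
# (100, 150): 100 < 120 gives i = 3 and 150 < 180 gives j = 3, so
# labels[int(3 + (3 - 1) * 5)] = labels[13] is set to 1. When the gate is
# not visible, labels[0] = 1 instead.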
sqrt_win = sqrt(self.nb_windows)
windows_width = [int(i * self.image_shape[0] / sqrt_win)
for i in range(1, int(sqrt_win) + 1)]
windows_height = [int(i * self.image_shape[1] / sqrt_win)
for i in range(1, int(sqrt_win) + 1)]
i, j = 0, 0
if not visible:
labels = [0 for i in range(self.nb_windows + 1)]
labels[0] = 1
return labels
for index, window_i in enumerate(windows_width):
if int(float(coordinates[0])) < window_i:
i = index + 1 # Start at 1
break
for index, window_h in enumerate(windows_height):
if int(float(coordinates[1])) < window_h:
j = index + 1 # Start at 1
break
labels = [0 for i in range(self.nb_windows + 1)]
labels[int(i + ((j-1)*sqrt_win))] = 1
return labels
def next(self):
"""
Public function to fetch next batch.
# Returns
The next batch of images and labels.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
# TODO: Batch orientation
def _get_batches_of_transformed_samples(self, index_array):
current_batch_size = index_array.shape[0]
# Image transformation is not under thread lock, so it can be done in
# parallel
batch_x = np.zeros((current_batch_size,) + (self.image_shape[1],
self.image_shape[0],
self.image_shape[2]),
dtype=K.floatx())
batch_localization = np.zeros((current_batch_size, self.nb_windows + 1,),
dtype=K.floatx())
batch_orientation = np.zeros((current_batch_size, 2,),
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# Build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
x = img_utils.load_img(os.path.join(self.directory, fname),
grayscale=grayscale)
# 50% chances of transforming the image
shifting = np.random.rand() <= 0.5
if shifting and self.image_data_generator.channelShiftFactor > 0:
shifted_x = img_utils.random_channel_shift(x, self.image_data_generator.channelShiftFactor)
else:
shifted_x = x
shading = np.random.rand() <= 0.5
if shading and self.image_data_generator.shadingFactor > 0:
shaded_x = img_utils.add_shade(shifted_x,
weight=self.image_data_generator.shadingFactor)
else:
shaded_x = shifted_x
salting = np.random.rand() <= 0.5
if salting and self.image_data_generator.saltAndPepperFactor > 0:
salted_x = img_utils.add_salt_and_pepper(shaded_x,
amount=self.image_data_generator.saltAndPepperFactor)
else:
salted_x = shaded_x
if shifting and self.image_data_generator.channelShiftFactor > 0 and self.saved_transforms < 50:
Image.fromarray(x.astype(np.uint8), "RGB").save(os.path.join(self.directory,
"img_transforms",
"original_{}.jpg".format(self.saved_transforms)))
Image.fromarray(shifted_x.astype(np.uint8), "RGB").save(os.path.join(self.directory,
"img_transforms",
"transformed{}.jpg".format(self.saved_transforms)))
self.saved_transforms += 1
self.image_data_generator.standardize(salted_x)
batch_x[i] = salted_x
# Build batch of localization and orientation data
# Get rid of the filename and images/ folder
sub_dirs_str = os.path.split(os.path.split(fname)[0])[0]
sub_dirs_str = sub_dirs_str.replace('/', '')
frame_no = int(os.path.split(fname)[-1].split('.')[0])
key = "{}_{}".format(sub_dirs_str, frame_no)
# batch_localization[i, 0] = 1.0
if key in self.ground_truth_loc:
batch_localization[i, :] = self.ground_truth_loc[key]
else:
batch_localization[i, 0] = 0
batch_orientation[i, 0] = 0.0
# batch_orientation[i, 1] = self.ground_truth_rot[fname]
if self.mean and self.std:
batch_x = (batch_x - self.mean) / (self.std + 1e-6)  # epsilon avoids division by zero
batch_y = batch_localization # TODO: add batch_orientation
return batch_x, batch_y
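# Typical consumption of DroneDirectoryIterator (illustrative; it mirrors the
# next(batches) call in fit_flow_from_directory above):
#   batch_x, batch_y = next(iterator)
#   # batch_x: (batch_size, image_shape[1], image_shape[0], channels) floats
#   # batch_y: (batch_size, nb_windows + 1) one-hot localization labels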
def compute_predictions_and_gt(model, generator, steps,
max_q_size=10,
pickle_safe=False, verbose=0):
"""
Generate predictions and associated ground truth
for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Function adapted from keras `predict_generator`.
# Arguments
generator: Generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: Maximum size for the generator queue.
pickle_safe: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions and associated ground truth.
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
| |
result = sot._prepare_request(requires_id=False, prepend_key=True)
self.assertEqual("/something", result.url)
self.assertEqual({key: {"x": body_value}}, result.body)
self.assertEqual({"y": header_value}, result.headers)
def test__prepare_request_with_patch(self):
class Test(resource.Resource):
commit_jsonpatch = True
base_path = "/something"
x = resource.Body("x")
y = resource.Body("y")
the_id = "id"
sot = Test.existing(id=the_id, x=1, y=2)
sot.x = 3
result = sot._prepare_request(requires_id=True, patch=True)
self.assertEqual("something/id", result.url)
self.assertEqual([{'op': 'replace', 'path': '/x', 'value': 3}],
result.body)
def test__prepare_request_with_patch_not_synchronized(self):
class Test(resource.Resource):
commit_jsonpatch = True
base_path = "/something"
x = resource.Body("x")
y = resource.Body("y")
the_id = "id"
sot = Test.new(id=the_id, x=1)
result = sot._prepare_request(requires_id=True, patch=True)
self.assertEqual("something/id", result.url)
self.assertEqual([{'op': 'add', 'path': '/x', 'value': 1}],
result.body)
def test__prepare_request_with_patch_params(self):
class Test(resource.Resource):
commit_jsonpatch = True
base_path = "/something"
x = resource.Body("x")
y = resource.Body("y")
the_id = "id"
sot = Test.existing(id=the_id, x=1, y=2)
sot.x = 3
params = [('foo', 'bar'),
('life', 42)]
result = sot._prepare_request(requires_id=True, patch=True,
params=params)
self.assertEqual("something/id?foo=bar&life=42", result.url)
self.assertEqual([{'op': 'replace', 'path': '/x', 'value': 3}],
result.body)
def test__translate_response_no_body(self):
class Test(resource.Resource):
attr = resource.Header("attr")
response = FakeResponse({}, headers={"attr": "value"})
sot = Test()
sot._translate_response(response, has_body=False)
self.assertEqual(dict(), sot._header.dirty)
self.assertEqual("value", sot.attr)
def test__translate_response_with_body_no_resource_key(self):
class Test(resource.Resource):
attr = resource.Body("attr")
body = {"attr": "value"}
response = FakeResponse(body)
sot = Test()
sot._filter_component = mock.Mock(side_effect=[body, dict()])
sot._translate_response(response, has_body=True)
self.assertEqual("value", sot.attr)
self.assertEqual(dict(), sot._body.dirty)
self.assertEqual(dict(), sot._header.dirty)
def test__translate_response_with_body_with_resource_key(self):
key = "key"
class Test(resource.Resource):
resource_key = key
attr = resource.Body("attr")
body = {"attr": "value"}
response = FakeResponse({key: body})
sot = Test()
sot._filter_component = mock.Mock(side_effect=[body, dict()])
sot._translate_response(response, has_body=True)
self.assertEqual("value", sot.attr)
self.assertEqual(dict(), sot._body.dirty)
self.assertEqual(dict(), sot._header.dirty)
def test_cant_do_anything(self):
class Test(resource.Resource):
allow_create = False
allow_fetch = False
allow_commit = False
allow_delete = False
allow_head = False
allow_list = False
sot = Test()
# The first argument to all of these operations is the session,
# but we raise before we get to it so just pass anything in.
self.assertRaises(exceptions.MethodNotSupported, sot.create, "")
self.assertRaises(exceptions.MethodNotSupported, sot.fetch, "")
self.assertRaises(exceptions.MethodNotSupported, sot.delete, "")
self.assertRaises(exceptions.MethodNotSupported, sot.head, "")
# list is a generator so you need to begin consuming
# it in order to exercise the failure.
the_list = sot.list("")
self.assertRaises(exceptions.MethodNotSupported, next, the_list)
# Update checks the dirty list first before even trying to see
# if the call can be made, so fake a dirty list.
sot._body = mock.Mock()
sot._body.dirty = mock.Mock(return_value={"x": "y"})
self.assertRaises(exceptions.MethodNotSupported, sot.commit, "")
def test_unknown_attrs_under_props_create(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
sot = Test.new(**{
'dummy': 'value',
})
self.assertDictEqual({'dummy': 'value'}, sot.properties)
self.assertDictEqual(
{'dummy': 'value'}, sot.to_dict()['properties']
)
self.assertDictEqual(
{'dummy': 'value'}, sot['properties']
)
self.assertEqual('value', sot['properties']['dummy'])
sot = Test.new(**{
'dummy': 'value',
'properties': 'a,b,c'
})
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
sot.properties
)
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
sot.to_dict()['properties']
)
sot = Test.new(**{'properties': None})
self.assertIsNone(sot.properties)
self.assertIsNone(sot.to_dict()['properties'])
def test_unknown_attrs_not_stored(self):
class Test(resource.Resource):
properties = resource.Body("properties")
sot = Test.new(**{
'dummy': 'value',
})
self.assertIsNone(sot.properties)
def test_unknown_attrs_not_stored1(self):
class Test(resource.Resource):
_store_unknown_attrs_as_properties = True
sot = Test.new(**{
'dummy': 'value',
})
self.assertRaises(KeyError, sot.__getitem__, 'properties')
def test_unknown_attrs_under_props_set(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
sot = Test.new(**{
'dummy': 'value',
})
sot['properties'] = {'dummy': 'new_value'}
self.assertEqual('new_value', sot['properties']['dummy'])
sot.properties = {'dummy': 'new_value1'}
self.assertEqual('new_value1', sot['properties']['dummy'])
def test_unknown_attrs_prepare_request_unpacked(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
# Unknown attribute given as root attribute
sot = Test.new(**{
'dummy': 'value',
'properties': 'a,b,c'
})
request_body = sot._prepare_request(requires_id=False).body
self.assertEqual('value', request_body['dummy'])
self.assertEqual('a,b,c', request_body['properties'])
# properties are already a dict
sot = Test.new(**{
'properties': {
'properties': 'a,b,c',
'dummy': 'value'
}
})
request_body = sot._prepare_request(requires_id=False).body
self.assertEqual('value', request_body['dummy'])
self.assertEqual('a,b,c', request_body['properties'])
def test_unknown_attrs_prepare_request_no_unpack_dict(self):
# if props type is not None - ensure no unpacking is done
class Test(resource.Resource):
properties = resource.Body("properties", type=dict)
sot = Test.new(**{
'properties': {
'properties': 'a,b,c',
'dummy': 'value'
}
})
request_body = sot._prepare_request(requires_id=False).body
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
request_body['properties'])
def test_unknown_attrs_prepare_request_patch_unpacked(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
commit_jsonpatch = True
sot = Test.existing(**{
'dummy': 'value',
'properties': 'a,b,c'
})
sot._update(**{'properties': {'dummy': 'new_value'}})
request_body = sot._prepare_request(requires_id=False, patch=True).body
self.assertDictEqual(
{
u'path': u'/dummy',
u'value': u'new_value',
u'op': u'replace'
},
request_body[0])
def test_unknown_attrs_under_props_translate_response(self):
class Test(resource.Resource):
properties = resource.Body("properties")
_store_unknown_attrs_as_properties = True
body = {'dummy': 'value', 'properties': 'a,b,c'}
response = FakeResponse(body)
sot = Test()
sot._translate_response(response, has_body=True)
self.assertDictEqual(
{'dummy': 'value', 'properties': 'a,b,c'},
sot.properties
)
class TestResourceActions(base.TestCase):
def setUp(self):
super(TestResourceActions, self).setUp()
self.service_name = "service"
self.base_path = "base_path"
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
resources_key = 'resources'
allow_create = True
allow_fetch = True
allow_head = True
allow_commit = True
allow_delete = True
allow_list = True
self.test_class = Test
self.request = mock.Mock(spec=resource._Request)
self.request.url = "uri"
self.request.body = "body"
self.request.headers = "headers"
self.response = FakeResponse({})
self.sot = Test(id="id")
self.sot._prepare_request = mock.Mock(return_value=self.request)
self.sot._translate_response = mock.Mock()
self.session = mock.Mock(spec=adapter.Adapter)
self.session.create = mock.Mock(return_value=self.response)
self.session.get = mock.Mock(return_value=self.response)
self.session.put = mock.Mock(return_value=self.response)
self.session.patch = mock.Mock(return_value=self.response)
self.session.post = mock.Mock(return_value=self.response)
self.session.delete = mock.Mock(return_value=self.response)
self.session.head = mock.Mock(return_value=self.response)
self.session.session = self.session
self.session._get_connection = mock.Mock(return_value=self.cloud)
self.session.default_microversion = None
self.session.retriable_status_codes = None
self.endpoint_data = mock.Mock(max_microversion='1.99',
min_microversion=None)
self.session.get_endpoint_data.return_value = self.endpoint_data
def _test_create(self, cls, requires_id=False, prepend_key=False,
microversion=None, base_path=None, params=None,
id_marked_dirty=True):
id = "id" if requires_id else None
sot = cls(id=id)
sot._prepare_request = mock.Mock(return_value=self.request)
sot._translate_response = mock.Mock()
params = params or {}
result = sot.create(self.session, prepend_key=prepend_key,
base_path=base_path, **params)
id_is_dirty = ('id' in sot._body._dirty)
self.assertEqual(id_marked_dirty, id_is_dirty)
sot._prepare_request.assert_called_once_with(
requires_id=requires_id, prepend_key=prepend_key,
base_path=base_path)
if requires_id:
self.session.put.assert_called_once_with(
self.request.url,
json=self.request.body, headers=self.request.headers,
microversion=microversion, params=params)
else:
self.session.post.assert_called_once_with(
self.request.url,
json=self.request.body, headers=self.request.headers,
microversion=microversion, params=params)
self.assertEqual(sot.microversion, microversion)
sot._translate_response.assert_called_once_with(self.response,
has_body=sot.has_body)
self.assertEqual(result, sot)
def test_put_create(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'PUT'
self._test_create(Test, requires_id=True, prepend_key=True)
def test_put_create_exclude_id(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'PUT'
create_exclude_id_from_body = True
self._test_create(Test, requires_id=True, prepend_key=True,
id_marked_dirty=False)
def test_put_create_with_microversion(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'PUT'
_max_microversion = '1.42'
self._test_create(Test, requires_id=True, prepend_key=True,
microversion='1.42')
def test_put_create_with_params(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'PUT'
self._test_create(Test, requires_id=True, prepend_key=True,
params={'answer': 42})
def test_post_create(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'POST'
self._test_create(Test, requires_id=False, prepend_key=True)
def test_post_create_base_path(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'POST'
self._test_create(Test, requires_id=False, prepend_key=True,
base_path='dummy')
def test_post_create_with_params(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_create = True
create_method = 'POST'
self._test_create(Test, requires_id=False, prepend_key=True,
params={'answer': 42})
def test_fetch(self):
result = self.sot.fetch(self.session)
self.sot._prepare_request.assert_called_once_with(
requires_id=True, base_path=None)
self.session.get.assert_called_once_with(
self.request.url, microversion=None, params={})
self.assertIsNone(self.sot.microversion)
self.sot._translate_response.assert_called_once_with(self.response)
self.assertEqual(result, self.sot)
def test_fetch_with_params(self):
result = self.sot.fetch(self.session, fields='a,b')
self.sot._prepare_request.assert_called_once_with(
requires_id=True, base_path=None)
self.session.get.assert_called_once_with(
self.request.url, microversion=None, params={'fields': 'a,b'})
self.assertIsNone(self.sot.microversion)
self.sot._translate_response.assert_called_once_with(self.response)
self.assertEqual(result, self.sot)
def test_fetch_with_microversion(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_fetch = True
_max_microversion = '1.42'
sot = Test(id='id')
sot._prepare_request = mock.Mock(return_value=self.request)
sot._translate_response = mock.Mock()
result = sot.fetch(self.session)
sot._prepare_request.assert_called_once_with(
requires_id=True, base_path=None)
self.session.get.assert_called_once_with(
self.request.url, microversion='1.42', params={})
self.assertEqual(sot.microversion, '1.42')
sot._translate_response.assert_called_once_with(self.response)
self.assertEqual(result, sot)
def test_fetch_not_requires_id(self):
result = self.sot.fetch(self.session, False)
self.sot._prepare_request.assert_called_once_with(
requires_id=False, base_path=None)
self.session.get.assert_called_once_with(
self.request.url, microversion=None, params={})
self.sot._translate_response.assert_called_once_with(self.response)
self.assertEqual(result, self.sot)
def test_fetch_base_path(self):
result = self.sot.fetch(self.session, False, base_path='dummy')
self.sot._prepare_request.assert_called_once_with(
requires_id=False,
base_path='dummy')
self.session.get.assert_called_once_with(
self.request.url, microversion=None, params={})
self.sot._translate_response.assert_called_once_with(self.response)
self.assertEqual(result, self.sot)
def test_head(self):
result = self.sot.head(self.session)
self.sot._prepare_request.assert_called_once_with(base_path=None)
self.session.head.assert_called_once_with(
self.request.url,
microversion=None)
self.assertIsNone(self.sot.microversion)
self.sot._translate_response.assert_called_once_with(
self.response, has_body=False)
self.assertEqual(result, self.sot)
def test_head_base_path(self):
result = self.sot.head(self.session, base_path='dummy')
self.sot._prepare_request.assert_called_once_with(base_path='dummy')
self.session.head.assert_called_once_with(
self.request.url,
microversion=None)
self.assertIsNone(self.sot.microversion)
self.sot._translate_response.assert_called_once_with(
self.response, has_body=False)
self.assertEqual(result, self.sot)
def test_head_with_microversion(self):
class Test(resource.Resource):
service = self.service_name
base_path = self.base_path
allow_head = True
_max_microversion = '1.42'
sot = Test(id='id')
sot._prepare_request = mock.Mock(return_value=self.request)
sot._translate_response = mock.Mock()
result = sot.head(self.session)
sot._prepare_request.assert_called_once_with(base_path=None)
self.session.head.assert_called_once_with(
self.request.url,
microversion='1.42')
self.assertEqual(sot.microversion, '1.42')
sot._translate_response.assert_called_once_with(
self.response, has_body=False)
self.assertEqual(result, sot)
def _test_commit(self, commit_method='PUT', prepend_key=True,
has_body=True, microversion=None,
commit_args=None, expected_args=None, base_path=None):
self.sot.commit_method = commit_method
# Need to make sot look dirty so we can attempt an update
self.sot._body = mock.Mock()
self.sot._body.dirty = mock.Mock(return_value={"x": "y"})
self.sot.commit(self.session, prepend_key=prepend_key,
has_body=has_body, base_path=base_path,
**(commit_args or {}))
self.sot._prepare_request.assert_called_once_with(
prepend_key=prepend_key, base_path=base_path)
if commit_method == 'PATCH':
self.session.patch.assert_called_once_with(
self.request.url,
json=self.request.body, headers=self.request.headers,
microversion=microversion, **(expected_args or {}))
elif commit_method == 'POST':
self.session.post.assert_called_once_with(
self.request.url,
json=self.request.body, headers=self.request.headers,
microversion=microversion, **(expected_args or {}))
elif commit_method == 'PUT':
self.session.put.assert_called_once_with(
self.request.url,
json=self.request.body, headers=self.request.headers,
microversion=microversion, **(expected_args or {}))
self.assertEqual(self.sot.microversion, microversion)
self.sot._translate_response.assert_called_once_with(
self.response, has_body=has_body)
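# Note on the helper above: commit() is expected to dispatch to session.put,
# session.patch or session.post depending on commit_method, and the
# retry_on_conflict tests below verify that the flag is translated into the
# retriable_status_codes passed to the session call (adding 409, or removing
# it from the session's defaults).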
def test_commit_put(self):
self._test_commit(commit_method='PUT', prepend_key=True, has_body=True)
def test_commit_patch(self):
self._test_commit(
commit_method='PATCH', prepend_key=False, has_body=False)
def test_commit_base_path(self):
self._test_commit(commit_method='PUT', prepend_key=True, has_body=True,
base_path='dummy')
def test_commit_patch_retry_on_conflict(self):
self._test_commit(
commit_method='PATCH',
commit_args={'retry_on_conflict': True},
expected_args={'retriable_status_codes': {409}})
def test_commit_put_retry_on_conflict(self):
self._test_commit(
commit_method='PUT',
commit_args={'retry_on_conflict': True},
expected_args={'retriable_status_codes': {409}})
def test_commit_patch_no_retry_on_conflict(self):
self.session.retriable_status_codes = {409, 503}
self._test_commit(
commit_method='PATCH',
commit_args={'retry_on_conflict': False},
expected_args={'retriable_status_codes': {503}})
def test_commit_put_no_retry_on_conflict(self):
self.session.retriable_status_codes = {409, 503}
self._test_commit(
commit_method='PUT',
commit_args={'retry_on_conflict': False},
expected_args={'retriable_status_codes': {503}})
def test_commit_not_dirty(self):
self.sot._body = mock.Mock()
self.sot._body.dirty = dict()
self.sot._header = mock.Mock()
self.sot._header.dirty | |
1 / Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies, defaulting to None
fstims = [Fs / fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real // 2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if sides == 'onesided' or (sides == 'default' and not iscomplex):
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real // 2 + 1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real // 2 + 1)
else:
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real // 2
t_stop = len(x) - NFFT_specgram_real // 2 + 1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1 / Fs / 2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real / (2 * Fs)])
t_spectrum = np.array([NFFT_spectrum_real / (2 * Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
# Interestingly, the instance on which this fixture is called is not
# the same as the one on which a test is run. So we need to modify the
# class itself when using a class-scoped fixture.
cls = request.cls
cls.Fs = Fs
cls.sides = sides
cls.fstims = fstims
cls.NFFT_density = NFFT_density
cls.nover_density = nover_density
cls.pad_to_density = pad_to_density
cls.NFFT_spectrum = NFFT_spectrum
cls.nover_spectrum = nover_spectrum
cls.pad_to_spectrum = pad_to_spectrum
cls.NFFT_specgram = NFFT_specgram
cls.nover_specgram = nover_specgram
cls.pad_to_specgram = pad_to_specgram
cls.t_specgram = t_specgram
cls.t_density = t_density
cls.t_spectrum = t_spectrum
cls.y = y
cls.freqs_density = freqs_density
cls.freqs_spectrum = freqs_spectrum
cls.freqs_specgram = freqs_specgram
cls.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert resfreqs.argmin() == 0
assert resfreqs.argmax() == len(resfreqs)-1
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert vals[i] > vals[i+2]
assert vals[i] > vals[i-2]
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# go through each peak and make sure it is correctly the maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises(self):
# We don't use parametrize here to handle ``y = self.y``.
for kwargs in [ # Various error conditions:
{"y": self.y+1, "mode": "complex"}, # Modes requiring ``x is y``.
{"y": self.y+1, "mode": "magnitude"},
{"y": self.y+1, "mode": "angle"},
{"y": self.y+1, "mode": "phase"},
{"mode": "spam"}, # Bad mode.
{"y": self.y, "sides": "eggs"}, # Bad sides.
{"y": self.y, "NFFT": 10, "noverlap": 20}, # noverlap > NFFT.
{"NFFT": 10, "noverlap": 10}, # noverlap == NFFT.
{"y": self.y, "NFFT": 10,
"window": np.ones(9)}, # len(win) != NFFT.
]:
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, **kwargs)
@pytest.mark.parametrize('mode', ['default', 'psd'])
def test_single_spectrum_helper_unsupported_modes(self, mode):
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode=mode)
@pytest.mark.parametrize("mode, case", [
("psd", "density"),
("magnitude", "specgram"),
("magnitude", "spectrum"),
])
def test_spectral_helper_psd(self, mode, case):
freqs = getattr(self, f"freqs_{case}")
spec, fsp, t = mlab._spectral_helper(
x=self.y, y=self.y,
NFFT=getattr(self, f"NFFT_{case}"),
Fs=self.Fs,
noverlap=getattr(self, f"nover_{case}"),
pad_to=getattr(self, f"pad_to_{case}"),
sides=self.sides,
mode=mode)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, getattr(self, f"t_{case}"), atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == getattr(self, f"t_{case}").shape[0]
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_csd_padding(self):
"""Test zero padding of csd()."""
if self.NFFT_density is None: # for derived classes
return
sargs = dict(x=self.y, y=self.y+1, Fs=self.Fs, window=mlab.window_none,
sides=self.sides)
spec0, _ = mlab.csd(NFFT=self.NFFT_density, **sargs)
spec1, _ = mlab.csd(NFFT=self.NFFT_density*2, **sargs)
assert_almost_equal(np.sum(np.conjugate(spec0)*spec0).real,
np.sum(np.conjugate(spec1/2)*spec1/2).real)
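# The energy comparison above divides spec1 by 2 before summing |spec|^2:
# doubling NFFT doubles the number of frequency bins, so if the zero padding
# only redistributes the signal energy across more bins, both sums should
# agree to within numerical precision.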
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert spec.shape == freqs.shape
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'make_data, detrend',
[(np.zeros, mlab.detrend_mean), (np.zeros, 'mean'),
(np.arange, mlab.detrend_linear), (np.arange, 'linear')])
def test_psd_detrend(self, make_data, detrend):
if self.NFFT_density is None:
return
ydata = make_data(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = _apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = _apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_psd_windowarray_scale_by_freq(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
@pytest.mark.parametrize(
"kind", ["complex", "magnitude", "angle", "phase"])
def test_spectrum(self, kind):
freqs = self.freqs_spectrum
spec, fsp = getattr(mlab, f"{kind}_spectrum")(
x=self.y,
Fs=self.Fs, sides=self.sides, pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
if kind == "magnitude":
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'kwargs',
[{}, {'mode': 'default'}, {'mode': 'psd'}, {'mode': 'magnitude'},
{'mode': | |
# -*- coding: utf-8 -*-
"""
<NAME>
VC - Visión por Computador
4º - GII - CCIA - ETSIIT - UGR
Curso 2017/2018
"""
import cv2
import numpy as np
import math
import copy
from matplotlib import pyplot as plt
###############################################################################
# General configuration
#
# Path to the images
path = "imagenes/"
# Text shown to ask the user to continue running the script
continue_text = "Pulsa \"Enter\" para continuar..."
# Title for the images
img_title = ""
# Number of columns in the grid used to display the images
num_cols = 3
# Default color scheme used to convert the images
cmap = cv2.COLOR_RGB2GRAY
# Default colormap used by plt.imshow()
plt.rcParams['image.cmap'] = 'gray'
#
################################################################################
################################################################################
# Utils
#
def set_c_map(imgs, cmap = cv2.COLOR_RGB2BGR):
"""
Asigna un esquema de color a todas las imágenes que se reciben.
Si se recibe una lista, las imágenes verán reflejadas su nuevo esquema de
color directamente.
Si se recibe una imagen, es necesario hacer una asignación con el resultado
de aplicar esta función.
------------
Para aplicar el color se puede utilizar:
- cv2.COLOR_RGB2BGR (color)
- cv2.COLOR_RGB2GRAY (escala de grises)
- ...
"""
# Se comprueba si el elemento que se recibe es una imagen o es una lista
# con imágenes.
if isinstance(imgs, list):
# Se guarda la longitud de la lista de imágenes
length = len(imgs)
# Es una lista de imágenes, por lo que se recorre cada una para cambiar
# el esquema de color
for i in range(length):
imgs[i] = cv2.cvtColor(imgs[i], cmap)
else:
# Si es una imagen se le aplica el esquema de color
imgs = cv2.cvtColor(imgs, cmap)
return imgs
def power_two(n):
"""
Calcula el logaritmo en base 2 de un número, truncando los decimales
"""
return int(math.log(n, 2))
def next_power_two(n):
"""
Calcula el siguiente número potencia de 2 de otro número recibido por
parámetro.
"""
return pow(2, power_two(n)+1)
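# Quick illustrative example (not part of the original script) of the two
# helpers above: power_two truncates the base-2 logarithm and next_power_two
# returns the next power of two strictly greater than it.
def _demo_power_two():
    # power_two(20) -> 4, since log2(20) is approximately 4.32
    # next_power_two(20) -> 32, since 2**(4+1) = 32
    return power_two(20), next_power_two(20)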
#
################################################################################
################################################################################
# Exercise 1
#
########
# Section A
#
def show_images(imgs, names = list(), cols = num_cols, title = ""):
"""
Dada una lista de imágenes (imgs) y una lista de nombres (names), muestra en
una tabla todas estas imágenes con su nombre correspondiente.
Por defecto, el número de columnas que se van a mostrar es 3.
Por defecto, el título que acompaña a cada imagen es "".
"""
# Se guarda la cantidad de imágenes que se van a mostrar
imgs_length = len(imgs)
# Si la lista está vacía, se crea una con el tamaño de imágenes y rellena de
# espacios en blanco para que no haya ningún error al mostrarlo
if not names:
names = [""]*imgs_length
# Si hay menos imágenes que número de columnas se ha establecido, se
# disminuye el número de columnas al número de imágenes y el número de filas
# a 1.
if imgs_length <= cols:
cols = imgs_length
rows = 1
# Si hay más imágenes, el número de filas viene determinado por el número de
# imágenes. Se debe redondear siempre al alza para poder colocar las
# imágenes en la última fila
else:
rows = math.ceil(imgs_length/cols)
# Se recorren todas las imágenes para colocarlas una a una en la posición
# que les corresponde en la tabla
for i, img in enumerate(imgs):
# La imagen se recibe con flotantes, por lo que se hace el cambio a
# enteros
img = copy.deepcopy(img).astype(int)
# Se indica el número de filas y columnas y la posición que ocupa la
# imagen actual en esa tabla
plt.subplot(rows, cols, i+1)
# Se cambia el esquema de color de la imagen
#img = cv2.cvtColor(img, cmap)
# Se indica el título que tendrá la imagen
plt.title(title+names[i])
# Se indica el valor que se mostrará en el eje X
#plt.xlabel(i)
# Se eliminan las marcas de los ejes X e Y
plt.xticks([])
plt.yticks([])
# Se muestra la imagen en su lugar correspondiente
plt.imshow(img)
# Se visualiza la tabla con todas las imágenes
plt.show()
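# Illustrative usage sketch (not part of the original script): assuming two
# images have already been loaded with cv2.imread from the `path` folder (the
# file names below are just placeholders), the grid could be displayed as:
#
#   img1 = cv2.imread(path + "imagen1.jpg")
#   img2 = cv2.imread(path + "imagen2.jpg")
#   show_images([img1, img2], names=["imagen1", "imagen2"], cols=2)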
#
########
########
# Section B
#
def convolution_b(img, sigma, mask = -1, border = -1):
"""
Dada una imagen (img), realiza una convolución con máscara gaussiana. El
tamaño de la máscara por defecto se calcula a partir del sigma recibido,
como 6*sigma+1. Esto es debido a que la medida más óptima es crear un
intervalo de 3*sigma por cada lado, lo que hace que se tenga 6*sigma. Para
no dejar sin contar el punto intermedio se suma 1, lo que termina resultando
6*sigma+1.
Se permite especificar una máscara determinada.
Se puede indicar un borde, que por defecto está deshabilitado.
-------------------
Opciones para el borde:
- cv2.BORDER_REPLICATE
- cv2.BORDER_REFLECT
- cv2.BORDER_REFLECT_101
- cv2.BORDER_WRAP
- cv2.BORDER_CONSTANT
-------------------
Devuelve la imagen con la transformación realizada.
"""
# Opciones para el borde:
# - BORDER_REPLICATE
# - BORDER_REFLECT
# - BORDER_REFLECT_101
# - BORDER_WRAP
# - BORDER_CONSTANT
# Se comprueba si no se ha recibido la máscara. En el caso de no recibirla
# se le asigna el valor por defecto 6*sigma+1
if mask == -1:
mask = 6*sigma+1
# Si no se recibe ningún borde, la convolución será por defecto. En caso de
# recibirlo, se le indica por parámetro el especificado
if border == -1:
img = cv2.GaussianBlur(img, (mask, mask), sigma)
else:
img = cv2.GaussianBlur(img, (mask, mask), sigma, borderType = border)
# Se devuelve la imagen con la convolución realizada
return img
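# Illustrative usage sketch (not part of the original script): a Gaussian
# blur with sigma = 3, the default 6*sigma+1 mask and a replicated border,
# assuming `img` is an image already loaded with cv2.imread.
#
#   blurred = convolution_b(img, 3, border=cv2.BORDER_REPLICATE)
#   show_images([img, blurred], names=["original", "sigma = 3"], cols=2)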
#
########
########
# Section C
#
def convolution_c(img, kernel_x = None, kernel_y = None, sigma = 0, border = cv2.BORDER_DEFAULT, normalize = True, own = False):
"""
Given an image (img) and two kernels (kernel_x, kernel_y), performs a
convolution of the image using those kernels.
The kernel is applied successively to every row of the image, and on that
transformed result it is applied again to every column.
If kernel_x is received as "None", the kernel is obtained through the
cv2.getGaussianKernel() function.
If kernel_y is received as "None", it will be the same kernel already
computed for kernel_x.
A border type can be specified; by default it is disabled. Note: if the
custom convolution function added in Bonus 2 is used, the border option is
ignored, since a reflected border is used by default.
Normalization is supported and enabled by default. The parameter
normalize = False can be passed to disable it.
In Bonus 2 a custom convolution function is developed and adapted in this
section (since it is used for the rest of the assignment) so that it can be
used here. The "own" parameter indicates whether the custom convolution
function is used. By default it is not used.
-------------------
Border options:
- cv2.BORDER_REPLICATE
- cv2.BORDER_REFLECT
- cv2.BORDER_REFLECT_101
- cv2.BORDER_WRAP
- cv2.BORDER_CONSTANT
-------------------
Returns the image with the transformation applied.
"""
# If the values of img were modified, the changes would be reflected in the
# rest of the images that use the base image. To avoid this, a copy that is
# not by reference is made
img = copy.deepcopy(img)
# Check whether the kernel must be computed
if kernel_x is None:
kernel_x = cv2.getGaussianKernel(6*sigma+1, sigma)
# Check whether kernel_y must be computed
if kernel_y is None:
kernel_y = kernel_x
# Get the number of rows and columns of the image. The shape attribute also
# returns the channels whenever the image is not in grayscale, so check how
# many values it returns and, depending on that, store them in two or three
# variables (to avoid runtime errors)
shape = img.shape
# To avoid scope errors, the rows and columns are declared at this point
# The rows can also be obtained as len(img) and the columns as
# len(img[0])
rows = 0
cols = 0
# If the image is in grayscale, shape returns two values
if len(shape) == 2:
rows, cols = shape
# If the image has a color scheme
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webapp.py
#
# Copyright 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
# This code was inspired by and contains code snippets from Pradeep Singh:
# https://github.com/pradeesi/Incoming_Call_Detail_Logger
# https://iotbytes.wordpress.com/incoming-call-details-logger-with-raspberry-pi/
# ==============================================================================
from __future__ import division
import logging
import os
import random
import string
import _thread
from datetime import datetime, timedelta
from pprint import pformat
import sqlite3
from flask import Flask, request, g, current_app, render_template, redirect, \
jsonify, flash
from flask_paginate import Pagination, get_page_args
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from screening.query_db import query_db
from screening.blacklist import Blacklist
from screening.whitelist import Whitelist
from messaging.voicemail import Message
# Create the Flask micro web-framework application
app = Flask(__name__)
app.config.from_pyfile('webapp.cfg')
@app.before_request
def before_request():
"""
Establish a database connection for the current request
"""
master_config = current_app.config.get("MASTER_CONFIG")
g.conn = sqlite3.connect(master_config.get("DB_FILE"))
g.conn.row_factory = sqlite3.Row
g.cur = g.conn.cursor()
@app.teardown_request
def teardown(error):
"""
Closes the database connection for the last request
"""
if hasattr(g, 'conn'):
g.conn.close()
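# Illustrative sketch (not part of the original app): thanks to the
# before_request/teardown_request pair above, any route can run queries
# through g.cur without opening its own connection. A hypothetical route
# could look like this:
#
#   @app.route('/total-calls')
#   def total_calls():
#       g.cur.execute("SELECT COUNT(*) FROM CallLog")
#       return jsonify(total=g.cur.fetchone()[0])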
@app.route('/')
def dashboard():
"""
Display the dashboard, i.e., the home page
"""
# Count totals calls
sql = "SELECT COUNT(*) FROM CallLog"
g.cur.execute(sql)
total_calls = g.cur.fetchone()[0]
# Count blocked calls
sql = "SELECT COUNT(*) FROM CallLog WHERE `Action` = 'Blocked'"
g.cur.execute(sql)
total_blocked = g.cur.fetchone()[0]
# Compute percentage blocked
percent_blocked = 0
if total_calls > 0:
percent_blocked = total_blocked / total_calls * 100
# Get the number of unread messages
sql = "SELECT COUNT(*) FROM Message WHERE Played = 0"
g.cur.execute(sql)
new_messages = g.cur.fetchone()[0]
# Get the Recent Calls subset
max_num_rows = 10
sql = """SELECT
a.CallLogID,
CASE
WHEN b.PhoneNo is not null then b.Name
WHEN c.PhoneNo is not null then c.Name
ELSE a.Name
END Name,
a.Number,
a.Date,
a.Time,
a.Action,
a.Reason,
CASE WHEN b.PhoneNo is null THEN 'N' ELSE 'Y' END Whitelisted,
CASE WHEN c.PhoneNo is null THEN 'N' ELSE 'Y' end Blacklisted,
d.MessageID,
d.Played,
d.Filename,
a.SystemDateTime
FROM CallLog as a
LEFT JOIN Whitelist AS b ON a.Number = b.PhoneNo
LEFT JOIN Blacklist AS c ON a.Number = c.PhoneNo
LEFT JOIN Message AS d ON a.CallLogID = d.CallLogID
ORDER BY a.SystemDateTime DESC
LIMIT {}""".format(max_num_rows)
g.cur.execute(sql)
result_set = g.cur.fetchall()
recent_calls = []
for row in result_set:
# Flask pages use the static folder to get resources.
# In the static folder we have created a soft-link to the
# data/messages folder containing the actual messages.
# We'll use the static-based path for the wav-file urls
# in the web app
filepath = row[11]
if filepath is not None:
basename = os.path.basename(filepath)
filepath = os.path.join("../static/messages", basename)
# Create a date object from the date time string
date_time = datetime.strptime(row[12][:19], '%Y-%m-%d %H:%M:%S')
recent_calls.append(dict(
call_no=row[0],
name=row[1],
phone_no=format_phone_no(row[2]),
date=date_time.strftime('%d-%b-%y'),
time=date_time.strftime('%I:%M %p'),
action=row[5],
reason=row[6],
whitelisted=row[7],
blacklisted=row[8],
msg_no=row[9],
msg_played=row[10],
wav_file=filepath))
# Get top permitted callers
sql = """SELECT COUNT(Number), Number, Name
FROM CallLog
WHERE Action IN ('Permitted', 'Screened')
GROUP BY Number
ORDER BY COUNT(Number) DESC LIMIT 10"""
g.cur.execute(sql)
result_set = g.cur.fetchall()
top_permitted = []
for row in result_set:
top_permitted.append(dict(
count=row[0],
phone_no=format_phone_no(row[1]),
name=row[2]))
# Get top blocked callers
sql = """SELECT COUNT(Number), Number, Name
FROM CallLog
WHERE Action = 'Blocked'
GROUP BY Number
ORDER BY COUNT(Number) DESC LIMIT 10"""
g.cur.execute(sql)
result_set = g.cur.fetchall()
top_blocked = []
for row in result_set:
top_blocked.append(dict(
count=row[0],
phone_no=format_phone_no(row[1]),
name=row[2]))
# Get num calls per day for graphing
num_days = current_app.config.get("GRAPH_NUM_DAYS", 30)
# Query num blocked calls
sql = """SELECT COUNT(DATE(SystemDateTime)) Count, DATE(SystemDateTime) CallDate
FROM CallLog
WHERE SystemDateTime > DATETIME('now','-{} day') AND Action = 'Blocked'
GROUP BY CallDate
ORDER BY CallDate""".format(num_days)
g.cur.execute(sql)
result_set = g.cur.fetchall()
blocked_per_day = {}
for row in result_set:
# key value = date, count
blocked_per_day[row[1]] = row[0]
# Query number of allowed calls
sql = """SELECT COUNT(DATE(SystemDateTime)) Count, DATE(SystemDateTime) CallDate
FROM CallLog
WHERE SystemDateTime > DATETIME('now','-{} day') AND Action = 'Permitted'
GROUP BY CallDate
ORDER BY CallDate""".format(num_days)
g.cur.execute(sql)
result_set = g.cur.fetchall()
allowed_per_day = {}
for row in result_set:
# key value = date, count
allowed_per_day[row[1]] = row[0]
# Query number of screened calls
sql = """SELECT COUNT(DATE(SystemDateTime)) Count, DATE(SystemDateTime) CallDate
FROM CallLog
WHERE SystemDateTime > DATETIME('now','-{} day') AND Action = 'Screened'
GROUP BY CallDate
ORDER BY CallDate""".format(num_days)
g.cur.execute(sql)
result_set = g.cur.fetchall()
screened_per_day = {}
for row in result_set:
# key value = date, count
screened_per_day[row[1]] = row[0]
# Conflate the results
base_date = datetime.today()
date_list = [base_date - timedelta(days=x) for x in range(num_days)]
date_list.reverse()
calls_per_day = []
for date in date_list:
date_key = date.strftime("%Y-%m-%d")
calls_per_day.append(dict(
date=date_key,
blocked=blocked_per_day.get(date_key, 0),
allowed=allowed_per_day.get(date_key, 0),
screened=screened_per_day.get(date_key, 0)))
if not current_app.config.get("MASTER_CONFIG").get("MODEM_ONLINE", True):
flash('The modem is not online. Calls will not be screened or blocked. Check the logs and restart the CallAttendant.')
# Render the results
return render_template(
'dashboard.html',
active_nav_item="dashboard",
recent_calls=recent_calls,
top_permitted=top_permitted,
top_blocked=top_blocked,
calls_per_day=calls_per_day,
new_messages=new_messages,
total_calls='{:,}'.format(total_calls),
blocked_calls='{:,}'.format(total_blocked),
percent_blocked='{0:.0f}%'.format(percent_blocked))
@app.route('/calls', methods=['GET'])
def calls():
"""
Display the call history from the call log table.
"""
# Get GET request args, if available
number = request.args.get('number')
search_text = request.args.get('search')
search_type = request.args.get('submit')
# Get search criteria, if applicable
search_criteria = ""
if search_text:
if search_type == "phone":
number = transform_number(search_text) # override GET arg if we're searching
search_criteria = "WHERE Number='{}'".format(number)
else:
search_criteria = "WHERE Caller LIKE '%{}%'".format(search_text)
# Get values used for pagination of the call log
sql = "SELECT COUNT(*), Number, Name Caller FROM CallLog {}".format(search_criteria)
g.cur.execute(sql)
total = g.cur.fetchone()[0]
page, per_page, offset = get_page_args(
page_parameter="page",
per_page_parameter="per_page")
# Get the call log subset, limited to the pagination settings
sql = """SELECT
a.CallLogID,
CASE
WHEN b.PhoneNo is not null then b.Name
WHEN c.PhoneNo is not null then c.Name
ELSE a.Name
END Caller,
a.Number Number,
a.Date,
a.Time,
a.Action,
a.Reason,
CASE WHEN b.PhoneNo is null THEN 'N' ELSE 'Y' END Whitelisted,
CASE WHEN c.PhoneNo is null THEN 'N' ELSE 'Y' end Blacklisted,
d.MessageID,
d.Played,
d.Filename,
a.SystemDateTime
FROM CallLog as a
LEFT JOIN Whitelist AS b ON a.Number = b.PhoneNo
LEFT JOIN Blacklist AS c ON a.Number = c.PhoneNo
LEFT JOIN Message AS d ON a.CallLogID = d.CallLogID
{}
ORDER BY a.SystemDateTime DESC
LIMIT {}, {}""".format(search_criteria, offset, per_page)
g.cur.execute(sql)
result_set = g.cur.fetchall()
# Create a formatted list of records including some derived values
calls = []
for row in result_set:
number = row[2]
phone_no = format_phone_no(number)
# Flask pages use the static folder to get resources.
# In the static folder we have created a soft-link to the
# data/messages folder containing the actual messages.
# We'll use the static-based path for the wav-file urls
filepath = row[11]
if filepath is not None:
basename = os.path.basename(filepath)
filepath = os.path.join("../static/messages", basename)
# Create a date object from the date time string
date_time = datetime.strptime(row[12][:19], '%Y-%m-%d %H:%M:%S')
calls.append(dict(
call_no=row[0],
phone_no=phone_no,
name=row[1],
date=date_time.strftime('%d-%b-%y'),
time=date_time.strftime('%I:%M %p'),
action=row[5],
reason=row[6],
whitelisted=row[7],
blacklisted=row[8],
msg_no=row[9],
msg_played=row[10],
wav_file=filepath))
# Create a pagination object for the page
pagination = get_pagination(
page=page,
per_page=per_page,
total=total,
record_name="calls",
format_total=True,
format_number=True)
# Render the results with pagination
return render_template(
'calls.html',
active_nav_item='calls',
calls=calls,
search_criteria=search_criteria,
page=page,
per_page=per_page,
pagination=pagination)
@app.route('/calls/view/<int:call_no>', methods=['GET'])
def calls_view(call_no):
"""
Display the call details
"""
# Get the call log subset, limited to the pagination settings
sql = """SELECT
a.CallLogID,
CASE
WHEN b.PhoneNo is not null then b.Name
WHEN c.PhoneNo is not null then c.Name
ELSE a.Name
END Name,
a.Number Number,
a.Date,
a.Time,
a.Action,
a.Reason,
CASE | |
data.get("_id", data.get("mongo_id"))
if mongo_id is None:
raise KeyError("Missing `_id` or `mongo_id` on the data, it's needed to update the database!")
if not isinstance(mongo_id, ObjectId):
mongo_id = ObjectId(mongo_id)
new_cls = cls(id, parsed_servers)
new_cls.mongo_id = mongo_id
return new_cls
def serialize(self):
return {
"_id": self._mongo_id,
"id": self._id,
"servers": list(map(str, self._servers)),
}
class ShowtimesLock:
def __init__(self, server_id: Union[str, int]):
self._id = str(server_id)
self._lock = False
self._log = logging.getLogger(f"ShowtimesLock[{server_id}]")
async def __aenter__(self, *args, **kwargs):
await self.hold()
return self._id
async def __aexit__(self, *args, **kwargs):
await self.release()
async def hold(self):
timeout_max = 10 # In seconds
current_time = 0
increment = 0.2
while self._lock:
if not self._lock:
break
if current_time > timeout_max:
self._log.warning("Waiting timeout occured, relocking!")
break
await asyncio.sleep(increment)
current_time += increment
self._log.info("Holding access to lock!")
self._lock = True
async def release(self):
self._log.info("Releasing lock...")
self._lock = False
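# Illustrative usage sketch (not part of the original module): ShowtimesLock
# is an async context manager, so callers can serialize access per server id
# by wrapping their critical section in "async with". The coroutine below is
# purely hypothetical.
#
#   async def update_server(server_id, do_update):
#       async with ShowtimesLock(server_id) as srv_id:
#           await do_update(srv_id)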
#####################
# FansubRSS #
#####################
class FansubRSSEmbed:
def __init__(
self,
title: str,
description: str,
url: str,
thumbnail: str,
image: str,
footer: str,
footer_img: str,
color: int = None,
timestamp: bool = False,
):
self._title = title
self._description = description
self._url = url
self._thumbnail = thumbnail
self._image = image
self._footer = footer
self._footer_img = footer_img
self._color = color
self._timestamp = timestamp
def __getitem__(self, name: str):
if not name.startswith("_"):
name = "_" + name
return getattr(self, name, None)
def __setitem__(self, name: str, value: Union[str, bool, int]):
if not name.startswith("_"):
name = "_" + name
if not hasattr(self, name):
return
setattr(self, name, value)
@property
def title(self):
return self._title
@title.setter
def title(self, data: str):
self._title = data
@property
def description(self):
return self._description
@description.setter
def description(self, data: str):
self._description = data
@property
def url(self):
return self._url
@url.setter
def url(self, data: str):
self._url = data
@property
def thumbnail(self):
return self._thumbnail
@thumbnail.setter
def thumbnail(self, data: str):
self._thumbnail = data
@property
def image(self):
return self._image
@image.setter
def image(self, data: str):
self._image = data
@property
def footer(self):
return self._footer
@footer.setter
def footer(self, data: str):
self._footer = data
@property
def footer_img(self):
return self._footer_img
@footer_img.setter
def footer_img(self, data: str):
self._footer_img = data
@property
def color(self):
return self._color
@color.setter
def color(self, data: Union[discord.Color, int]):
if isinstance(data, int):
self._color = data
elif isinstance(data, discord.Color):
self._color = data.value
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, data: bool):
self._timestamp = data
@classmethod
def from_dict(cls, data: dict):
return cls(
data.get("title"),
data.get("description"),
data.get("url"),
data.get("thumbnail"),
data.get("image"),
data.get("footer"),
data.get("footer_img"),
data.get("color", 0x525252),
data.get("timestamp", False),
)
def serialize(self):
return {
"title": self._title,
"description": self._description,
"url": self._url,
"thumbnail": self._thumbnail,
"image": self._image,
"footer": self._footer,
"footer_img": self._footer_img,
"color": self._color,
"timestamp": self._timestamp,
}
@property
def is_valid(self):
# A minimum of title and description must be available
if not self._title or not self._description:
return False
return True
def generate(self, entry_data: dict, template_mode=False) -> Optional[discord.Embed]:
if not self.is_valid and not template_mode:
return None
regex_embed = re.compile(r"(?P<data>{[^{}]+})", re.MULTILINE | re.IGNORECASE)
filtered = {}
for key, value in self.serialize().items():
if not value:
continue
if isinstance(value, bool):
continue
if isinstance(value, int):
filtered[key] = value
continue
matched = re.findall(regex_embed, value)
formatted = list(map(lambda x: x.replace("{", "").replace("}", ""), matched))
for fmt in formatted:
try:
if isinstance(entry_data[fmt], (tuple, list)):
joined = ", ".join(map(str, entry_data[fmt]))
entry_data[fmt] = joined
value = value.replace("{" + fmt + "}", entry_data[fmt])
except KeyError:
pass
filtered[key] = value
embedded = discord.Embed()
title = filtered.get("title")
description = filtered.get("description")
url: str = filtered.get("url")
if title is not None:
embedded.title = title
elif template_mode:
embedded.title = "Tidak ada judul"
if description is not None:
embedded.description = description
elif template_mode:
embedded.description = "*Tidak ada deskripsi*"
if url is not None and url.startswith("http"):
embedded.url = url
embedded.colour = discord.Color(self.color)
thumbnail: str = filtered.get("thumbnail")
image: str = filtered.get("image")
if thumbnail is not None and thumbnail.startswith("http"):
embedded.set_thumbnail(url=thumbnail)
if image is not None and image.startswith("http"):
embedded.set_image(url=image)
if self.timestamp:
try:
_, dt_data = time_struct_dt(entry_data["published_parsed"])
except (AttributeError, KeyError, ValueError):
dt_data = arrow.utcnow().datetime
embedded.timestamp = dt_data
footer = filtered.get("footer")
if footer is not None:
kwargs_footer = {"text": footer}
footer_img = filtered.get("footer_img")
if footer_img is not None:
kwargs_footer["icon_url"] = footer_img
embedded.set_footer(**kwargs_footer)
elif template_mode:
embedded.set_footer(text="*Tidak ada footer*")
return embedded
class FansubRSSPremium:
def __init__(
self,
start: int,
duration: int,
):
self._start = start
self._duration = duration
@property
def start(self):
return self._start
@start.setter
def start(self, data: int):
self._start = data
def set_now(self):
self._start = arrow.utcnow().int_timestamp
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, data: int):
self._duration = data
def add_duration(self, data: int):
self._duration += data
@property
def is_infinite(self):
if self._start < 0 or self._duration < 0:
return True
return False
@property
def is_valid(self):
if self.is_infinite:
return True
now = arrow.utcnow().int_timestamp
max_time = self._start + self._duration
return now < max_time
@property
def time_left(self):
now = arrow.utcnow().int_timestamp
max_time = self._start + self._duration
time_left = int(round(max_time - now))
if time_left < 0:
return None
return time_left
def is_intersecting(self, target: Union[int, arrow.Arrow]):
if isinstance(target, arrow.Arrow):
target = target.int_timestamp
max_time = self._start + self._duration
return self._start <= target < max_time
def exhaust(self):
self._duration = 0
@classmethod
def from_dict(cls, data: Union[bool, int, dict]):
if isinstance(data, (bool, int)):
data = bool(data)
if data:
return cls(-1, -1)
else:
return None
return cls(
data.get("start"),
data.get("duration"),
)
def serialize(self):
irnd = lambda x: int(round(x)) # noqa: E731
return {
"start": irnd(self._start),
"duration": irnd(self._duration),
}
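# Illustrative usage sketch (not part of the original module): a premium
# window starting now and lasting 30 days, versus the "infinite" window that
# from_dict builds out of a bare True value.
#
#   month = FansubRSSPremium(arrow.utcnow().int_timestamp, 30 * 24 * 3600)
#   month.is_valid                                  # True until 30 days pass
#   FansubRSSPremium.from_dict(True).is_infinite    # True (start/duration -1)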
class FansubRSSFeed:
def __init__(
self,
id: Union[str, int],
channel: Union[str, int],
feed_url: str,
message: Optional[str] = None,
embed: Optional[FansubRSSEmbed] = None,
last_etag: str = None,
last_modified: str = None,
):
self._id = str(id)
self._channel = str(channel)
self._feed_url = feed_url
self._message = message
self._embed = embed
self._last_etag = last_etag
self._last_modified = last_modified
def __eq__(self, other: Union["FansubRSSFeed", str, int]):
if isinstance(other, (int, str)):
return self._id == str(other)
elif isinstance(other, FansubRSSFeed):
return self.id == other.id
return False
def __repr__(self):
_attr = [
f"id={self._id!r}",
f"channel={self._channel!r}",
f"url={self._feed_url!r}",
]
return f"<FansubRSSFeed {' '.join(_attr)}>"
@property
def id(self):
return self._id
@id.setter
def id(self, data: Union[str, int]):
self._id = str(data)
@property
def channel(self):
return int(self._channel)
@channel.setter
def channel(self, data: Union[str, int, discord.TextChannel]):
if isinstance(data, (int, str)):
self._channel = str(data)
elif isinstance(data, discord.TextChannel):
self._channel = str(data.id)
@property
def feed_url(self):
return self._feed_url
@feed_url.setter
def feed_url(self, data: str):
self._feed_url = str(data)
@property
def message(self):
return self._message
@message.setter
def message(self, data: str):
self._message = data
@property
def embed(self):
return self._embed
@embed.setter
def embed(self, data: Union[dict, FansubRSSEmbed]):
if isinstance(data, dict):
self._embed = FansubRSSEmbed.from_dict(data)
elif isinstance(data, FansubRSSEmbed):
self._embed = data
elif data is None:
self._embed = FansubRSSEmbed.from_dict({})
@property
def last_etag(self):
return self._last_etag or ""
@last_etag.setter
def last_etag(self, data: str):
self._last_etag = data
@property
def last_modified(self):
return self._last_modified or ""
@last_modified.setter
def last_modified(self, data: str):
self._last_modified = data
@classmethod
def from_dict(cls, data: dict):
return cls(
id=data.get("id"),
channel=data.get("channel"),
feed_url=data.get("feedUrl"),
message=data.get("message"),
embed=FansubRSSEmbed.from_dict(data.get("embed", {})),
last_etag=data.get("lastEtag", ""),
last_modified=data.get("lastModified", ""),
)
def serialize(self):
return {
"id": self.id,
"channel": self.channel,
"feedUrl": self.feed_url,
"message": self.message,
"embed": self.embed.serialize(),
"lastEtag": self.last_etag,
"lastModified": self.last_modified,
}
def __parse_message(self, entry_data: dict):
message = self.message
if not message:
return ""
matches = re.findall(r"(?P<data>{[^{}]+})", message, re.MULTILINE | re.IGNORECASE)
msg_fmt_data = [m.strip(r"{}") for m in matches]
for fmt in msg_fmt_data:
try:
message = message.replace("{" + fmt + "}", entry_data[fmt])
except KeyError:
pass
return message.replace("\\n", "\n")
def generate(self, entry_data: dict) -> Tuple[Optional[str], Optional[discord.Embed]]:
parsed_message = None
if self.message:
parsed_message = self.__parse_message(entry_data)
parsed_embed = None
if self.embed and self.embed.is_valid:
parsed_embed = self.embed.generate(entry_data)
return parsed_message, parsed_embed
class FansubRSS:
def __init__(self, id: int, feeds: List[FansubRSSFeed] = [], premiums: List[FansubRSSPremium] = []):
self._id = id
self._feeds = feeds
self._premiums = premiums
def __eq__(self, other: Union["FansubRSS", int]):
if isinstance(other, int):
return self._id == other
elif isinstance(other, FansubRSS):
return self.id == other.id
return False
def __repr__(self):
return f"<FansubRSS id={self._id!r} feeds={len(self._feeds)!r} premium={self.has_premium!r}>"
@property
def id(self):
return self._id
@property
def feeds(self):
return self._feeds
@feeds.setter
def feeds(self, data: Union[FansubRSSFeed, List[FansubRSSFeed]]):
if isinstance(data, list):
self._feeds = data
elif isinstance(data, FansubRSSFeed):
self.update_feed(data)
def get_feed(self, id: Union[str, int]) -> Optional[FansubRSSFeed]:
for feed in self.feeds:
if feed == id:
return feed
return None
def add_feed(self, feed: FansubRSSFeed):
self.update_feed(feed)
def remove_feed(self, data: Union[FansubRSSFeed, str, int]):
feed_idx = -1
for idx, feed in enumerate(self._feeds):
if feed == data:
feed_idx = idx
break
if feed_idx >= 0:
self._feeds.pop(feed_idx)
def update_feed(self, data: FansubRSSFeed):
feed_idx = -1
for idx, feed in enumerate(self._feeds):
if feed == data:
feed_idx = | |
# document_retrieval functions
import math
import string
import collections
from gensim.models import KeyedVectors
import numpy as np
def change_dict_structure(dict_list):
"""Takes list of dicts from db_query and changes to dict with key=id, value = text (used for metrices).
Args:
dict_list (list): List of dictionaries from db_query.
Returns:
texts (dictionary): Dictionary with document IDs as keys and document text as values.
"""
texts = {}
for dict in dict_list:
doc_id = dict.get('document_id')
text = dict.get('fulltext_cleaned')
texts.update({doc_id: text})
return texts
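# Minimal illustrative example (not part of the original module): db_query
# rows are dicts carrying 'document_id' and 'fulltext_cleaned' keys.
def _example_change_dict_structure():
    rows = [
        {'document_id': 'doc1', 'fulltext_cleaned': 'solar panels and energy'},
        {'document_id': 'doc2', 'fulltext_cleaned': 'wind turbines energy'},
    ]
    # returns {'doc1': 'solar panels and energy', 'doc2': 'wind turbines energy'}
    return change_dict_structure(rows)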
def similarity(token, token_list, wv):
"""Calculates similarity between token and list of tokens.
Args:
token (str): String for wich we are calculating similarity.
token_list (list): List of tokens to which we are calculating similarity.
wv (Word2VecKeyedVectors): Word embeddings.
Returns:
average_similarity (float): Number that signifies the similarity of token to the token_list words.
"""
similarity = 0
num_of_tokens = 0
for toks in token_list:
# check if the token is in the vocabulary
if toks in wv.vocab.keys():
num_of_tokens += 1
similarity += wv.similarity(toks, token)
average_similarity = similarity/num_of_tokens
return average_similarity
def probability_multiply(probability, token_frequency, n):
"""Assigns score to document based on multiplication of probabilities. Probability is token frequency devided by length of document.
In this metric only documents containing all query words have positive probability.
Args:
probability (float): Previously calculated probability.
token_frequency (float): Number of appearances of token in text.
n (int): Length of text.
Returns:
probability_value (float): New caclculated probability.
"""
probability_value = probability*(token_frequency/n)
return probability_value
def probability_sum(probability, token_frequency, n):
"""Assigns score to document based on summation of probabilities.
Args:
probability (float): Previously calculated probability.
token_frequency (float): Number of appearances of token in text.
n (int): Length of text.
Returns:
probability_value (float): New caclculated probability.
"""
probability_value = probability+(token_frequency/n)
return probability_value
def word_value(word, alpha, original_tokens, top_expansion, wv):
"""values word based on whether is in original token set or expanded, if alpha -1 value equals to cosine similarity
Args:
word (string): Word or token for which we are calculating value.
alpha (float): Number between 0 and 1. Weight that emphasizes the difference between original query words and expansions.
Usually between 0.5 (all words are treated equal) and 1 (expansion words have value 0).
For alpha -1 values equal to cosine similarity to query words.
original_tokens(list): List of strings. Tokenized original query. Usually also extension (extension by summation of 2 consecutive words).
top_expansion (list): List of expanded words. Usually candidates (kNN expansion).
wv (Word2VecKeyedVectors): Word embeddings.
Returns:
value (float): Value of the word based on whether it is in the original token set or the expanded set.
"""
only_expanded = []
for token in top_expansion:
if token not in original_tokens:
only_expanded.append(token)
sum_similarity = 0
for exp_token in only_expanded:
sum_similarity += similarity(exp_token, original_tokens, wv)
if alpha == -1:
if word in original_tokens:
value = 1
else:
value = similarity(word, original_tokens, wv)/sum_similarity
else:
if word in original_tokens:
value = alpha
else:
value = (1-alpha)*similarity(word, original_tokens, wv)/sum_similarity
return value
def probability_sum_weight(probability, token_frequency, n, word, alpha, original_tokens, top_expansion, wv):
"""Assigns weighted score to document based on summation of probabilities.
Args:
probability (float): Previously calculated probability.
token_frequency (float): Number of appearances of token in text.
n (int): Length of text.
word (string): Word or token for which we are calculating value.
alpha (float): Number between 0 and 1. Weight that emphasizes the difference between original query words and expansions.
Usually between 0.5 (all words are treated equal) and 1 (expansion words have value 0).
For alpha -1 values equal to cosine similarity to query words.
original_tokens(list): List of strings. Tokenized original query. Usually also extension (extension by summation of 2 consecutive words)
top_expansion (list): List of expanded words. Usually candidates (kNN expansion).
wv (Word2VecKeyedVectors): Word embeddings.
Returns:
probability_value (float): New caclculated probability.
"""
probability_value = probability+(token_frequency/n)*word_value(word, alpha, original_tokens, top_expansion, wv)
return probability_value
def top_positives(dictionary, n):
"""Takes dict and returns first n tuples of key,values sorted by values descending, returns only items with positive values.
Args:
dictionary (dict): Dictionary we want to sort by values.
n (int): Number of returned items. If there are fewer than n items in the dictionary or fewer than n items with positive values,
returns all items (with positive values) sorted.
Returns:
sorted_positives_top (list): List of n tuples. If there are fewer than n items in the dictionary or fewer than n items with
positive values, returns all items (with positive values) sorted.
"""
positives = {}
for k,v in dictionary.items():
if v > 0:
positives.update({k: v})
sorted_positives = sorted(positives.items(), key=lambda x: x[1], reverse=True)
if len(sorted_positives) > n:
sorted_positives_top = sorted_positives[0:n]
else:
sorted_positives_top = sorted_positives
return sorted_positives_top
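# Minimal illustrative example (not part of the original module): only the
# positively scored items are kept, sorted by score in descending order.
def _example_top_positives():
    scores = {'doc1': 0.4, 'doc2': 0.0, 'doc3': 1.2, 'doc4': -0.1}
    # returns [('doc3', 1.2), ('doc1', 0.4)]
    return top_positives(scores, 2)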
def probability_score(tokens, texts, probability_function, m, *args):
# Final scoring function. It also accepts probability_sum_weight as probability_function, but then it does not give the final result (it is used by probability_score_sum_weights).
"""Assigns score to documents based on probability_function metric.
Args:
tokens (list): List of tokens (tokenized query). If needed also extension (extension by summation of 2 consecutive words).
texts (dict): Keys represent document ids, values are document text.
probability_function (function): Metric function that calculates document relevance. Functions: probability_multiply, probability_sum. These require only the first 4 arguments.
m (int): Number of returned tuples (positive scores), sorted by highest scores. If m=0 returns all.
top_expansion (list): List of expanded words. Usually candidates (kNN expansion).
alpha (float): Number between 0 and 1. Weight that emphasizes the difference between original query words and expansions.
For alpha 0.5 all words have same weights (but not same values!), for alpha 1 expansion words have value 0.
For alpha -1 values equal to cosine similarity to query words.
wv (Word2VecKeyedVectors): Word embeddings.
Returns:
document_probability (list): Tuples of document ids and scores that measure document relevance. Returns the m tuples with the highest scores.
"""
document_probability = {}
for k, v in texts.items():
n = len(v)
if probability_function == probability_multiply:
probability = 1
else:
probability = 0
if probability_function == probability_sum_weight:
if len(args) == 3:
for i in range(len(tokens)):
token_frequency = v.count(tokens[i])
probability = probability_sum_weight(probability, token_frequency, n,tokens[i], args[1], tokens, args[0], args[2])
document_probability.update({k: probability})
else:
raise Exception("Error, number of arguments does not match.")
elif probability_function == probability_sum or probability_function == probability_multiply:
if len(args) == 0:
for i in range(len(tokens)):
token_frequency = v.count(tokens[i])
probability = probability_function(probability, token_frequency, n)
document_probability.update({k: probability})
else:
raise Exception("Error, number of arguments does not match.")
else:
raise Exception("Error, metric function not defined.")
if m == 0:
return [(k, v) for k, v in document_probability.items()]
else:
document_probability = top_positives(document_probability ,m)
return document_probability
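# Minimal illustrative example (not part of the original module), using the
# unweighted probability_sum metric on two toy documents. Each score is the
# sum of substring counts of the tokens divided by the text length in
# characters; with m=0 every (id, score) pair is returned.
def _example_probability_score():
    texts = {'doc1': 'solar energy storage systems', 'doc2': 'wind energy'}
    tokens = ['solar', 'energy']
    return probability_score(tokens, texts, probability_sum, 0)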
def probability_score_sum_weights(original_tokens, top_expansion, texts, m, alpha, wv):
"""As probability_score only weighted.
Args:
original_tokens(list): List of strings. Tokenized original query. Usually also extension (extension by summation of 2 consecutive words)
top_expansion (list): List of expanded words. Usually candidates (kNN expansion).
texts (dict): Keys represent document ids, values are document text.
m (int): Number of returned tuples (positive scores), sorted by highest scores. If m=0 returns all.
alpha (float): Number between 0 and 1. Weight that emphasizes the difference between original query words and expansions.
For alpha 0.5 all words have same weights (but not same values!), for alpha 1 expansion words have value 0.
For alpha -1 values equal to cosine similarity to query words.
wv (Word2VecKeyedVectors): Word embeddings.
Returns:
        document_score (list): Tuples of document ids and scores that measure document relevance. Returns the m tuples with the highest scores.
"""
tokens = original_tokens+top_expansion
document_score = probability_score(tokens, texts, probability_sum_weight, m, top_expansion, alpha, wv)
return document_score
def number_documents_tokens_appear(tokens, texts):
"""For each token in tokens counts the number of documents in which token has appeared.
Args:
tokens (list): List of tokens.
texts (dict): Keys represent document ids, values are document text.
Returns:
        documents_per_token (list): List of numbers that count the number of documents in which each token appears.
        The index of an element in the tokens list is the same as the index of its count in documents_per_token.
"""
documents_per_token = []
for i in range(len(tokens)):
documents_per_token.append(0)
for text in texts.values():
for i in range(len(tokens)):
token = tokens[i]
if token in text:
documents_per_token[i] = documents_per_token[i]+1
return documents_per_token
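# Illustrative sketch (an assumption, not from the original code): the document
# frequencies above can be turned into standard IDF weights like this.
#
#     import math
#     df = number_documents_tokens_appear(tokens, texts)
#     N = len(texts)
#     idf = [math.log(N / d) if d else 0.0 for d in df]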
def tfidf_sum(probability, token_frequency, n, idf):
"""Assigns score to document based on TF-IDF metric.
Args:
probability (float): Previously calculated tfidf score.
        token_frequency (float): Number of occurrences of the token in the document.
# repository: MaayanLab/creeds
'''
ORMs for signature, signatures in the MongoDB and collection of signatures.
'''
import os, sys, json
import hashlib
from collections import Counter
import numpy as np
import pandas as pd
import scipy.sparse as sp
import requests
from joblib import Parallel, delayed
from .gene_converter import *
from .matrix_ops import (fast_jaccard, fast_signed_jaccard)
## connect to mongodb via pymongo.MongoClient imported from the module
from creeds import conn
################################ Global variables ################################
COLL = conn['microtask_signatures'].signatures
COLL_GENES = conn['microtask_signatures'].genes
COLL_USER_SIGS = conn['microtask_signatures'].userSignatures
COLL_USER_SIGS.create_index('id', unique=True, sparse=False)
ALL_GENES = COLL_GENES.find_one({'case_sensitive': {'$exists':True}})['case_sensitive']
ALL_GENES = np.array(ALL_GENES)
ALL_GENES_I = COLL_GENES.find_one({'case_insensitive': {'$exists':True}})['case_insensitive']
ALL_GENES_I = np.array(ALL_GENES_I)
ALL_UIDS = COLL.find(
{'$and': [
{'chdir_sva_exp2': {'$exists': True}},
{'version': {'$in':['1.0', '1.1', '1.2', '2.0']}},
{"incorrect": {"$ne": True}}
]},
{'id': True}).distinct('id')
## load gene symbol to gene ID conversion dict
GENE_SYMBOLS = load_gene_symbol_dict()
## Fields in the mongodb for internal use only
FIELDS_EXCLUDE = ['_id', 'time',
'limma', 'limma_sva', 'limma_norm', 'limma_combat',
'fold_changes', 'log2FC_norm',
'chdir', 'chdir_combat_exp2',
'pvca', 'pvca_sva', 'pvca_combat']
PROJECTION_EXCLUDE = dict(zip(FIELDS_EXCLUDE, [False] * len(FIELDS_EXCLUDE)))
################################ Util functions ################################
def find_name(doc):
## find the name for a doc in the mongodb based on uid
uid = doc['id']
prefix = uid.split(':')[0]
if prefix == 'gene':
if doc['organism'] == 'human':
name = doc.get('hs_gene_symbol', None)
if name is None:
name = doc.get('mm_gene_symbol', None)
else:
name = doc.get('mm_gene_symbol', None)
if name is None:
name = doc.get('hs_gene_symbol', None)
elif prefix == 'dz':
name = doc.get('disease_name', None)
else:
name = doc.get('drug_name', None)
if type(name) == list: # predicted signatures
# extract name fields and convert to string
name = [item['name'] for item in name]
return name
def sparse_matrix_size(mat):
## get size of a sparse matrix
if type(mat) == sp.csr_matrix:
nbytes = mat.data.nbytes + mat.indptr.nbytes + mat.indices.nbytes
elif type(mat) == sp.lil_matrix:
nbytes = mat.data.nbytes + mat.rows.nbytes
return nbytes
################################ Classes ################################
class Signature(object):
def __init__(self, name=None, meta=None, up_genes=None, dn_genes=None,
query_params={'direction': 'similar', 'db_version':'v1.0'}):
## defaults:
if name is None: name = ''
if meta is None: meta = {}
if up_genes is None: up_genes = []
if dn_genes is None: dn_genes = []
self.name = name
self.meta = meta
self.up_genes = up_genes
self.dn_genes = dn_genes
self.query_params = query_params
def save(self):
'''Hash the attributes associate with self as well as query params,
and save to COLL_USER_SIGS
'''
d = {'name': self.name, 'meta': self.meta,
'up_genes': self.up_genes, 'dn_genes': self.dn_genes,
'query_params': self.query_params}
h = hashlib.md5(json.dumps(d)).hexdigest()
d['id'] = h
COLL_USER_SIGS.update_one({'id': h}, {'$set': d}, upsert=True)
return h
def init_vectors(self):
'''Init binary vectors representing of the siganture,
for fast computation of jaccard.'''
v_up = np.zeros(len(ALL_GENES_I), dtype=np.int8)
up_genes_i = map(lambda x: x.upper(), self.up_genes)
v_up[np.in1d(ALL_GENES_I, up_genes_i)] = 1
v_dn = np.zeros(len(ALL_GENES_I), dtype=np.int8)
dn_genes_i = map(lambda x: x.upper(), self.dn_genes)
v_dn[np.in1d(ALL_GENES_I, dn_genes_i)] = 1
self.v_up = v_up
self.v_dn = v_dn
def calc_all_scores(self, db_sig_collection):
'''
        Calculate the signed jaccard score for this signature against
a DBSignatureCollection instance or a list of DBSignatureCollection instances.
'''
uid_scores = []
if type(db_sig_collection) != list:
# a single DBSignatureCollection instance
scores = fast_signed_jaccard(db_sig_collection.mat_up, db_sig_collection.mat_dn,
self.v_up, self.v_dn)
uids = db_sig_collection.uids
else:
# a list of DBSignatureCollection instances
# stack sparse matrices first
mat_up = sp.vstack([dbsc.mat_up for dbsc in db_sig_collection])
mat_dn = sp.vstack([dbsc.mat_dn for dbsc in db_sig_collection])
scores = fast_signed_jaccard(mat_up, mat_dn,
self.v_up, self.v_dn)
uids = []
for dbsc in db_sig_collection:
uids.extend(dbsc.uids)
uid_scores = zip(uids, scores)
return dict(uid_scores)
def _get_query_results(self, db_sig_collection):
'''
Handle querying signatures from the DB with custom up/down genes,
return a list of objects
'''
direction = self.query_params['direction']
d_uid_score = self.calc_all_scores(db_sig_collection)
scores = np.array(d_uid_score.values())
uids = np.array(d_uid_score.keys())
uid_data = [] # a list of meta data {} sorted by score
# mask for signs of scores
if direction == 'similar':
score_sign_mask = scores > 0
elif direction == 'opposite':
score_sign_mask = scores < 0
# sort uids by abs(scores) in descending order
srt_idx = np.abs(scores[score_sign_mask]).argsort()[::-1]
scores = scores[score_sign_mask][srt_idx]
uids = uids[score_sign_mask][srt_idx]
# retrieve meta-data for all uids
projection ={'geo_id':True, 'id':True, '_id':False,
'hs_gene_symbol':True, 'mm_gene_symbol':True, 'organism':True,
'disease_name':True, 'drug_name':True, 'do_id':True,
'drugbank_id':True, 'pubchem_cid':True}
uid_docs = COLL.find({'id': {'$in': uids.tolist()}}, projection)
uid_docs = list(uid_docs)
# make uid_docs have the same order of id with uids
uids = uids.tolist()
uid_docs_ = [None] * len(uid_docs)
for uid_doc in uid_docs:
idx = uids.index(uid_doc['id'])
uid_docs_[idx] = uid_doc
uid_docs = uid_docs_
for doc, score in zip(uid_docs, scores):
sig_ = DBSignature(None, doc=doc)
meta = {
'id': sig_.meta['id'],
'geo_id': sig_.meta['geo_id'],
'name': [sig_.name, sig_.get_url()], # [name, url]
'signed_jaccard': float('%.5f'%score)
}
uid_data.append(meta)
return uid_data
def get_query_results(self, d_dbsc):
'''Wrapper for _get_query_results handling db_version
'''
db_version = self.query_params['db_version']
if type(db_version) != list:
uid_data = self._get_query_results(d_dbsc[db_version])
else:
uid_data = self._get_query_results([d_dbsc[v] for v in db_version])
return uid_data
@classmethod
def from_hash(cls, h):
'''To retrieve a Signature using a hash'''
if h is None:
return None
else:
doc = COLL_USER_SIGS.find_one({'id': h}, {'_id': False, 'id': False})
signature = Signature(**doc)
return signature
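# Illustrative usage sketch (the gene lists are hypothetical and `d_dbsc` is the
# version -> DBSignatureCollection mapping assumed to be built elsewhere in the app):
#
#     sig = Signature(name='my signature',
#                     up_genes=['STAT3', 'TP53'], dn_genes=['EGFR'],
#                     query_params={'direction': 'similar', 'db_version': 'v1.0'})
#     sig.init_vectors()                       # build binary up/dn vectors
#     results = sig.get_query_results(d_dbsc)  # ranked list of similar signatures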
class DBSignature(Signature):
'''
Signature instance from the mongodb.
'''
chdir_field = 'chdir_sva_exp2'
def __init__(self, uid, projection=PROJECTION_EXCLUDE, doc=None):
        ## the constructor also acts as a way to query mongodb using
        ## the id and returns desirable fields by specifying projection
if doc is None: ## if doc is given, do not retrieve from DB
doc = COLL.find_one({'id':uid}, projection)
name = find_name(doc)
if self.chdir_field in doc:
chdir = doc[self.chdir_field]
del doc[self.chdir_field]
self.chdir = chdir
Signature.__init__(self, name, doc)
def has_chdir(self):
## assert if a signature has the chdir field
if hasattr(self, 'chdir'): return True
else: return False
def get_vectors_and_clear(self, cutoff=600):
v_cs = np.zeros(len(ALL_GENES), dtype=np.float32)
genes = self.chdir['genes'][:cutoff]
genes, uniq_idx = np.unique(genes, return_index=True)
vals = np.array(self.chdir['vals'][:cutoff])[uniq_idx]
v_cs[np.in1d(ALL_GENES, genes)] = vals
self.v_cs = sp.lil_matrix(v_cs)
up_genes_i = map(lambda x: x.upper(), genes[vals > 0])
dn_genes_i = map(lambda x: x.upper(), genes[vals < 0])
up_idx = np.in1d(ALL_GENES_I, up_genes_i, assume_unique=True)
dn_idx = np.in1d(ALL_GENES_I, dn_genes_i, assume_unique=True)
del self.chdir
return up_idx, dn_idx
def get_vector_indexes(self, cutoff=600):
'''
Get indexes of up and down genes in ALL_GENES_I
        Used for construction of sparse matrices in DBSignatureCollection.mat_*
'''
genes_i = np.array(map(lambda x:x.upper(), self.chdir['genes'][:cutoff]))
genes_i, uniq_idx = np.unique(genes_i, return_index=True)
vals = np.array(self.chdir['vals'][:cutoff])[uniq_idx]
up_genes_i = genes_i[vals > 0]
dn_genes_i = genes_i[vals < 0]
up_idx = np.in1d(ALL_GENES_I, up_genes_i, assume_unique=True)
dn_idx = np.in1d(ALL_GENES_I, dn_genes_i, assume_unique=True)
return up_idx, dn_idx
def init_cs_vectors(self, cutoff=600):
'''Init case sensitive vectors with CD values.
        This vector is intended for exporting purposes, used in to_json and to_dict.
'''
if not hasattr(self, 'v_cs'):
v_cs = np.zeros(len(ALL_GENES), dtype=np.float32)
genes = self.chdir['genes'][:cutoff]
genes, uniq_idx = np.unique(genes, return_index=True)
vals = np.array(self.chdir['vals'][:cutoff])[uniq_idx]
v_cs[np.in1d(ALL_GENES, genes)] = vals
self.v_cs = sp.lil_matrix(v_cs)
def fill_top_genes(self):
'''Get top up/dn genes from `v_cs`
'''
# get mask of non zero index
mask_non_zero = (self.v_cs != 0).toarray().ravel()
# retrieve CD vals and genes
vals = self.v_cs[0, mask_non_zero].toarray().ravel()
genes = ALL_GENES[mask_non_zero]
# sort CD vals on abs(vals)
srt_idx = np.abs(vals).argsort()[::-1]
up_genes = []
dn_genes = []
for gene, val in zip(genes[srt_idx].tolist(), vals[srt_idx].tolist()):
if val > 0:
up_genes.append( (gene, val) )
else:
dn_genes.append( (gene, val) )
return up_genes, dn_genes
def clear(self, cutoff=600):
'''Clear unnecessary fields to reduce RAM usage.
'''
self.init_cs_vectors(cutoff=cutoff)
del self.chdir
def __sizeof__(self):
size = sum(map(sys.getsizeof, [self.name, self.meta]))
size += sparse_matrix_size(self.v_cs)
return size
def to_json(self, meta_only=False):
## to export the document into json
json_data = self.meta
if not meta_only:
up_genes, dn_genes = self.fill_top_genes()
json_data['up_genes'] = up_genes
json_data['down_genes'] = dn_genes
return json.dumps(json_data)
def to_dict(self, format='gmt'):
## method to generate files for downloading
if format == 'gmt':
dict_data = {'name': self.name, 'id': self.meta['id']}
else:
dict_data = self.meta
up_genes, dn_genes = self.fill_top_genes()
dict_data['up_genes'] = up_genes
dict_data['down_genes'] = dn_genes
return dict_data
def post_to_paea(self, cutoff=2000):
## post top n genes to PAEA and return a PAEA url
## return None if instance has no chdir
post_url = 'http://amp.pharm.mssm.edu/Enrichr/addList'
base_url = 'http://amp.pharm.mssm.edu/PAEA?id='
paea_url = None
if self.has_chdir():
up_genes, dn_genes = self.fill_top_genes()
gene_list = []
for gene, coef in up_genes + dn_genes:
gene_list.append( '%s,%s\n'% (gene, coef) )
gene_list = ''.join(gene_list)
data = {'list': gene_list, 'inputMethod': "PAEA", 'description': self.name}
r = requests.post(post_url, files=data)
paea_url = base_url + str(json.loads(r.text)['userListId'])
return paea_url
def post_to_cds2(self, cutoff=2000):
## post top n genes to L1000CDS2 API and return a CDS2 url
url = 'http://amp.pharm.mssm.edu/L1000CDS2/query'
cds2_url = None
if self.has_chdir():
up_genes, dn_genes = self.fill_top_genes()
data = {
"genes": map(lambda x: x[0].upper(), up_genes + dn_genes),
"vals": map(lambda x: x[1], up_genes + dn_genes)
}
config = {"aggravate":False,"searchMethod":"CD","share":True,"combination":True,"db-version":"latest"}
metadata = [{"key":"name","value": self.name}]
for key, val in self.meta.items():
if key not in ['pert_ids', 'ctrl_ids', 'curator']:
metadata.append({"key":key, "value":val})
payload = {"data":data,"config":config,"meta":metadata}
headers = {'content-type':'application/json'}
r = requests.post(url,data=json.dumps(payload),headers=headers)
resCD = r.json()
shareId = resCD['shareId']
cds2_url = 'http://amp.pharm.mssm.edu/L1000CDS2/#/result/' + shareId
return cds2_url
def get_url(self):
## get the url of the signature's gene, disease or drug
url = ''
meta = self.meta
uid = meta['id']
if ':P' not in uid: # not v2.0 signature
if uid.startswith('gene:'):
organism = meta['organism']
if organism == 'human':
gene_symbol = meta['hs_gene_symbol']
if gene_symbol is None:
gene_symbol = meta['mm_gene_symbol']
else:
gene_symbol = meta['mm_gene_symbol']
if gene_symbol is None:
gene_symbol = meta['hs_gene_symbol']
gene_id = GENE_SYMBOLS.get(gene_symbol, '')
url = 'http://www.ncbi.nlm.nih.gov/gene/%s' % gene_id
elif uid.startswith('dz:'):
do_id = meta.get('do_id', None)
if do_id is not None:
url = 'http://disease-ontology.org/term/%s' % do_id
else:
url = 'https://www.google.com/search?q=%s' % self.name.replace(' ', '+')
else:
db_id = meta.get('drugbank_id', None)
pubchem_cid = meta.get('pubchem_cid', None)
if db_id is not None:
url = 'http://www.drugbank.ca/drugs/%s' % db_id
elif pubchem_cid is not None:
url = 'https://pubchem.ncbi.nlm.nih.gov/compound/%s' % pubchem_cid
else:
url = 'https://www.google.com/search?q=%s' % self.name.replace(' ', '+')
else: # v2.0 signature
if uid.startswith('gene:'): key = 'hs_gene_symbol'
elif uid.startswith('dz:'): key = 'disease_name'
else: key = 'drug_name'
# becas concept ids
cids = [':'.join(item.get('cid', ':').split(':')[:2]) for item in meta.get(key, [])]
url_root = 'http://bioinformatics.ua.pt/becas/api/concept/redirect/'
url = [url_root + cid for cid in cids]
return url
def wrapper_func(i, doc):
sig = DBSignature(None, doc=doc)
# sig.fill_top_genes()
# fill the sparse matrices
# up_idx, dn_idx = sig.get_vector_indexes()
# mat_up[i, up_idx] = 1
# mat_dn[i, dn_idx] = 1
# clear `chdir` field and add `v_cs` for exporting
# sig.clear(cutoff=600)
# if i % 5 == 0:
# print i
up_idx, dn_idx = sig.get_vectors_and_clear()
uid = doc['id']
return uid, sig, up_idx, dn_idx
class DBSignatureCollection(dict):
'''
A collection of DBSignature from the mongodb
'''
formats = ['csv', 'json', 'gmt']
category2name = {
'gene': 'single_gene_perturbations',
'dz': 'disease_signatures',
'drug': 'single_drug_perturbations',
}
outfn_path = os.path.dirname(os.path.realpath(__file__)) + '/static/downloads/'
def __init__(self, filter_=None, name=None, limit=None, name_prefix=None):
'''
`filter_` should be a mongo query
'''
self.filter_ = filter_
self.name = name # 'v1.0', 'v1.1', 'p1.0'
self.name_prefix = name_prefix # 'Mannual', 'Drug Matrix', 'Automatated'
if not limit:
cur = COLL.find(self.filter_, PROJECTION_EXCLUDE)
else:
cur = COLL.find(self.filter_, PROJECTION_EXCLUDE).limit(limit)
# to preserve orders
self.uids = cur.distinct('id')
# sparse matrices
sparse_mat_shape = (len(self.uids), len(ALL_GENES_I))
mat_up = sp.lil_matrix(sparse_mat_shape, dtype=np.int8)
mat_dn = sp.lil_matrix(sparse_mat_shape, dtype=np.int8)
# Load signatures
tuple_list = Parallel(n_jobs=-1, backend='threading', verbose=10)(
            delayed(wrapper_func)(i, doc) for i, doc in enumerate(cur))
# This file is part of Flask-Multipass-CERN.
# Copyright (C) 2020 - 2021 CERN
#
# Flask-Multipass-CERN is free software; you can redistribute
# it and/or modify it under the terms of the MIT License; see
# the LICENSE file for more details.
import logging
from datetime import datetime
from functools import wraps
from importlib import import_module
from inspect import getcallargs
from authlib.integrations.requests_client import OAuth2Session
from flask import current_app, g, has_request_context
from flask_multipass import IdentityRetrievalFailed
from flask_multipass.data import IdentityInfo
from flask_multipass.exceptions import MultipassException
from flask_multipass.group import Group
from flask_multipass.identity import IdentityProvider
from flask_multipass.providers.authlib import AuthlibAuthProvider, _authlib_oauth
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
from urllib3 import Retry
CACHE_LONG_TTL = 86400 * 2
CACHE_TTL = 1800
CERN_OIDC_WELLKNOWN_URL = 'https://auth.cern.ch/auth/realms/cern/.well-known/openid-configuration'
HTTP_RETRY_COUNT = 5
retry_config = HTTPAdapter(max_retries=Retry(total=HTTP_RETRY_COUNT,
status_forcelist=[503, 504],
allowed_methods=frozenset(['GET']),
raise_on_status=False))
_cache_miss = object()
class ExtendedCache:
def __init__(self, cache):
self.cache = self._init_cache(cache)
def _init_cache(self, cache):
if cache is None:
return None
elif callable(cache):
return cache()
elif isinstance(cache, str):
module_path, class_name = cache.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
else:
return cache
def get(self, key, default=None):
if self.cache is None:
return default
return self.cache.get(key, default)
def set(self, key, value, timeout=0, refresh_timeout=None):
if self.cache is None:
return
self.cache.set(key, value, timeout)
if refresh_timeout:
self.cache.set(f'{key}:timestamp', datetime.now(), refresh_timeout)
def should_refresh(self, key):
if self.cache is None:
return True
return self.cache.get(f'{key}:timestamp') is None
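# Illustrative usage sketch (`fetch_groups` is hypothetical; any Flask-Caching-style
# backend with get/set can be wrapped): the long timeout keeps a stale fallback while
# the short refresh timeout marks when the value should be re-fetched.
#
#     cache = ExtendedCache(some_cache_backend)
#     groups = cache.get('groups:jdoe')
#     if groups is None or cache.should_refresh('groups:jdoe'):
#         groups = fetch_groups('jdoe')
#         cache.set('groups:jdoe', groups, CACHE_LONG_TTL, CACHE_TTL)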
def memoize_request(f):
@wraps(f)
def memoizer(*args, **kwargs):
if not has_request_context() or current_app.config['TESTING'] or current_app.config.get('REPL'):
# No memoization outside request context
return f(*args, **kwargs)
try:
cache = g._cern_multipass_memoize
except AttributeError:
g._cern_multipass_memoize = cache = {}
key = (f.__module__, f.__name__, make_hashable(getcallargs(f, *args, **kwargs)))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return memoizer
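# Illustrative usage sketch (`lookup_user` and `expensive_api_call` are hypothetical):
# within a single request the decorated function runs only once per argument set.
#
#     @memoize_request
#     def lookup_user(identifier):
#         return expensive_api_call(identifier)
#
#     lookup_user('jdoe')  # performs the call
#     lookup_user('jdoe')  # served from g._cern_multipass_memoize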
def make_hashable(obj):
if isinstance(obj, (list, set)):
return tuple(obj)
elif isinstance(obj, dict):
return frozenset((k, make_hashable(v)) for k, v in obj.items())
return obj
def normalize_cern_person_id(value):
"""Normalize the CERN person ID.
We always want a string or None if it's missing.
"""
if value is None:
return None
elif isinstance(value, int):
return str(value)
elif not value:
return None
else:
return value
class CERNAuthProvider(AuthlibAuthProvider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.include_token = True
@property
def authlib_settings(self):
settings = dict(self.settings['authlib_args'])
settings.setdefault('server_metadata_url', CERN_OIDC_WELLKNOWN_URL)
# XXX should we request any other scopes?
settings.setdefault('client_kwargs', {'scope': 'openid'})
return settings
class CERNGroup(Group):
supports_member_list = True
def get_members(self):
assert '/' not in self.name
with self.provider._get_api_session() as api_session:
group_data = self.provider._get_group_data(self.name)
if group_data is None:
return
gid = group_data['id']
params = {
'limit': 5000,
'field': [
'upn',
'firstName',
'lastName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
'recursive': 'true'
}
results = self.provider._fetch_all(api_session, f'/api/v1.0/Group/{gid}/memberidentities', params)[0]
for res in results:
del res['id'] # id is always included
self.provider._fix_phone(res)
identifier = res.pop('upn')
extra_data = self.provider._extract_extra_data(res)
yield IdentityInfo(self.provider, identifier, extra_data, **res)
def has_member(self, identifier):
cache = self.provider.cache
logger = self.provider.logger
cache_key = f'flask-multipass-cern:{self.provider.name}:groups:{identifier}'
all_groups = cache.get(cache_key)
if all_groups is None or cache.should_refresh(cache_key):
try:
all_groups = {g.name.lower() for g in self.provider.get_identity_groups(identifier)}
cache.set(cache_key, all_groups, CACHE_LONG_TTL, CACHE_TTL)
except RequestException:
logger.warning('Refreshing user groups failed for %s', identifier)
if all_groups is None:
logger.error('Getting user groups failed for %s, access will be denied', identifier)
return False
if self.provider.settings['cern_users_group'] and self.name.lower() == 'cern users':
return self.provider.settings['cern_users_group'].lower() in all_groups
return self.name.lower() in all_groups
class CERNIdentityProvider(IdentityProvider):
supports_refresh = True
supports_get = False
supports_search = True
supports_search_ex = True
supports_groups = True
supports_get_identity_groups = True
group_class = CERNGroup
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.authlib_client = _authlib_oauth.register(self.name + '-idp', **self.authlib_settings)
self.settings.setdefault('cache', None)
self.settings.setdefault('extra_search_filters', [])
self.settings.setdefault('authz_api', 'https://authorization-service-api.web.cern.ch')
self.settings.setdefault('phone_prefix', '+412276')
self.settings.setdefault('cern_users_group', None)
self.settings.setdefault('logger_name', 'multipass.cern')
self.logger = logging.getLogger(self.settings['logger_name'])
self.cache = ExtendedCache(self.settings['cache'])
if not self.settings.get('mapping'):
# usually mapping is empty, in that case we set some defaults
self.settings['mapping'] = {
'first_name': 'firstName',
'last_name': 'lastName',
'affiliation': 'instituteName',
'phone': 'telephone1',
'email': 'primaryAccountEmail',
}
@property
def authlib_settings(self):
settings = dict(self.settings['authlib_args'])
settings.setdefault('server_metadata_url', CERN_OIDC_WELLKNOWN_URL)
return settings
@property
def authz_api_base(self):
return self.settings['authz_api'].rstrip('/')
def refresh_identity(self, identifier, multipass_data):
data = self._get_identity_data(identifier)
self._fix_phone(data)
identifier = data.pop('upn')
extra_data = self._extract_extra_data(data)
return IdentityInfo(self, identifier, extra_data, **data)
def _fix_phone(self, data):
phone = data.get('telephone1')
if not phone or phone.startswith('+'):
return
data['telephone1'] = self.settings['phone_prefix'] + phone
def _extract_extra_data(self, data, default=None):
return {'cern_person_id': normalize_cern_person_id(data.pop('cernPersonId', default))}
def get_identity_from_auth(self, auth_info):
upn = auth_info.data.get('sub')
groups = auth_info.data.get('groups')
cache_key_prefix = f'flask-multipass-cern:{self.name}'
if groups is not None:
groups = {x.lower() for x in groups}
cache_key = f'{cache_key_prefix}:groups:{upn}'
self.cache.set(cache_key, groups, CACHE_LONG_TTL, CACHE_TTL)
try:
data = self._fetch_identity_data(auth_info)
# check for data mismatches between our id token and authz
self._compare_data(auth_info.data, data)
phone = data.get('telephone1')
affiliation = data.get('instituteName')
self.cache.set(f'{cache_key_prefix}:phone:{upn}', phone, CACHE_LONG_TTL)
self.cache.set(f'{cache_key_prefix}:affiliation:{upn}', affiliation, CACHE_LONG_TTL)
except RequestException:
self.logger.warning('Getting identity data for %s failed', upn)
phone = self.cache.get(f'{cache_key_prefix}:phone:{upn}', _cache_miss)
affiliation = self.cache.get(f'{cache_key_prefix}:affiliation:{upn}', _cache_miss)
if phone is _cache_miss or affiliation is _cache_miss:
self.logger.error('Getting identity data for %s failed without cache fallback', upn)
raise IdentityRetrievalFailed('Retrieving identity information from CERN SSO failed', provider=self)
data = {
'firstName': auth_info.data['given_name'],
'lastName': auth_info.data['family_name'],
'displayName': auth_info.data['name'],
'telephone1': phone,
'instituteName': affiliation,
'primaryAccountEmail': auth_info.data['email'],
}
self._fix_phone(data)
data.pop('upn', None)
extra_data = self._extract_extra_data(data, normalize_cern_person_id(auth_info.data.get('cern_person_id')))
return IdentityInfo(self, upn, extra_data, **data)
def search_identities(self, criteria, exact=False):
return iter(self.search_identities_ex(criteria, exact=exact)[0])
@memoize_request
def search_identities_ex(self, criteria, exact=False, limit=None):
emails_key = '-'.join(sorted(x.lower() for x in criteria['primaryAccountEmail']))
cache_key = f'flask-multipass-cern:{self.name}:email-identities:{emails_key}'
use_cache = exact and limit is None and len(criteria) == 1 and 'primaryAccountEmail' in criteria
if use_cache:
cached_data = self.cache.get(cache_key)
if cached_data:
cached_results = []
for res in cached_data[0]:
identifier = res.pop('upn')
extra_data = self._extract_extra_data(res)
cached_results.append(IdentityInfo(self, identifier, extra_data, **res))
if not self.cache.should_refresh(cache_key):
return cached_results, cached_data[1]
if any(len(x) != 1 for x in criteria.values()):
# Unfortunately the API does not support OR filters (yet?).
# Fortunately we never search for more than one value anyway, except for emails when
# looking up identities based on the user's email address.
if len(criteria) != 1:
raise MultipassException('This provider does not support multiple values for a search criterion',
provider=self)
field, values = dict(criteria).popitem()
seen = set()
total = 0
all_identities = []
for value in values:
identities = self.search_identities_ex({field: [value]}, exact=exact, limit=limit)[0]
for identity in identities:
if identity.identifier not in seen:
seen.add(identity.identifier)
all_identities.append(identity)
total += 1
return all_identities, total
criteria = {k: next(iter(v)) for k, v in criteria.items()}
op = 'eq' if exact else 'contains'
api_criteria = [f'{k}:{op}:{v}' for k, v in criteria.items()]
api_criteria.append('type:eq:Person')
api_criteria += self.settings['extra_search_filters']
params = {
'limit': limit or 5000,
'filter': api_criteria,
'field': [
'upn',
'firstName',
'lastName',
'displayName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
}
with self._get_api_session() as api_session:
results = []
total = 0
try:
results, total = self._fetch_all(api_session, '/api/v1.0/Identity', params, limit=limit)
except RequestException:
self.logger.warning('Refreshing identities failed for criteria %s', criteria)
if use_cache and cached_data:
return cached_results, cached_data[1]
else:
self.logger.error('Getting identities failed for criteria %s', criteria)
raise
identities = []
cache_data = []
for res in results:
if not res['upn']:
total -= 1
continue
del res['id']
self._fix_phone(res)
res_copy = dict(res)
identifier = res_copy.pop('upn')
extra_data = self._extract_extra_data(res_copy)
identities.append(IdentityInfo(self, identifier, extra_data, **res_copy))
if use_cache:
cache_data.append(res)
if use_cache:
self.cache.set(cache_key, (cache_data, total), CACHE_LONG_TTL, CACHE_TTL * 2)
return identities, total
def get_identity_groups(self, identifier):
with self._get_api_session() as api_session:
resp = api_session.get(f'{self.authz_api_base}/api/v1.0/IdentityMembership/{identifier}/precomputed')
if resp.status_code == 404 or resp.status_code == 500:
return set()
resp.raise_for_status()
results = resp.json()['data']
return {self.group_class(self, res['groupIdentifier']) for res in results}
def get_group(self, name):
return self.group_class(self, name)
def search_groups(self, name, exact=False):
op = 'eq' if exact else 'contains'
params = {
'limit': 5000,
'filter': [f'groupIdentifier:{op}:{name}'],
'field': ['groupIdentifier'],
}
with self._get_api_session() as api_session:
results = self._fetch_all(api_session, '/api/v1.0/Group', params)[0]
rv = {self.group_class(self, res['groupIdentifier']) for res in results}
if (
self.settings['cern_users_group'] and
(name.lower() == 'cern users' or (not exact and name.lower() in 'cern users'))
):
rv.add(self.group_class(self, 'CERN Users'))
return rv
@memoize_request
def _get_api_session(self):
cache_key = f'flask-multipass-cern:{self.name}:api-token'
token = self.cache.get(cache_key)
if token:
oauth_session = OAuth2Session(token=token)
oauth_session.mount(self.authz_api_base, retry_config)
return oauth_session
meta = self.authlib_client.load_server_metadata()
token_endpoint = meta['token_endpoint'].replace('protocol/openid-connect', 'api-access')
oauth_session = OAuth2Session(
self.authlib_client.client_id,
self.authlib_client.client_secret,
token_endpoint=token_endpoint,
grant_type='client_credentials',
)
oauth_session.mount(self.authz_api_base, retry_config)
oauth_session.fetch_access_token(
audience='authorization-service-api',
headers={'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'},
)
self.cache.set(cache_key, oauth_session.token, oauth_session.token['expires_in'] - 30)
return oauth_session
def _fetch_identity_data(self, auth_info):
# Exchange the user token to one for the authorization API
user_api_token = self.authlib_client.fetch_access_token(
grant_type='urn:ietf:params:oauth:grant-type:token-exchange',
subject_token_type='urn:ietf:params:oauth:token-type:access_token',
audience='authorization-service-api',
subject_token=auth_info.data['token']['access_token'],
)
params = {
'field': [
'upn',
'firstName',
'lastName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
}
resp = self.authlib_client.get(f'{self.authz_api_base}/api/v1.0/Identity/current', token=user_api_token,
params=params)
resp.raise_for_status()
data = resp.json()['data']
del data['id'] # id is always included
return data
def _fetch_all(self, api_session, endpoint, params, limit=None):
results = []
resp = api_session.get(self.authz_api_base + endpoint, params=params)
resp.raise_for_status()
data = resp.json()
total = data['pagination']['total']
while True:
results += data['data']
            if not data['pagination']['next']:
                break
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
from gcn.input_data import pollute_data
import json
import os
from networkx.readwrite import json_graph as jg
import sys
sys.path.insert(1, '/Users/april/Downloads/GCN_detection_benchmarkFinal/GCN_detection_benchmark/gcn/Preprocessing/')
def create_G_idM_classM(adjacency, features, testMask, valMask, labels):
# 1. Create Graph
print("Creating graph...")
# Create graph from adjacency matrix
G = nx.from_numpy_matrix(adjacency)
num_nodes = G.number_of_nodes()
# Change labels to int from numpy.int64
labels = labels.tolist()
for arr in labels:
for integer in arr:
integer = int(integer)
# Iterate through each node, adding the features
i = 0
for n in list(G):
G.node[i]['feature'] = list(map(float, list(features[i])))
G.node[i]['test'] = bool(testMask[i])
G.node[i]['val'] = bool(valMask[i])
G.node[i]['labels'] = list(map(int, list(labels[i])))
i += 1
# 2. Create id-Map and class-Map
print("Creating id-Map and class-Map...")
    # Initialize the dictionaries
idM = {}
classM = {}
    # Populate the dictionaries
i = 0
while i < num_nodes:
idStr = str(i)
idM[idStr] = i
classM[idStr] = list(labels[i])
i += 1
return G, idM, classM
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
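# Illustrative example: sample_mask([0, 2], 4) returns
# array([ True, False,  True, False]), marking which rows belong to a split.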
def load_data(dataset_str):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
#Use mask to translate a fully supervised setting to a semi-supervised setting
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
#combine all training and testing features as sparse matrix
features = sp.vstack((allx, tx)).tolil()
#change the testing features' order, the testing instances will follow training instances
features[test_idx_reorder, :] = features[test_idx_range, :]
#change graph adjacency matrix to sparse matrix format
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
#correspondingly adjust testing labels
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
#attributes, labels = pollute_data_2(labels, features)
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
#Just choose another 500 training instances as validation set
idx_val = range(len(y), len(y)+500)
'''
idx_train = range(1208)
idx_val = range(1208, 1208+ 500)
attributes, labels = pollute_data(labels, features, idx_train, idx_val, idx_test)
'''
#testing the label rate of cora dataset
if dataset_str == 'cora':
num_train = len(y)
total_num = len(ally)+len(ty)
label_ratio_cora = num_train *1.0/total_num
print(label_ratio_cora)
if dataset_str == 'citeseer':
num_train = len(y)
total_num = len(ally) + len(ty)
label_ratio_citeseer = num_train * 1.0 / total_num
print(label_ratio_citeseer)
#vector of size 2708, idx_train as true
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
#only assign label value when the train_mask as true
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
#testing instance starts from 1708
y_test[test_mask, :] = labels[test_mask, :]
#Translate adj to numpy arrays
adj_np = adj.toarray()
#translate features to numpy arrays
features_np = features.toarray()
#generate the graph and id_map, class_map
G, IDMap, classMap =create_G_idM_classM(adj_np, features_np, test_mask, val_mask, labels)
    #at this stage, for all validation nodes and test nodes we have their labels but use the mask to make them
    #all [0 0 0 0 0 0 0]
num_edges =len(G.edges())
print(num_edges)
print(G.number_of_edges())
#Dump everything into .json files and one .npy
if dataset_str == 'cora':
graphFile_prefix = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/cora'
dataset_name = 'cora_process'
dumpJSON(graphFile_prefix, dataset_name, G, IDMap, classMap, features_np)
if dataset_str == 'citeseer':
graphFile_prefix = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/citeseer'
dataset_name = 'citeseer_process'
dumpJSON(graphFile_prefix, dataset_name, G, IDMap, classMap, features_np)
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
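# Illustrative sketch (values chosen only for demonstration; numpy/scipy are already
# imported above): for a 2-node graph with self-loops added, A + I = [[1, 1], [1, 1]],
# every degree is 2, so D^{-1/2} (A + I) D^{-1/2} = [[0.5, 0.5], [0.5, 0.5]].
#
#     A = sp.coo_matrix(np.array([[0, 1], [1, 0]]))
#     normalize_adj(A + sp.eye(2)).toarray()   # -> [[0.5, 0.5], [0.5, 0.5]]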
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
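# Illustrative usage sketch (assumes `adj` comes from load_data above): a k-th order
# Chebyshev filter needs k + 1 supports, fed to the model in place of the single
# normalized adjacency produced by preprocess_adj.
#
#     support = chebyshev_polynomials(adj, 3)   # list of 4 matrices in tuple form
#     num_supports = 1 + 3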
def load_bsbm_data(path,prefix, normalize=True):
G_data = json.load(open(path+prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
# change graph adjacency matrix to sparse matrix format
adj = nx.adjacency_matrix(nx.from_dict_of_lists(G.adj))
print("The number of edges")
edge_num = G.number_of_edges()
print(edge_num)
print("The number of nodes")
nodes_num = G.number_of_nodes()
print(nodes_num)
# print G.nodes()[0]
# check G.nodes()[0] is an integer or not
if isinstance(G.nodes()[0], int):
conversion = lambda n: int(n)
else:
conversion = lambda n: n
if os.path.exists(path+prefix + "-feats.npy"):
feats = np.load(path+prefix + "-feats.npy")
else:
print("No features present.. Only identity features will be used.")
feats = None
id_map = json.load(open(path+prefix + "-id_map.json"))
id_map = {conversion(k): int(v) for k, v in id_map.items()}
# just print the id_map keys range:
# id_map_range = np.sort(id_map.keys())
walks = []
class_map = json.load(open(path+prefix + "-class_map.json"))
if isinstance(list(class_map.values())[0], list):
lab_conversion = lambda n: n
else:
lab_conversion = lambda n: int(n)
class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}
# just print the class_map keys range:
class_map_int_list = []
for j in class_map.keys():
class_map_int_list.append(int(j))
class_map_range = np.sort(class_map_int_list)
#generate y_train, y_val, y_test ndarray
y_train =np.array([0,0])
y_val = np.array([0,0])
y_test =np.array([0,0])
idx_train =[]
idx_val=[]
idx_test=[]
for node in G.nodes():
if G.node[node]['test'] == False and G.node[node]['val']==False:
print("Train,currrent n is %d" % node)
train_label = G.node[node]['label']
train_label = np.array(train_label)
y_train = np.vstack((y_train, train_label))
y_val = np.vstack((y_val,[0,0]))
y_test = np.vstack((y_test,[0,0]))
idx_train.append(node)
elif G.node[node]['test'] == False and G.node[node]['val']==True:
print("Validation, current n is %d" %node)
validation_label = G.node[node]['label']
validation_label = np.array(validation_label)
y_val = np.vstack((y_val,validation_label))
y_train = np.vstack((y_train, [0, 0]))
y_test = np.vstack((y_test,[0,0]))
idx_val.append(node)
elif G.node[node]['test'] == True and G.node[node]['val']==False:
print("Test, current n is %d" %node)
test_label = G.node[node]['label']
test_label = np.array(test_label)
y_test = np.vstack((y_test,test_label))
y_train = np.vstack((y_train, [0, 0]))
y_val = np.vstack((y_val, [0, 0]))
idx_test.append(node)
print("training label shape is")
#print(y_train.shape)
y_train = np.delete(y_train,0,axis=0)
y_val = np.delete(y_val,0,axis=0)
y_test = np.delete(y_test,0,axis=0)
print(y_train.shape)
#generate train_mask, val_mask and test_mask
train_mask = sample_mask(idx_train, len(G.node))
val_mask = sample_mask(idx_val, len(G.node))
test_mask = sample_mask(idx_test, len(G.node))
#check how many train_mask is true:
train_true_num = np.count_nonzero(train_mask)
#Similarly for val_mask, test_mask
val_true_num = np.count_nonzero(val_mask)
test_true_num = np.count_nonzero(test_mask)
    # print the number of True entries in each mask
"""
█▀▀ █▀▀ █▄░█ █▀▀ █▀ █ █▀ █░░ █▀▀ █▀ █▀ █▀█ █▄░█ █▀ █░█ █▀█ █░░ ░ ▄█
█▄█ ██▄ █░▀█ ██▄ ▄█ █ ▄█ █▄▄ ██▄ ▄█ ▄█ █▄█ █░▀█ ▄█ ▀▄▀ █▄█ █▄▄ ▄ ░█
Welcome to the Genesis Gir lesson tutorials Volume 1! Genesis Chit Chat bot is a program that gathers info on
the user and displays all the cool info at the end of the program as shock value, since the bot can remember
all of the user's fun little facts. Chit Chat bot will ask the user questions then display its results at the end of
the program. Really fun to make (and I'm trying to get out of writer's block right now, so if it's not as immersive I'm sorry)
but this will be fun to make and learn from in the long run with functionalities like print(), input() and even
using multiline strings and much more! Thanks for downloading! 山尺讠〸〸🝗𝓝 & ⼕ㄖᗪ🝗ᗪ ⻏丫 Ꮆ🝗𝓝🝗丂讠丂 Ꮆ讠尺
"""
# Loading screen
print()
print()
print('initializing bot! ')
print('(loading. . .)')
print('(press enter)')
input() # T/I
print('Tidying up mainframes!')
print('(loading. . . )')
print('(press enter)')
input() # T/I
print('Making things pretty for you!')
print('(loading. . . )')
print('(press enter)')
input()
print('picking up data Blocks!')
print('(loading. . . )')
print('(press enter)')
input()
print('Data Blocks loading in!')
print('(loading. . .)')
print('(press enter)')
input()
print('Letting things fall into place!')
print('(loading. . .)')
print('(press enter)')
input()
"""
█░█ ▄▀█ █▀█ █ ▄▀█ █▄▄ █░░ █▀▀ █▀
▀▄▀ █▀█ █▀▄ █ █▀█ █▄█ █▄▄ ██▄ ▄█
variables are like tiny little safes or storage units, you can store values and data types inside to later use them in your
program for ease of use! Variables are really useful if you want to store data inside them, from integers, floats and
string data types, and even Boolean values like True and False can be used and stored within them. In this program
the variable 'prompt' is storing '(press enter)' so that way we don't have to type it all the time, or at least that's why
I'm using it. Variables are created using assignment statements that contain the variable name, the assignment operator
and the value or data type to be stored within the variable. Whatever you name the variable, it must be a valid name or
you will get a syntax error: a variable can't have any spaces, begin with a number, or contain special characters
in it like '!@#$%' etc., so make sure to name your variables something in the valid formats.
Pick a variable name that has to do with the item it stores, like the Automate the Boring Stuff example: if you
label all of your moving boxes 'stuff' you won't ever find anything, so picking a name that suits your project or
program will best serve your organization later down the road!
██████
▓▓▓▓ ▓▓░░
░░░░ ▓▓▓▓
████ 𝕧𝕒𝕣𝕚𝕒𝕓𝕝𝕖𝕤 ████
████ ████
████ True ████
████ ████
██ ████ False ████ ▓▓
██ ▓▓▓▓ ▓▓▓▓ ▓▓
██ ▓▓▓▓ ▓▓▓▓ ▓▓
██ ▓▓▒▒ ▓▓▓▓ ▓▓
██ ▓▓▓▓ ▓▓▒▒ 78 ▓▓
██ 'strings' ██████ ▓▓
██ ▓▓ 1 ▓▓
██ ▓▓ integers ▓▓
██ ▓▓ ▓▓
██ DATA ▓▓ ▓▓
██ ▓▓ 500.1 ▓▓
██ 'Cat' ▓▓▓▓▓▓ (floats) ▓▓
██ ▓▓▓▓ ▓▓ ▓▓▒▒ ▓▓
██ ▓▓▒▒ ▓▓ ▓▓▓▓ ▓▓
██ ▓▓▓▓ ▓▓ ▓▓▓▓ ▓▓
██ ▒▒▒▒░░░░ ▓▓ 2.1 ░░▒▒▒▒ ▓▓
██ ▒▒▒▒░░░░ ▓▓ ░░░░▒▒▒▒ ▓▓
████ Your Mind ▓▓ ██▓▓
████ ▓▓ 95 ████ 𝙏𝙞𝙥: 𝙡𝙖𝙗𝙚𝙡 𝙮𝙤𝙪𝙧 𝙫𝙖𝙧𝙞𝙖𝙗𝙡𝙚𝙨 𝙨𝙤𝙢𝙚𝙩𝙝𝙞𝙣𝙜 𝙩𝙝𝙖𝙩 𝙝𝙖𝙨 𝙩𝙤 𝙙𝙤 𝙬𝙞𝙩𝙝 𝙩𝙝𝙚 𝙩𝙤𝙥𝙞𝙘
▓▓▓▓ ▓▓ ▓▓▓▓
▓▓▓▓ ▓▓ ▓▓▓▓ 𝙏𝙞𝙥: 𝙫𝙖𝙧𝙞𝙖𝙗𝙡𝙚𝙨 𝙘𝙖𝙣𝙣𝙤𝙩 𝙗𝙚𝙜𝙞𝙣 𝙬𝙞𝙩𝙝 𝙖 𝙣𝙪𝙢𝙗𝙚𝙧
▓▓▓▓ ▓▓ ▓▓▓▓
▓▓▓▓ ▓▓ ▓▓▓▓
▓▓▓▓▓▓
"""
#Bot variable
bot = '[Chit-Chat Bot]'
# prompt variable shortcuts
prompt = '(press enter)'
#T This program asks the user questions and gives the results from the variables at the end
print()#(ELOC)
print()#(ELOC)
print()#(ELOC)
print(' █▀▀ █▀▀ █▄░█ █▀▀ █░█ █ ▀█▀ █▀▀ █░█ ▄▀█ ▀█▀ █▄▄ █▀█ ▀█▀ ')
print(' █▄█ ██▄ █░▀█ █▄▄ █▀█ █ ░█░ █▄▄ █▀█ █▀█ ░█░ █▄█ █▄█ ░█░ ')
print()#(ELOC)
print(' ')
print(' ░░░░░░░▄█▄▄▄█▄ ')
print(' ▄▀░░░░▄▌─▄─▄─▐▄░░░░▀▄ ')
print(' █▄▄█░░▀▌─▀─▀─▐▀░░█▄▄█ ')
print(' ░▐▌░░░░▀▀███▀▀░░░░▐▌ ')
print(' ████░▄█████████▄░████ ')
print()#(ELOC)
print()#(ELOC)
print("""
Welcome to the Genesis Chit-Chat Bot! Ever felt bored or lonely and have nobody to talk to?
Well, designed by Genesis Gir, this bot will hold conversations with you and remember all the facts you told it!
So what are you waiting for? It uses simple functions like input() to gather the user's inputs! The bot will ask you
various questions that you will have to answer to gather data! Good luck on your journey with your questions,
lets get started!
""")
input('(press enter to begin chatting!)')# waits for user to type than press enter and function call evaluates to standard input()
print()
# Question 1
print(bot+': Hello whats your name? ')# asking a question
name = input('(enter your name) ') # user creates variable using the input functionalities!
print()#(ELOC)
print(bot+': Nice to meet you '+name+' im glad to have you here today!') # speech dialog from (Gen Chit-Chat Bot)
print(prompt) # prompts user to press enter
input() # Takes input w/input()
# Question 2
print(bot+': So whats your favorite thing to do '+name+'?')# asking a question
hobby = input('(enter Hobby) ') # user creates variable using the input functionalities!
print()#(ELOC)
print(bot+': Nice, that sounds interesting! Mine is to talk with people like you and gather data!') # speech dialog from (Gen Chit-Chat Bot)
print(prompt) # prompts user to press enter
input()# Takes input w/input()
# Question 3
print(bot+': Whats your mood like right now?')# asking a question
mood = input('(enter your mood) ')# user creates variable using the input functionalities!
print()#(ELOC)
print(bot+': Sometimes feeling '+mood+' is okay! But when i find myself feeling unhappy') # speech dialog from (Gen Chit-Chat Bot)
print('I just talk with someone about it and I feel a lot better in the end!') #speech dialog continues from line '57'
print('(press enter)') # prompts user to press enter
input()# Takes input w/input()
# Question 4
print(bot+': Whats your favorite game to play?')# asking a question
game = input('(enter your favorite game) ')# user creates variable using the input functionalities!
print()#(ELOC)
print(bot+': '+game+'? I never heard of it than again Im a bot!') # speech dialog from (Gen Chit-Chat Bot)
input(prompt)
print()
#Bot Color variable
bot_color = 'purple'
# Question 5
print(bot+': Whats your favorite color?')# asking a question
color = input('(enter your favorite color) ')# user creates variable using the input functionalities!
print()#(ELOC)
print(bot+':'+color+'? My favorite color is '+bot_color+'!') # speech dialog from (Gen Chit-Chat Bot)
print(prompt) # prompts user to press enter
input()# Takes input w/input()
"""
🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂
█░█ █▀█ █░█░█ ▀█▀ █▀█ █░█ █▀ █▀▀ █▀█ █▀█ █ █▄░█ ▀█▀
█▀█ █▄█ ▀▄▀▄▀ ░█░ █▄█ █▄█ ▄█ ██▄ █▀▀ █▀▄ █ █░▀█ ░█░
The print function always comes with two parentheses (as does any function for that matter) and we
enter what data types we want inside of them, and everything inside of it is called an
argument while print itself is called a function. In this case I'm using a (str), better known
as a string, to make a storyline! So print( ) and anything inside it will be printed onto the
stream (screen), just make sure it's a correct data type: integer, float or a string value.
🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂 🅶🅴🅽🅴🆂🅸🆂
"""
# Question 6
print(bot+': Where are you from im curious!')# asking a question
location = input('(enter your location) ')# user creates variable using the input functionalities!
print()
print(bot+': I never been to '+location+' but it sounds pretty nice!') # speech dialog from (Gen Chit-Chat Bot)
input('(press enter)') # prompts user to talk some more than press enter
print()
"""
█░░█ █▀▀█ █░░░█ ▀▀█▀▀ █▀▀█ █▀▀ █▀▀█ █▀▄▀█ █▀▄▀█ █▀▀ █▀▀▄ ▀▀█▀▀
█▀▀█ █░░█ █▄█▄█ ░░█░░ █░░█ █░░ █░░█ █░▀░█ █░▀░█ █▀▀ █░░█ ░░█░░ ✎
▀░░▀ ▀▀▀▀ ░▀░▀░ ░░▀░░ ▀▀▀▀ ▀▀▀ ▀▀▀▀ ▀░░░▀ ▀░░░▀ ▀▀▀ ▀░░▀ ░░▀░░
First things first you need to learn how to make single line comments and multi line comments
so you can add little reminders or give insight on what that line of code does! Comments are
fun, great ways to just add notes to a line so you can reflect on them later. They won't show
up in the program, it's specifically for the developer (you) to look at! So single line comments
start with a hashtag (#) and anything after that will be a comment in a greyish tone. Great! Now
you know how to make single line comments but what about multi line comments like this one bro?
Easy, all you need to do is start the line with (3 quotes) and then anything after that will be
part of the comment until you close it off with another 3 quotes!
"""
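# Quick example (extra illustration, not part of the original lesson): both kinds of comments side by side.
# This is a single line comment, it won't show up when the program runs!
"""
And this is a multi line comment!
Perfect for longer notes like the lesson blocks in this file.
"""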
# paths_cli/wizard/wizard.py
import shutil
import os
import textwrap
from paths_cli.wizard.tools import yes_no, a_an
from paths_cli.wizard.core import get_object
from paths_cli.wizard.errors import (
FILE_LOADING_ERROR_MSG, RestartObjectException
)
from paths_cli.wizard.joke import name_joke
from paths_cli.wizard.helper import Helper, QuitWizard
from paths_cli.compiling.tools import custom_eval
from paths_cli.wizard import pause
class Console: # no-cov
"""Manage actual I/O for the Wizard.
All direct interaction with the user is performed in this class.
"""
# TODO: add logging so we can output the session
def print(self, *content):
"""Write content to screen"""
print(*content)
def input(self, content):
"""Read user input.
Parameters
----------
content : str
input prompt
"""
return input(content)
@property
def width(self):
"""Terminal width in columns"""
return shutil.get_terminal_size((80, 24)).columns
def draw_hline(self, char='═'):
"""Draw a separator line.
Parameters
----------
char : str
string to use for line separator
"""
n_chars = self.width // len(char)
self.print(char * n_chars)
class Wizard:
"""Friendly interactive Wizard
This class handles most of the user-facing interaction with the wizard,
including various conveniences for asking for certain user selections
(such as selecting between a number of possible choices.)
An instance of this class includes information about how the Wizard will
guide the user through various stages of simulation set-up.
The main method, ``Wizard.run_wizard()``, performs an entire simulation
set-up for that instance.
Parameters
----------
steps : List[:class:`.WizardStep`]
ordered list of steps in this particular simulation set-up process
"""
def __init__(self, steps):
self.steps = steps
self.requirements = {
step.display_name: (step.store_name, step.minimum, step.maximum)
for step in steps
}
self.engines = {}
self.cvs = {}
self.states = {}
self.networks = {}
self.schemes = {}
self.last_used_file = None # for loading
self.console = Console()
self.default = {}
self._patched = False # if we've done the monkey-patching
def _patch(self): # no-cov
import openpathsampling as paths
from openpathsampling.experimental.storage import monkey_patch_all
from paths_cli.param_core import StorageLoader
if not self._patched:
paths = monkey_patch_all(paths)
paths.InterfaceSet.simstore = True
self._patched = True
StorageLoader.has_simstore_patch = True
def debug(self, content): # no-cov
"""Print to console without pretty-printing"""
# debug does no pretty-printing
self.console.print(content)
def _speak(self, content, preface):
# we do custom wrapping here
# TODO: move this to the console class; should also wrap on `input`
width = self.console.width - len(preface)
statement = preface + content
lines = statement.split("\n")
wrapped = textwrap.wrap(lines[0], width=width, subsequent_indent=" "*3)
for line in lines[1:]:
if line == "":
wrapped.append("")
continue
wrap_line = textwrap.wrap(line, width=width,
initial_indent=" "*3,
subsequent_indent=" "*3)
wrapped.extend(wrap_line)
self.console.print("\n".join(wrapped))
@get_object
def ask(self, question, options=None, default=None, helper=None,
autohelp=False):
"""Ask the user a question."""
if helper is None:
helper = Helper(None)
if isinstance(helper, str):
helper = Helper(helper)
result = self.console.input("🧙 " + question + " ")
self.console.print()
if result == "":
if not autohelp:
return None
result = "?" # autohelp in this case
if helper and result[0] in ["?", "!"]:
self.say(helper(result))
return None
return result
def say(self, content, preface="🧙 "):
"""Let the wizard make a statement.
Parameters
----------
content : str or List[str]
Content to be presented to user. Input will be wrapped to fit
the user's terminal. If a list of strings, each element is
printed with a blank line separating them.
preface : str
preface, used only on the first line of the first element of the
``content``.
"""
self._speak(content, preface)
self.console.print() # adds a blank line
def start(self, content):
"""Specialized version of :method:`.say` for starting an object"""
# eventually, this will tweak so that we preface with a line and use
# green text here TODO: possibly remove?
self.say(content)
def bad_input(self, content, preface="👺 "):
"""Specialized version of :method:`.say` for printing errors"""
# just changes the default preface; maybe print 1st line red?
self.say(content, preface)
@get_object
def ask_enumerate_dict(self, question, options, helper=None,
autohelp=False):
"""Ask the user to select from a set of options.
Parameters
----------
question : str
the question to ask the user (asked before the options are
listed)
options: Dict[str, Any]
mapping of the string name (shown to the user in the list of
options) to the object to return
Returns
-------
Any :
the object the user selected by either name or number
"""
self.say(question)
opt_string = "\n".join([f" {(i+1):>3}. {opt}"
for i, opt in enumerate(options)])
self.say(opt_string, preface=" "*3)
choice = self.ask("Please select an option:", helper=helper)
# select by string
if choice in options:
return options[choice]
# select by number
try:
num = int(choice) - 1
result = list(options.values())[num]
except Exception:
self.bad_input(f"Sorry, '{choice}' is not a valid option.")
result = None
return result
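    # Illustrative usage sketch (the option names and objects are hypothetical; in
    # practice the wizard steps build these dicts from user-registered objects):
    #
    #     engine = wizard.ask_enumerate_dict(
    #         "Which engine should I use?",
    #         {"openmm": openmm_engine, "toy": toy_engine},
    #     )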
def ask_enumerate(self, question, options):
"""Ask the user to select from a list of options"""
# NOTE: new code should use ask_enumerate_dict. If we were past the
# beta stage, this would probably issue a PendingDeprecationWarning
self.say(question)
opt_string = "\n".join([f" {(i+1):>3}. {opt}"
for i, opt in enumerate(options)])
self.say(opt_string, preface=" "*3)
result = None
while result is None:
choice = self.ask("Please select a number:",
options=[str(i+1)
for i in range(len(options))])
if choice in options:
return choice
try:
num = int(choice) - 1
result = options[num]
except Exception:
self.bad_input(f"Sorry, '{choice}' is not a valid option.")
result = None
return result
@get_object
def ask_load(self, question, loader, helper=None, autohelp=False):
"""Load from user input according to ``loader`` method
Parameters
----------
question : str
string to ask the user
loader : Callable[str] -> Any
method that converts user input into the desired format for the
object
helper : :class:`.Helper`
object to handle user requests for help
"""
as_str = self.ask(question, helper=helper)
try:
result = loader(as_str)
except Exception as e:
self.exception(f"Sorry, I couldn't understand the input "
f"'{as_str}'.", e)
return None
return result
@get_object
def ask_custom_eval(self, question, options=None, default=None,
helper=None, type_=float):
"""Get user input and convert using custom_eval.
.. note::
New code should use ask_load. If we were past beta, this would
have a PendingDeprecationWarning.
"""
as_str = self.ask(question, options=options, default=default,
helper=helper)
try:
result = type_(custom_eval(as_str))
except Exception as e:
self.exception(f"Sorry, I couldn't understand the input "
f"'{as_str}'", e)
return None
return result
def obj_selector(self, store_name, text_name, create_func):
"""Select an object from the wizard's pseudo-storage
"""
# TODO: this seems like something that might be possible to refactor
# out
opts = {name: lambda wiz, o=obj: o
for name, obj in getattr(self, store_name).items()}
create_new = f"Create a new {text_name}"
opts[create_new] = create_func
sel = self.ask_enumerate(f"Which {text_name} would you like to "
"use?", list(opts.keys()))
obj = opts[sel](self)
if sel == create_new:
obj = self.register(obj, text_name, store_name)
return obj
def exception(self, msg, exception):
"""Specialized version of :method:`.bad_input` for exceptions"""
self.bad_input(f"{msg}\nHere's the error I got:\n"
f"{exception.__class__.__name__}: {exception}")
def name(self, obj, obj_type, store_name):
"""Name a newly created object.
Parameters
----------
obj : Any
the new object
obj_type : str
user-facing name of the object type
store_name : str
name of the OPS store in which to save this object type
Returns
-------
Any :
named object
"""
self.say(f"Now let's name your {obj_type}.")
name = None
while name is None:
name_help = ("Objects in OpenPathSampling can be named. You'll "
"use these names to refer back to these objects "
"or to load them from a storage file.")
name = self.ask("What do you want to call it?", helper=name_help)
if name in getattr(self, store_name):
self.bad_input(f"Sorry, you already have {a_an(obj_type)} "
f"named {name}. Please try another name.")
name = None
obj = obj.named(name)
self.say(f"'{name}' is a good name for {a_an(obj_type)} {obj_type}. "
+ name_joke(name, obj_type))
return obj
def register(self, obj, obj_type, store_name):
"""Register a newly-created object in the storage
Parameters
----------
obj : Any
the new object
obj_type : str
user-facing name of the object type
store_name : str
name of the OPS store in which to save this object type
Returns
-------
Any :
input object, possibly after being named
"""
if not obj.is_named:
obj = self.name(obj, obj_type, store_name)
store_dict = getattr(self, store_name)
store_dict[obj.name] = obj
return obj
@get_object
def get_storage(self):
"""Create a file to store the object database to.
Returns
-------
:class:`openpathsampling.experimental.storage.Storage` :
the storage file object
"""
from openpathsampling.experimental.storage import Storage
filename = self.ask("Where would you like to save your setup "
"database?")
if not filename.endswith(".db"):
self.bad_input("Files produced by this wizard must end in "
"'.db'.")
return None
if os.path.exists(filename):
overwrite | |
<filename>autoarray/structures/arrays/two_d/abstract_array_2d.py
import logging
import numpy as np
from typing import List, Tuple, Union
from autoconf import conf
from autoarray.structures.abstract_structure import AbstractStructure2D
from autoarray.structures.arrays.one_d.array_1d import Array1D
from autoarray.mask.mask_2d import Mask2D
from autoarray import exc
from autoarray.structures.arrays import abstract_array
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.layout import layout_util
logging.basicConfig()
logger = logging.getLogger(__name__)
def check_array_2d(array_2d: np.ndarray):
if len(array_2d.shape) != 1:
raise exc.ArrayException(
"An array input into the Array2D.__new__ method is not of shape 1."
)
def convert_array_2d(array_2d: Union[np.ndarray, List], mask_2d: Mask2D) -> np.ndarray:
"""
The `manual` classmethods in the `Array2D` object take as input a list or ndarray which is returned as an
Array2D.
This function performs the following and checks and conversions on the input:
1) If the input is a list, convert it to an ndarray.
2) Check that the number of sub-pixels in the array is identical to that of the mask.
3) Map the input ndarray to its `slim` or `native` representation, depending on the `general.ini` config file
entry `store_slim`.
For an Array2D, `slim` refers to a 1D NumPy array of shape [total_values] and `native` a 2D NumPy array of shape
[total_y_values, total_x_values].
Parameters
----------
array_2d
The input structure which is converted to an ndarray if it is a list.
mask_2d
The mask of the output Array2D.
"""
array_2d = abstract_array.convert_array(array=array_2d)
if conf.instance["general"]["structures"]["store_slim"]:
return convert_array_2d_to_slim(array_2d=array_2d, mask_2d=mask_2d)
return convert_array_2d_to_native(array_2d=array_2d, mask_2d=mask_2d)
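# Illustrative sketch (not part of the original source): what convert_array_2d
# returns for a small example, assuming a 3x3 Mask2D with sub_size=1 and four
# unmasked pixels. The Mask2D.manual constructor shown here is assumed from
# context; only the shapes/values illustrate the store_slim behaviour.
#
#     import numpy as np
#     from autoarray.mask.mask_2d import Mask2D
#
#     mask = Mask2D.manual(mask=[[True, True, True],
#                                [True, False, False],
#                                [True, False, False]], pixel_scales=1.0)
#     arr_native = np.array([[0., 0., 0.],
#                            [0., 1., 2.],
#                            [0., 3., 4.]])
#     arr = convert_array_2d(array_2d=arr_native, mask_2d=mask)
#     # store_slim=True  -> 1D ndarray of shape (4,): [1., 2., 3., 4.]
#     # store_slim=False -> 2D ndarray of shape (3, 3) with zeros outside the mask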
def convert_array_2d_to_slim(array_2d: np.ndarray, mask_2d: Mask2D) -> np.ndarray:
"""
The `manual` classmethods in the `Array2D` object take as input a list or ndarray which is returned as an
Array2D.
This function checks the dimensions of the input `array_2d` and maps it to its `slim` representation.
For an Array2D, `slim` refers to a 1D NumPy array of shape [total_values].
Parameters
----------
array_2d
The input structure which is converted to its slim representation.
mask_2d
The mask of the output Array2D.
"""
if len(array_2d.shape) == 1:
array_2d_slim = array_2d
if array_2d_slim.shape[0] != mask_2d.sub_pixels_in_mask:
raise exc.ArrayException(
"The input 1D array does not have the same number of entries as sub-pixels in"
"the mask."
)
return array_2d_slim
if array_2d.shape != mask_2d.sub_shape_native:
raise exc.ArrayException(
"The input array is 2D but not the same dimensions as the sub-mask "
"(e.g. the mask 2D shape multipled by its sub size.)"
)
return array_2d_util.array_2d_slim_from(
array_2d_native=array_2d, mask_2d=mask_2d, sub_size=mask_2d.sub_size
)
def convert_array_2d_to_native(array_2d: np.ndarray, mask_2d: Mask2D) -> np.ndarray:
"""
The `manual` classmethods in the `Array2D` object take as input a list or ndarray which is returned as an
Array2D.
This function checks the dimensions of the input `array_2d` and maps it to its `native` representation.
For an Array2D, `native` refers to a 2D NumPy array of shape [total_y_values, total_x_values].
Parameters
----------
array_2d
The input structure which is converted to an ndarray if it is a list.
mask_2d : Mask2D
The mask of the output Array2D.
"""
if len(array_2d.shape) == 2:
array_2d_native = array_2d * np.invert(mask_2d)
if array_2d.shape != mask_2d.sub_shape_native:
raise exc.ArrayException(
"The input array is 2D but not the same dimensions as the sub-mask "
"(e.g. the mask 2D shape multipled by its sub size.)"
)
return array_2d_native
if array_2d.shape[0] != mask_2d.sub_pixels_in_mask:
raise exc.ArrayException(
"The input 1D array does not have the same number of entries as sub-pixels in"
"the mask."
)
return array_2d_util.array_2d_native_from(
array_2d_slim=array_2d, mask_2d=mask_2d, sub_size=mask_2d.sub_size
)
class AbstractArray2D(AbstractStructure2D):
header = None
def _new_structure(
self, array: "AbstractArray2D", mask: Mask2D
) -> "AbstractArray2D":
return self.__class__(array=array, mask=mask, header=self.header)
@property
def slim(self) -> Union["AbstractArray2D", "Array2D"]:
"""
Return an `Array2D` where the data is stored in its `slim` representation, which is an ndarray of shape
[total_unmasked_pixels * sub_size**2].
If it is already stored in its `slim` representation it is returned as it is. If not, it is mapped from
`native` to `slim` and returned as a new `Array2D`.
"""
if len(self.shape) == 1:
return self
sub_array_1d = array_2d_util.array_2d_slim_from(
array_2d_native=self, mask_2d=self.mask, sub_size=self.mask.sub_size
)
return self._new_structure(array=sub_array_1d, mask=self.mask)
@property
def native(self) -> Union["AbstractArray2D", "Array2D"]:
"""
Return a `Array2D` where the data is stored in its `native` representation, which is an ndarray of shape
[sub_size*total_y_pixels, sub_size*total_x_pixels].
If it is already stored in its `native` representation it is returned as is. If not, it is mapped from
`slim` to `native` and returned as a new `Array2D`.
"""
if len(self.shape) != 1:
return self
sub_array_2d = array_2d_util.array_2d_native_from(
array_2d_slim=self, mask_2d=self.mask, sub_size=self.mask.sub_size
)
return self._new_structure(array=sub_array_2d, mask=self.mask)
@property
def binned(self) -> Union["AbstractArray2D", "Array2D"]:
"""
Convenience method to access the binned-up array in its 1D representation, which is an ndarray of shape
[total_unmasked_pixels].
The binning-up process converts the array from values defined on the sub-array to values defined at the
centre of each mask pixel (e.g. an array with a sub_size of 1). This is performed by taking the mean of all
values in each sub-pixel.
If the array is stored in 1D it is returned as is. If it is stored in 2D, it must first be mapped from 2D to 1D.
"""
array_2d_slim = self.slim
binned_array_1d = np.multiply(
self.mask.sub_fraction,
array_2d_slim.reshape(-1, self.mask.sub_length).sum(axis=1),
)
return self._new_structure(array=binned_array_1d, mask=self.mask.mask_sub_1)
@property
def extent(self) -> np.ndarray:
return self.mask.extent
@property
def in_counts(self) -> "AbstractArray2D":
return self.header.array_eps_to_counts(array_eps=self)
@property
def in_counts_per_second(self) -> "AbstractArray2D":
return self.header.array_counts_to_counts_per_second(
array_counts=self.in_counts
)
@property
def original_orientation(self) -> Union[np.ndarray, "AbstractArray2D"]:
return layout_util.rotate_array_via_roe_corner_from(
array=self, roe_corner=self.header.original_roe_corner
)
@property
def readout_offsets(self) -> Tuple[int, int]:
if self.header is not None:
if self.header.readout_offsets is not None:
return self.header.readout_offsets
return (0, 0)
@property
def binned_across_rows(self) -> Array1D:
binned_array = np.mean(np.ma.masked_array(self.native, self.mask), axis=0)
return Array1D.manual_native(array=binned_array, pixel_scales=self.pixel_scale)
@property
def binned_across_columns(self) -> Array1D:
binned_array = np.mean(np.ma.masked_array(self.native, self.mask), axis=1)
return Array1D.manual_native(array=binned_array, pixel_scales=self.pixel_scale)
def zoomed_around_mask(self, buffer: int = 1) -> "AbstractArray2D":
"""
Extract the 2D region of an array corresponding to the rectangle encompassing all unmasked values.
This is used to extract and visualize only the region of an image that is used in an analysis.
Parameters
----------
buffer
The number of pixels around the extracted array used as a buffer.
"""
extracted_array_2d = array_2d_util.extracted_array_2d_from(
array_2d=self.native,
y0=self.mask.zoom_region[0] - buffer,
y1=self.mask.zoom_region[1] + buffer,
x0=self.mask.zoom_region[2] - buffer,
x1=self.mask.zoom_region[3] + buffer,
)
mask = Mask2D.unmasked(
shape_native=extracted_array_2d.shape,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.mask.mask_centre,
)
array = convert_array_2d(array_2d=extracted_array_2d, mask_2d=mask)
return self._new_structure(array=array, mask=mask)
def extent_of_zoomed_array(self, buffer: int = 1) -> np.ndarray:
"""
For an extracted zoomed array computed from the method *zoomed_around_mask* compute its extent in scaled
coordinates.
The extent of the grid in scaled units returned as an ndarray of the form [x_min, x_max, y_min, y_max].
This is used to visualize zoomed and extracted arrays via the imshow() method.
Parameters
----------
buffer
The number of pixels around the extracted array used as a buffer.
"""
extracted_array_2d = array_2d_util.extracted_array_2d_from(
array_2d=self.native,
y0=self.mask.zoom_region[0] - buffer,
y1=self.mask.zoom_region[1] + buffer,
x0=self.mask.zoom_region[2] - buffer,
x1=self.mask.zoom_region[3] + buffer,
)
mask = Mask2D.unmasked(
shape_native=extracted_array_2d.shape,
pixel_scales=self.pixel_scales,
sub_size=self.sub_size,
origin=self.mask.mask_centre,
)
return mask.extent
def resized_from(
self, new_shape: Tuple[int, int], mask_pad_value: int = 0.0
) -> "AbstractArray2D":
"""
Resize the array around its centre to a new input shape.
If a new_shape dimension is smaller than the current dimension, the data at the edges is trimmed and removed.
If it is larger, the data is padded with zeros.
If the array has even sized dimensions, the central pixel around which data is trimmed / padded is chosen as
the top-left pixel of the central quadrant of pixels.
Parameters
-----------
new_shape
The new 2D shape of the array.
"""
resized_array_2d = array_2d_util.resized_array_2d_from(
array_2d=self.native, resized_shape=new_shape
)
resized_mask = self.mask.resized_mask_from(
new_shape=new_shape, pad_value=mask_pad_value
)
array = convert_array_2d(array_2d=resized_array_2d, mask_2d=resized_mask)
return self._new_structure(array=array, mask=resized_mask)
def padded_before_convolution_from(
self, kernel_shape: Tuple[int, int], mask_pad_value: int = 0.0
) -> "AbstractArray2D":
"""
When the edge pixels of a mask are unmasked and a convolution is to occur, the signal of edge pixels will be
'missing' if the grid is used to evaluate the signal via an analytic function.
To ensure this signal is included the array can be padded, where it is 'buffed' such that it includes all
pixels whose signal will be convolved into the unmasked pixels given the 2D kernel shape. The values of
these pixels are zeros.
Parameters
----------
kernel_shape
The 2D shape of the kernel | |
"""
Copyright (C) <2010> Aut<NAME>. TSRI
This file git_upy/houdini/houdiniHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 23:03:07 2010
@author: <NAME>
@copyright: <NAME>in TSRI 2010
Library of Houdini helper functions to permit communication and synchronisation
between Houdini and a PMV session.
"""
#base helper class
from pyubic import hostHelper
#host software import
import hou
import toolutils
#Houdini : same naming restrictions as maya : no ":" or " " etc...
#GLOBAL VARIABLE
VERBOSE=0
DEBUG=1
#houdini is special as geometry is frozen and made read-only.
#the workaround is to trigger the SOP cooking. The helper functions will toggle
#hou.pyubic[objname][params] and
#feed it the data, for instance createMesh and updateMesh...
#need to have the python SOP in the custom library
class houMesh:
def __init__(self,name,vertices=[],faces=[],colors=[],sopnode=None,
for_instance=False):
self.name = name
self.verts=vertices
self.faces=faces
self.colors=colors
self.sop = sopnode
self.for_instance = for_instance
class houdiniHelper(hostHelper.Helper):
"""
The Houdini helper abstract class
============================
This is the Houdini helper object. The helper
gives access to the basic functions needed to create and edit a host 3d object and scene.
"""
#this id can probably found in c4d.symbols
#TAG ID
SPLINE = "kNurbsCurve"
INSTANCE = "kTransform"
EMPTY = "kTransform"
#msutil = om.MScriptUtil()
pb = False
VERBOSE=0
DEBUG=0
viewer = None
host = "houdini"
def __init__(self,master=None):
hostHelper.Helper.__init__(self)
#we can define here some function alias
self.updateAppli = self.update
self.Cube = self.box
self.setInstance = self.newInstance
self.getCurrentScene = toolutils.sceneViewer
self.mesh={}#all geom in hou from helper
self.reParent = self.reparent
def addObjectToScene(self,sc,obj,parent=None):
if parent is not None:
self.reParent(obj,parent)
def loadIntoAsset(self,otl_file_path,node_type_name, source):
# Find the asset definition in the otl file.
definitions = [definition
for definition in hou.hda.definitionsInFile(otl_file_path)
if definition.nodeTypeName() == node_type_name]
assert(len(definitions) == 1)
definition = definitions[0]
# Store the source code into the PythonCook section of the asset.
definition.addSection("PythonCook", source)
def loadPythonSourceIntoAsset(self,otl_file_path, node_type_name, source_file_path):
#usually CustomOtl ,"geom or obj",pyubic/houdini/sop.py
# Load the Python source code.,
source_file = open(source_file_path, "rb")
source = source_file.read()
source_file.close()
self.loadIntoAsset(otl_file_path, node_type_name, source)
def update(self):
"""
Update the host viewport, ui or gl draw
This function can't be call in a thread.
"""
hou.ui.triggerUpdate()
def checkName(self,name):
"""
Check the name of the molecule/filename to avoid invalid characters for the
host, i.e. Maya doesn't support object names starting with a number. If an invalid
character is found, the character is removed.
@type name: string
@param name: name of the molecule.
@rtype: string
@return: corrected name of the molecule.
"""
# invalid=[]
# for i in range(9):
# invalid.append(str(i))
name = name.replace(":",".")
name = name.replace(" ","")
# nonecar=[" ",":"]
# for n in nonecar:
# if name.find(n) != -1 :
# name.replace(n,".")
# if name[0] in invalid:
# name= name[1:]
return name
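# Illustrative sketch (not part of the original source): checkName only rewrites
# the characters Houdini rejects, e.g.
#
#     helper = houdiniHelper()
#     helper.checkName("1CRN:A chain")   # -> "1CRN.Achain"
#
# ":" becomes "." and spaces are removed; the commented-out digit handling above
# is left disabled, so leading digits are kept as-is.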
def getName(self,o):
"""
Return the name of an host object
@type o: hostObject
@param o: an host object
@rtype: string
@return: the name of the host object
"""
if type(o) is str : return o
return o.name()
def getObjectName(self,o):
"""
Return the name of an host object
@type o: hostObject
@param o: an host object
@rtype: string
@return: the name of the host object
"""
if type(o) is str : return o
return o.name()
def getType(self,object):
return object.type().name()
def getMesh(self,m):
if type(m) is str:
m = self.getObject(m)
if m is not None :
if self.getType(m) == "null" :
return m
else :
return m
else :
return None
def getObject(self,name):
"""
retrieve an object from his name.
@type name: string
@param name: request name of an host object
@rtype: hostObject
@return: the object with the requested name or None
"""
if type(name) is str :
return hou.node('/obj/'+self.checkName(name))
else :
return name
def getTranslation(self,name):
return self.getObject(name).parmTuple('t').eval()
def setTranslation(self,name,pos=[0.,0.,0.]):
self.getObject(name).parmTuple('t').set(pos)
def translateObj(self,obj,position,use_parent=True):
pass
# if len(position) == 1 : c = position[0]
# else : c = position
# #print "upadteObj"
# newPos=self.FromVec(c)
# if use_parent :
# parentPos = self.GetAbsPosUntilRoot(obj)#parent.GetAbsPos()
# newPos = newPos - parentPos
# obj.SetAbsPos(newPos)
# else :
# pmx = obj.GetMg()
# mx = c4d.Matrix()
# mx.off = pmx.off + self.FromVec(position)
# obj.SetMg(mx)
def scaleObj(self,obj,sc):
if type(obj) is str :
obj = self.getObject(obj)
if type(sc) is float :
sc = [sc,sc,sc]
if type(sc) is int:
sc = [sc,sc,sc]
obj.parmTuple('s').set(sc)
def rotateObj(self,obj,rot):
#take radians, give degrees
if type(obj) is str :
obj = self.getObject(obj)
obj.parmTuple('r').set(rot) #Rx Ry Rz
def newEmpty(self,name,location=None,parentCenter=None,**kw):
"""
Create a new Null Object
@type name: string
@param name: name of the empty
@type location: list
@param location: position of the null object
@type parentCenter: list
@param parentCenter: position of the parent object
@type kw: dictionary
@param kw: you can add your own keyword
@rtype: hostObject
@return: the null object
"""
#null or subnet?
typ='null' #null subnet
empty=hou.node('/obj').createNode(typ,self.checkName(name), run_init_scripts=False)
#empty.setName(checkName(name))
# delete default file node
# empty.node('file1').destroy()
if location != None :
if parentCenter != None :
location = location - parentCenter
empty.parmTuple("t").set(location)
#set the position of the object to location
return empty
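# Illustrative usage sketch (not part of the original source): creating nulls,
# optionally positioned relative to a parent centre. Note that the subtraction
# `location - parentCenter` assumes array-like inputs (e.g. numpy arrays).
#
#     import numpy
#     helper = houdiniHelper()
#     root = helper.newEmpty("myMolecule_root")
#     child = helper.newEmpty("chainA",
#                             location=numpy.array([5.0, 0.0, 0.0]),
#                             parentCenter=numpy.array([1.0, 0.0, 0.0]))
#     # child is placed at (4, 0, 0): location minus parentCenter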
def newInstance(self,name,object,location=None,hostmatrice=None,matrice=None):
"""
Create a new Instance from another Object
@type name: string
@param name: name of the instance
@type object: hostObject
@param object: the object to herit from
@type location: list/Vector
@param location: position of the null object
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
@rtype: hostObject
@return: the instance object
"""
#actually create a geo and then change it to an instance, so we can have the Material tab
instance = hou.node('/obj').createNode('geo',self.checkName(name), run_init_scripts=True)
#need to delete the file
#instance.children()[0].destroy()
instance = instance.changeNodeType("instance")
instance.setParms({"instancepath":object.path()})
#instance parent = object
#instance name = name
if location != None :
#set the position of instance with location
instance.parmTuple("t").set(location)
#set the instance matrice
self.setObjectMatrix(instance,matrice=matrice,hostmatrice=hostmatrice)
return instance
#alias
def CopyAlonPoints(self,name,object,points=None,colors=None,location=None,
hostmatrice=None,matrice=None):
"""
Create new Instances from another Object along points.
Main caveat: the instances appear only at rendering time.
@type name: string
@param name: name of the instance
@type object: hostObject
@param object: the object to herit from
@type location: list/Vector
@param location: position of the null object
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
@rtype: hostObject
@return: the instance object
"""
if points is None :
inst = self.newInstance(name,object,location=location,
hostmatrice=hostmatrice,matrice=matrice)
return inst
name = self.checkName(name)
instance = hou.node('/obj').createNode('instance',name, run_init_scripts=True)
instance.setParms({"instancepath":'/obj/'+object.name()})
instance.parmTuple("ptinstance").set(1)
addPtNode = instance.allSubChildren()[0]
mesh = instance.createNode('mesh',"mesh_"+name)
houmesh = houMesh("mesh_"+name,vertices=vertices,faces=faces,
colors=color,sopnode = mesh,for_instance=True)
self.mesh["mesh_"+name] = houmesh
#need to connect to addPoint node and mesh
addPtNode.insertInput(0,mesh)
#should be able to overwrite material for each instance appearly
return instance
def newInstances(self,name,object,points=None,colors=None,location=None,
hostmatrice=None,matrice=None):
"""
Create new Instances from another Object along points.
Main caveat: the instances appear only at rendering time.
@type name: string
@param name: name of the instance
@type object: hostObject
@param object: the object to herit from
@type location: list/Vector
@param location: position of the null object
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
@rtype: hostObject
@return: the instance object
"""
if points is None :
inst = self.newInstance(name,object,location=location,
hostmatrice=hostmatrice,matrice=matrice)
return inst
name = self.checkName(name)
instance = hou.node('/obj').createNode('instance',name, run_init_scripts=True)
instance.setParms({"instancepath":'/obj/'+object.name()})
instance.parmTuple("ptinstance").set(1)
addPtNode = instance.allSubChildren()[0]
mesh = instance.createNode('mesh',"mesh_"+name)
houmesh = houMesh("mesh_"+name,vertices=vertices,faces=faces,
colors=color,sopnode = mesh,for_instance=True)
self.mesh["mesh_"+name] = houmesh
#need to connect to addPoint node and | |
<reponame>pd3d/magneto
'''
* This version has been reduced to support the Finexus setup, in an attempt to replicate their reported accurate Z positioning.
*
* Position tracking of magnet based on Finexus
* https://ubicomplab.cs.washington.edu/pdfs/finexus.pdf
*
* VERSION: 0.2.2.c
* - MODIFIED: 4 sensors in operation.
*
*
* AUTHOR : <NAME>
* DATE : Oct. 18th, 2017 Year of Our Lord
*
* AUTHOR : <NAME>
* DATE : Oct. 17th, 2017 Year of Our Lord
*
'''
# Import Modules
import numpy as np # Import Numpy
import matplotlib.pyplot as plt # Plot data
import Queue as qu # Queue for multithreading sync
import argparse # Feed in arguments to the program
import os, platform # To open and write to a file
from threading import Thread # Multithreading
from time import sleep, time, clock # Sleep for stability
from scipy.optimize import root # Solve System of Eqns for (x, y, z)
from scipy.linalg import norm # Calculate vector norms (magnitude)
from usbProtocol import createUSBPort # Create USB port (serial comm. w\ Arduino)
# ************************************************************************
# =====================> CONSTRUCT ARGUMENT PARSER <=====================*
# ************************************************************************
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--debug", action='store_true',
help="invoke flag to enable debugging")
ap.add_argument("-vp", "--visualize-position", action='store_true',
help="invoke flag to visualize position")
ap.add_argument("-plt", "--plot", action='store_true',
help="invoke flag to visualize position")
args = vars( ap.parse_args() )
args["debug"] = False
args["plot"] = False
args["visualize-position"] = True
# ************************************************************************
# =====================> DEFINE NECESSARY FUNCTIONS <====================*
# ************************************************************************
# ****************************************************
# Define function to sort from lowest->highest value *
# -------------------------------------------------- *
# INPUT : - A list *
# OUTPUT: - A list containing the indices of the *
# given list's elements arranged from *
# the index of the element with the *
# smallest value to the index of the *
# element with the largest value *
# ****************************************************
def argsort(seq):
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
# ****************************************************
# Sort a list's elements from smallest to largest *
# -------------------------------------------------- *
# INPUT : - List to be sorted *
# - Number of elements in said list that *
# you want to sort *
# OUTPUT: - A sorted list of size (N) *
# ****************************************************
def bubbleSort(arr, N):
data = []
for i in range(0, N):
data.append( arr[i] )
for i in range(0, len(data)):
for j in range(0, len(data)-i-1):
if (data[j] > data[j+1]):
temp = data[j]
data[j] = data[j+1]
data[j+1] = temp
else:
continue
return (data)
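# Illustrative sketch (not from the original script): how argsort and bubbleSort
# are combined below to pick the sensors closest to the magnet.
#
#     norms = [12.1, 48.7, 5.3, 33.0, 7.9, 41.2]   # field magnitudes (G)
#     order = argsort(norms)      # [2, 4, 0, 3, 5, 1]  (weakest -> strongest)
#     order.reverse()             # [1, 5, 3, 0, 4, 2]  (strongest -> weakest)
#     closest = bubbleSort(order, 3)   # [1, 3, 5] -> indices of the 3 strongest
#                                      # sensors, returned sorted by index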
# ****************************************************
# Define function to pool & return data from Arduino *
# ****************************************************
def getData(ser, Q_getData):
global CALIBRATING
##Synchronization issues are evident. May need to look into parallelizations, or threading.
## samplerate = 0.015
## sleep(samplerate)
while (True):
try:
# Flush buffer
ser.reset_input_buffer()
ser.reset_output_buffer()
# Read incoming data and separate
line = ser.readline()[:-1]
col = line.split(", ")
# Wait for the sensor to calibrate itself to ambient fields.
while( not( len(col) == 18 ) ):
line = ser.readline()[:-1]
col = line.split(", ")
if(CALIBRATING == True):
print( "Calibrating...\n" )
CALIBRATING = False
# Construct magnetic field array
else:
# Sensor 1
Bx = float(col[0])
By = float(col[1])
Bz = float(col[2])
B1 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Sensor 2
Bx = float(col[3])
By = float(col[4])
Bz = float(col[5])
B2 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Sensor 3
Bx = float(col[6])
By = float(col[7])
Bz = float(col[8])
B3 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Sensor 4
Bx = float(col[9] )
By = float(col[10])
Bz = float(col[11])
B4 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Sensor 5
Bx = float(col[12])
By = float(col[13])
Bz = float(col[14])
B5 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Sensor 6
Bx = float(col[15])
By = float(col[16])
Bz = float(col[17])
B6 = np.array( ([Bx],[By],[Bz]), dtype='float64') # Units { G }
# Put the data in the Queue, no matter what. "Pipelining".
Q_getData.put( (B1, B2, B3, B4, B5, B6) )
except Exception as e:
print( "Caught error in getData()" )
print( "Error type %s" %str(type(e)) )
print( "Error Arguments " + str(e.args) )
# ****************************************************
# Define function to construct equations to solve for
# ****************************************************
def LHS( root, K, norms ):
global PRINT
# Extract x, y, and z
x, y, z = root
# Construct the (r) terms for each sensor
# NOTE: Relative distance terms are in meters
# : Standing on sensor(n), how many units in
# the x/y/z direction should I march to get
# back to sensor1 (origin)?
r1 = float( ( (x+0.000)**2. + (y+0.050)**2. + (z-0.100)**2. )**(1/2.) ) # Sensor 1
r2 = float( ( (x+0.000)**2. + (y-0.075)**2. + (z-0.100)**2. )**(1/2.) ) # Sensor 2
r3 = float( ( (x+0.000)**2. + (y+0.050)**2. + (z+0.100)**2. )**(1/2.) ) # Sensor 3
r4 = float( ( (x+0.000)**2. + (y-0.075)**2. + (z+0.100)**2. )**(1/2.) ) # Sensor 4
r5 = float( ( (x+0.000)**2. + (y+0.000)**2. + (z+0.000)**2. )**(1/2.) ) # Sensor 5 (Origin)
r6 = float( ( (x+0.062)**2. + (y+0.000)**2. + (z+0.000)**2. )**(1/2.) ) # Sensor 6 (Had to measure it with a caliper! The Standoff is meant to be on the ground, not on another sensor!)
# Construct the equations
Eqn1 = ( K*( r1 )**(-6.) * ( 3.*( z/r1 )**2. + 1 ) ) - norms[0]**2. # Sensor 1
Eqn2 = ( K*( r2 )**(-6.) * ( 3.*( z/r2 )**2. + 1 ) ) - norms[1]**2. # Sensor 2
Eqn3 = ( K*( r3 )**(-6.) * ( 3.*( z/r3 )**2. + 1 ) ) - norms[2]**2. # Sensor 3
Eqn4 = ( K*( r4 )**(-6.) * ( 3.*( z/r4 )**2. + 1 ) ) - norms[3]**2. # Sensor 4
Eqn5 = ( K*( r5 )**(-6.) * ( 3.*( z/r5 )**2. + 1 ) ) - norms[4]**2. # Sensor 5
Eqn6 = ( K*( r6 )**(-6.) * ( 3.*( z/r6 )**2. + 1 ) ) - norms[5]**2. # Sensor 6
# Construct a vector of the equations
Eqns = [Eqn1, Eqn2, Eqn3, Eqn4, Eqn5, Eqn6]
# Determine which sensors to use based on magnetic field value (smallValue==noBueno!)
sort = argsort(norms) # Auxiliary function sorts norms from smallest to largest, by index.
sort.reverse() # Python built-in function reverses elements of list.
f=[] # Declare vector to hold relevant functions.
for i in range(0, 3): # Fill functions' array with the equations that correspond to
f.append(Eqns[sort[i]]) # the sensors with the highest norm, thus closest to magnet
# Return vector
return ( f )
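# Worked example (illustrative, not from the original script): each equation in
# LHS() comes from the point-dipole field magnitude
#
#     |B|^2 = K / r^6 * ( 3*(z/r)^2 + 1 )
#
# so with K = 4.24e-7 G^2.m^6 and the magnet 10 cm directly above a sensor
# (x = y = 0, z = 0.1 m, hence r = 0.1 m and z/r = 1):
#
#     |B|^2 = 4.24e-7 / 1e-6 * 4 = 1.696  ->  |B| ~ 1.30 G
#
# LHS() returns the residuals of the three such equations belonging to the
# sensors that measure the strongest fields; the root finder drives them to 0.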
# ****************************************************
# Determine initial guess based on magnitude of *
# magnetic field relative to all the sensors *
# ****************************************************
def findIG(magFields):
# Define IMU positions on the grid
# / sensor 1: (x, y, z)
# / sensor 2: (x, y, z)
# Mat= : :
# \ : :
# \ sensor 6: (x, y, z)
IMU_pos = np.array(((0.000 , -0.050 , 0.100) ,
(0.000 , 0.075 , 0.100) ,
(0.000 , -0.050 , 0.100) ,
(0.000 , 0.075 , 0.100) ,
(0.000 , 0.000 , 0.000) ,
(0.062 , 0.000 , 0.000)), dtype='float64')
# Read current magnetic field from MCU
(H1, H2, H3, H4, H5, H6) = magFields
# Compute L2 vector norms
HNorm = [ float(norm(H1)), float(norm(H2)),
float(norm(H3)), float(norm(H4)),
float(norm(H5)), float(norm(H6)) ]
# Determine which sensors to use based on magnetic field value (smallValue==noBueno!)
sort = argsort(HNorm) # Auxiliary function sorts norms from smallest to largest
sort.reverse() # Python built-in function reverses elements of list
IMUS = bubbleSort(sort, 3)
# Return the initial guess as the centroid of the detected triangle
return ( np.array(((IMU_pos[IMUS[0]][0]+IMU_pos[IMUS[1]][0]+IMU_pos[IMUS[2]][0])/3.,
(IMU_pos[IMUS[0]][1]+IMU_pos[IMUS[1]][1]+IMU_pos[IMUS[2]][1])/3.,
(IMU_pos[IMUS[0]][2]+IMU_pos[IMUS[1]][2]+IMU_pos[IMUS[2]][2])/3. ), dtype='float64') )
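# Illustrative sketch (not from the original script): findIG picks the three
# sensors with the largest field norms and seeds the solver at the centroid of
# their grid positions. E.g. if sensors with indices 3, 4 and 5 see the
# strongest fields, the initial guess is
#
#     ( (0.000+0.000+0.062)/3, (0.075+0.000+0.000)/3, (0.100+0.000+0.000)/3 )
#     ~ (0.0207, 0.025, 0.0333)   # metres, in the sensor-array frame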
# ************************************************************************
# ===========================> SETUP PROGRAM <===========================
# ************************************************************************
# Useful variables
global CALIBRATING
CALIBRATING = True # Boolean to indicate that device is calibrating
READY = False # Give time for user to place magnet
##This measurement was redone to reflect the changes on the imposed C.S. for LMA
K = 4.24e-7 # Magnet's constant (K) || Units { G^2.m^6}
#K = 1.09e-6
dx = 1e-7 # Differential step size (Needed for solver)
calcPos = [] # Empty array to hold calculated positions
# Create a queue for retrieving data from the thread.
Q_getData = qu.Queue( maxsize=0 )
# Establish connection with Arduino
DEVC = "Arduino" # Device Name (not | |
<filename>utils/saveHdf5ToAedat2.py
import sys, argparse
import numpy as np
from numpy import uint32, int32, int64, int16
from tqdm import tqdm
import logging
from pathlib import Path
import easygui
import locale
import h5py
MAX_ADC = 1023
GYRO_FULL_SCALE_DEG_PER_SEC_DEFAULT=1000 # default hardware values in jAER for Davis cameras; see ImuControl.loadPreferences, line 178 in jAER
ACCEL_FULL_SCALE_M_PER_S_SQ_DEFAULT=8
locale.setlocale(locale.LC_ALL, '') # print numbers with thousands separators
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def my_logger(name):
logger = logging.getLogger(name)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
return logger
log = my_logger(__name__)
tot_len_jaer_events = 0
ldvs = 0
limu = 0
nfr = 0
class Struct:
pass
def main(argv=None):
"""
Process command line arguments
:param argv: list of files to convert, or
:return:
"""
if argv is None:
argv = sys.argv
inputfile = None
outputfile = None
filelist = None
po=None
parser = argparse.ArgumentParser(
description='Convert files from hdf5 to AEDAT-2 format. Either provide a single -i input_file -o output_file, '
'or a list of .h5 input files.')
parser.add_argument('-o', help='output .aedat2 file name')
parser.add_argument('-i', help='input .hdf5 file name')
parser.add_argument('-q', dest='quiet', action='store_true',
help='Turn off all output other than warnings and errors')
parser.add_argument('-v', dest='verbose', action='store_true', help='Turn on verbose output')
parser.add_argument('--overwrite', dest='overwrite', action='store_true', help='Overwrite existing output files')
parser.add_argument('--no_imu', dest='no_imu', action='store_true',
help='Do not process IMU samples (which are very slow to extract)')
parser.add_argument('--imu', nargs='+', type=int,
default=[GYRO_FULL_SCALE_DEG_PER_SEC_DEFAULT, ACCEL_FULL_SCALE_M_PER_S_SQ_DEFAULT],
help='Use IMU full scale values GYRO ACCEL, e.g. 1000 8 for 1000 deg/s '
'and 8 gravities to encode AEDAT-2.0 values')
parser.add_argument('--no_frame', dest='no_frame', action='store_true',
help='Do not process APS sample frames (which are very slow to extract)')
parser.add_argument('--chunk_size', type=int, default=100000000,
help='Specify how many events read per step (the hdf5 might have too many events and '
'cannot be finished reading in one time)')
args, filelist = parser.parse_known_args() # filelist is list [] of files to be converted
if args.verbose:
log.setLevel(logging.DEBUG)
elif args.quiet:
log.setLevel(logging.WARNING)
else:
log.setLevel(logging.INFO)
if args.i is not None:
inputfile = args.i
if args.o is not None:
outputfile = args.o
multiple = outputfile is None
if inputfile is not None: filelist = [inputfile]
for file in filelist:
p = Path(file)
if not p.exists():
log.error(f'{p.absolute()} does not exist or is not readable')
continue
if p.suffix == '.aedat2':
log.error(f'skipping AEDAT-2.0 {p.absolute()} as input')
continue
log.debug(f'reading input {p}')
if multiple:
p = Path(file)
po = p.with_name(p.stem + '.aedat2') # output is input with .aedat2 extension
else:
po = Path(outputfile)
if not args.overwrite and po.is_file():
overwrite = query_yes_no(f'{po.absolute()} exists, overwrite it?')
if not overwrite:
log.info(f'{po.absolute()} exists, will not overwrite')
continue
else:
try:
log.debug(f'overwriting existing {po}')
po.unlink()
except Exception as e:
log.error(f'could not delete {po} (maybe it is open in jAER?): {e}')
quit(1)
if po.is_file():
try:
with open(outputfile, 'wb') as f:
pass
except IOError as x:
log.error(f'cannot open {po.absolute()} for output; maybe it is open in jAER?')
continue
log.info(f'overwriting {po.absolute()}')
if po.suffix is None or (not po.suffix == '.aedat' and not po.suffix == '.aedat2'):
log.warning(
f'output file {po} does not have .aedat or .aedat2 extension; are you sure this is what you want?')
# Define output struct
out = Struct()
out.data = Struct()
out.data.dvs = Struct()
out.data.frame = Struct()
out.data.imu6 = Struct()
# Events
out.data.dvs.polarity = []
out.data.dvs.timeStamp = []
out.data.dvs.x = []
out.data.dvs.y = []
# Frames
out.data.frame.samples = [] # np ndarray, [y,x,frame_num], with x=y=0 the UL corner using CV/DV convention
out.data.frame.position = []
out.data.frame.sizeAll = []
out.data.frame.timeStamp = []
out.data.frame.frameStart = [] # start of readout
out.data.frame.frameEnd = [] # end of readout
out.data.frame.expStart = [] # exposure start (before readout)
out.data.frame.expEnd = []
out.data.frame.numDiffImages = 0
out.data.frame.size = []
out.data.imu6.accelX = []
out.data.imu6.accelY = []
out.data.imu6.accelZ = []
out.data.imu6.gyroX = []
out.data.imu6.gyroY = []
out.data.imu6.gyroZ = []
out.data.imu6.temperature = []
out.data.imu6.timeStamp = []
# Initialize statics variable for every new file
global tot_len_jaer_events
global ldvs
global limu
global nfr
tot_len_jaer_events = 0
ldvs = 0
limu = 0
nfr = 0
data = {'aedat': out}
# loop through the "events" stream
log.debug(f'loading events to memory')
# https://gitlab.com/inivation/dv/dv-python
events = dict()
h5f = h5py.File(str(file), 'r')
events_in_total = len(h5f['events']['t'])
file_start_timestamp = h5f['events']['t'][0]
events_num_section_step = args.chunk_size
# events_in_total = events_num_section_step * 5
for events_num_section_start in range(0, events_in_total, events_num_section_step):
events_num_section_end = events_num_section_start + events_num_section_step
for dset_str in ['p', 'x', 'y', 't']:
events[dset_str] = h5f['events/{}'.format(dset_str)][events_num_section_start:events_num_section_end]
# events = np.hstack([packet for packet in f['events'].numpy()]) # load events to np array
out.data.dvs.timeStamp = events['t'] # int64
out.data.dvs.x = events['x'] # int16
out.data.dvs.y = events['y'] # int16
out.data.dvs.polarity = events['p'] # int8
log.info(f'Read {len(out.data.dvs.timeStamp)} DVS events')
log.info(f'{events_in_total - events_num_section_start - len(out.data.dvs.timeStamp)} DVS events left')
def generator():
while True:
yield
# loop through the "frames" stream
if not args.no_frame:
log.debug(f'loading frames to memory')
with tqdm(generator(), desc='frames', unit=' fr', maxinterval=1) as pbar:
for frame in (f['frames']):
out.data.frame.samples.append(
np.array(frame.image,
dtype=np.uint8)) # frame.image is ndarray(h,w,1) with 0-255 values ?? ADC has larger range, maybe clipped
out.data.frame.position.append(frame.position)
out.data.frame.sizeAll.append(frame.size)
out.data.frame.timeStamp.append(frame.timestamp)
out.data.frame.frameStart.append(frame.timestamp_start_of_frame)
out.data.frame.frameEnd.append(frame.timestamp_end_of_frame)
out.data.frame.expStart.append(frame.timestamp_start_of_exposure)
out.data.frame.expEnd.append(frame.timestamp_end_of_exposure)
pbar.update(1)
# Permute images via numpy
tmp = np.transpose(np.squeeze(np.array(out.data.frame.samples)), (1, 2, 0)) # make the frames x,y,frames
out.data.frame.numDiffImages = tmp.shape[2]
out.data.frame.size = out.data.frame.sizeAll[0]
out.data.frame.samples = tmp # leave frames as numpy array
log.info(f'{out.data.frame.numDiffImages} frames with size {out.data.frame.size}')
# # loop through the "imu" stream
if not args.no_imu:
log.debug(f'loading IMU samples to memory')
with tqdm(generator(), desc='IMU', unit=' sample') as pbar:
for i in (f['imu']):
if not imu_scale_warning_printed and imu_gyro_scale == GYRO_FULL_SCALE_DEG_PER_SEC_DEFAULT and imu_accel_scale == ACCEL_FULL_SCALE_M_PER_S_SQ_DEFAULT:
log.warning(
f'IMU sample found: IMU samples will be converted to jAER AEDAT-2.0 assuming default full scale {GYRO_FULL_SCALE_DEG_PER_SEC_DEFAULT} deg/s rotation and {ACCEL_FULL_SCALE_M_PER_S_SQ_DEFAULT}g acceleration. Use --imu option to change output scaling.')
imu_scale_warning_printed = True
a = i.accelerometer
g = i.gyroscope
m = i.magnetometer
out.data.imu6.accelX.append(a[0])
out.data.imu6.accelY.append(a[1])
out.data.imu6.accelZ.append(a[2])
out.data.imu6.gyroX.append(g[0])
out.data.imu6.gyroY.append(g[1])
out.data.imu6.gyroZ.append(g[2])
out.data.imu6.temperature.append(i.temperature)
out.data.imu6.timeStamp.append(i.timestamp)
pbar.update(1)
log.info(f'{len(out.data.imu6.accelX)} IMU samples')
# Add counts of jAER events
width = 640
height = 480
out.data.dvs.numEvents = len(out.data.dvs.x)
out.data.imu6.numEvents = len(out.data.imu6.accelX) * 7 if not args.no_imu else 0
out.data.frame.numEvents = (2 * width * height) * (out.data.frame.numDiffImages) if not args.no_frame else 0
if(events_num_section_start == 0):
export_aedat_2(args, out, po, height=height, starttimestamp=file_start_timestamp)
else:
export_aedat_2(args, out, po, height=height, appendevents=True, starttimestamp=file_start_timestamp)
log.debug('done')
def export_aedat_2(args, out, filename, starttimestamp, height=260,
gyro_scale=GYRO_FULL_SCALE_DEG_PER_SEC_DEFAULT,
accel_scale=ACCEL_FULL_SCALE_M_PER_S_SQ_DEFAULT,
appendevents=False):
"""
This function exports data to a .aedat file.
The .aedat file format is documented here:
http://inilabs.com/support/software/fileformat/
@param out: the data structure from above
@param filename: the full path to write to, .aedat2 output file
@param height: the size of the chip, to flip y coordinate for jaer compatibility
@param gyro_scale: the full scale value of gyro in deg/s
@param accel_scale: the full scale value of acceleratometer in m/s^2
"""
global tot_len_jaer_events
global ldvs
global limu
global nfr
num_total_events = out.data.dvs.numEvents + out.data.imu6.numEvents + out.data.frame.numEvents
printed_stats_first_frame=False
file_path=Path(filename)
try:
f=open(filename, 'ab')
except IOError as x:
log.error(f'could not open {file_path.absolute()} for output (maybe opened in jAER already?): {str(x)}')
else:
with f:
if(appendevents == False):
# Simple - events only - assume DAVIS
log.debug(f'saving {file_path.absolute()}')
# CRLF \r\n is needed to not break header parsing in jAER
f.write(b'#!AER-DAT2.0\r\n')
f.write(b'# This is a raw AE data file created from hdf5 (DSEC dataset)\r\n')
f.write(b'# Data format is int32 address, int32 timestamp (8 bytes total), repeated for each event\r\n')
f.write(b'# Timestamps tick is 1 us\r\n')
# Put the source in NEEDS DOING PROPERLY
f.write(b'# AEChip: Prophese Gen 3.1 (VGA)\r\n')
f.write(b'# End of ASCII Header\r\n')
else:
log.debug(f'appending events to {file_path.absolute()}')
# DAVIS
# In the 32-bit address:
# bit 32 (1-based) being 1 indicates an APS sample
# bit 11 (1-based) being 1 indicates a special event
# bits 11 and 32 (1-based) both being zero signals a polarity event
# see https://inivation.github.io/inivation-docs/Software%20user%20guides/AEDAT_file_formats#bit-31
apsDvsImuTypeShift=31
dvsType=0
apsImuType=1
imuTypeShift = 28
imuSampleShift = 12
imuSampleSubtype = | |
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import timedelta
from itertools import combinations
from typing import Tuple, Sequence
import numpy as np
from ..base import Property
from ..models.transition.base import TransitionModel
from ..models.transition.linear import ConstantTurn, ConstantVelocity, \
CombinedLinearGaussianTransitionModel
from ..types.array import StateVector
from ..types.state import State
def create_smooth_transition_models(initial_state, x_coords, y_coords, times, turn_rate):
r"""Generate a list of constant-turn and constant acceleration transition models alongside a
list of transition times to provide smooth transitions between 2D cartesian coordinates and
time pairs.
An assumption is that the initial_state's x, y coordinates are the first elements of x_coords
and y_coords respectively. I.e. the platform starts at the first coordinates.
Parameters
----------
initial_state: :class:`~.State`
The initial state of the platform.
x_coords:
A list of int/float x-coordinates (cartesian) in the order that the target must follow.
y_coords:
A list of int/float y-coordinates (cartesian) in the order that the target must follow.
times:
A list of :class:`~.datetime.datetime` dictating the times at which the target must be at
each corresponding coordinate.
turn_rate: Float
Angular turn rate (radians/second) measured anti-clockwise from positive x-axis.
Returns
-------
transition_models:
A list of :class:`~.ConstantTurn` and :class:`~.Point2PointConstantAcceleration` transition
models.
transition_times:
A list of :class:`~.datetime.timedelta` dictating the transition time for each
corresponding transition model in transition_models.
Notes
-----
x_coords, y_coords and times must be of same length.
This method assumes a cartesian state space with velocities eg.
:math:`(x, \dot{x}, y, \dot{y})`. It returns transition models for 2 cartesian coordinates and
their corresponding velocities.
"""
state = deepcopy(initial_state) # don't alter platform state with calculations
if not len(x_coords) == len(y_coords) == len(times):
raise ValueError('x_coords, y_coords and times must be same length')
transition_models = []
transition_times = []
for x_coord, y_coord, time in zip(x_coords[1:], y_coords[1:], times[1:]):
dx = x_coord - state.state_vector[0] # distance to next x-coord
dy = y_coord - state.state_vector[2] # distance to next y-coord
if dx == 0 and dy == 0:
a = 0 # if initial and second target coordinates are same, set arbitrary bearing of 0
vx = state.state_vector[1] # initial x-speed
vy = state.state_vector[3] # initial y-speed
if vx != 0 or vy != 0: # if velocity is 0, keep previous bearing
a = np.arctan2(vy, vx) # initial bearing
if dx == 0 and dy == 0 and vx == 0 and vy == 0: # if at destination with 0 speed, stay
transition_times.append(time - times[times.index(time) - 1])
transition_models.append(CombinedLinearGaussianTransitionModel((ConstantVelocity(0),
ConstantVelocity(0))))
continue
d = np.sqrt(dx**2 + dy**2) # distance to next coord
v = np.sqrt(vx**2 + vy**2) # initial speed
b = np.arctan2(dy, dx) - a # bearing to next coord (anti-clockwise from positive x-axis)
w = turn_rate # turn rate (anti-clockwise from positive x-axis)
if b > np.radians(180):
b -= 2*np.pi # wrap bearing into (-180, 180] degrees
elif b <= np.radians(-180):
b += 2*np.pi # wrap bearing into (-180, 180] degrees
if b < 0:
w = -w # if bearing is in [-180, 0), turn right instead
r = v / np.abs(w) # radius of turn
if b >= 0:
p = d * np.cos(b)
q = r - d*np.sin(b)
else:
p = -d*np.cos(b)
q = r + d*np.sin(b)
alpha = np.arctan2(p, q)
beta = np.arccos(r / np.sqrt(p**2 + q**2))
angle = (alpha + beta + np.pi) % (2*np.pi) - np.pi # actual angle turned
if w > 0:
angle = (alpha - beta + np.pi) % (2*np.pi) - np.pi # quadrant adjustment
t1 = angle / w # turn time
if t1 > 0:
# make turn model and add to list
turn_model = ConstantTurn(turn_noise_diff_coeffs=(0, 0), turn_rate=w)
state.state_vector = turn_model.function(state=state, time_interval=timedelta(
seconds=t1)) # move platform through turn
state.timestamp += timedelta(seconds=t1)
transition_times.append(timedelta(seconds=t1))
transition_models.append(turn_model)
dx = x_coord - state.state_vector[0] # get remaining distance to next x-coord
dy = y_coord - state.state_vector[2] # get remaining distance to next y-coord
d = np.sqrt(dx**2 + dy**2) # remaining distance to next coord
t2 = (time - state.timestamp).total_seconds() # time remaining before platform should
# be at next coord
if d > 0: # if platform is not already at target coord, add linear acceleration model
try:
accel_model = Point2PointConstantAcceleration(state=deepcopy(state),
destination=(x_coord, y_coord),
duration=timedelta(seconds=t2))
except OvershootError:
# if linear accel leads to overshoot, apply model to stop at target coord instead
accel_model = Point2PointStop(state=deepcopy(state),
destination=(x_coord, y_coord))
state.state_vector = accel_model.function(state=state,
time_interval=timedelta(seconds=t2))
state.timestamp += timedelta(seconds=t2)
transition_times.append(timedelta(seconds=t2))
transition_models.append(accel_model)
return transition_models, transition_times
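# Illustrative usage sketch (not part of the original module). The platform
# state uses a (x, vx, y, vy) state vector; the coordinates, times and turn
# rate below are arbitrary example values.
#
#     import numpy as np
#     from datetime import datetime, timedelta
#     from stonesoup.types.array import StateVector
#     from stonesoup.types.state import State
#
#     start = datetime.now()
#     initial = State(StateVector([0., 1., 0., 0.]), timestamp=start)
#     x = [0., 10., 10.]
#     y = [0., 0., 10.]
#     t = [start, start + timedelta(seconds=15), start + timedelta(seconds=35)]
#     models, times = create_smooth_transition_models(initial, x, y, t,
#                                                     turn_rate=np.radians(10))
#     # `models` alternates ConstantTurn and point-to-point acceleration models;
#     # `times` gives how long each model should be applied.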
class OvershootError(Exception):
pass
class Point2PointConstantAcceleration(TransitionModel):
r"""Constant acceleration transition model for 2D cartesian coordinates
The platform is assumed to move with constant acceleration between two given cartesian
coordinates.
Motion is determined by the kinematic formulae:
.. math::
v &= u + at \\
s &= ut + \frac{1}{2} at^2
Where :math:`u, v, a, t, s` are initial speed, final speed, acceleration, transition time and
distance travelled respectively.
"""
state: State = Property(doc="The initial state, assumed to have x and y cartesian position and"
"velocities")
destination: Tuple[float, float] = Property(doc="Destination coordinates in 2D cartesian"
"coordinates (x, y)")
duration: timedelta = Property(doc="Duration of transition in seconds")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dx = self.destination[0] - self.state.state_vector[0] # x-distance to destination
dy = self.destination[1] - self.state.state_vector[2] # y-distance to destination
ux = self.state.state_vector[1] # initial x-speed
uy = self.state.state_vector[3] # initial y-speed
t = self.duration.total_seconds() # duration of acceleration
self.ax = 2*(dx - ux*t) / t**2 # x-acceleration
self.ay = 2*(dy - uy*t) / t**2 # y-acceleration
vx = ux + self.ax*t # final x-speed
vy = uy + self.ay*t # final y-speed
if np.sign(ux) != np.sign(vx) or np.sign(uy) != np.sign(vy):
raise OvershootError()
@property
def ndim_state(self):
return 4
def covar(self, **kwargs):
raise NotImplementedError('Covariance not defined')
def pdf(self, state1, state2, **kwargs):
raise NotImplementedError('pdf not defined')
def rvs(self, num_samples=1, **kwargs):
raise NotImplementedError('rvs not defined')
def function(self, state, time_interval, **kwargs):
x = state.state_vector[0]
y = state.state_vector[2]
t = time_interval.total_seconds()
ux = state.state_vector[1] # initial x-speed
uy = state.state_vector[3] # initial y-speed
dx = ux*t + 0.5*self.ax*(t**2) # x-distance travelled
dy = uy*t + 0.5*self.ay*(t**2) # y-distance travelled
vx = ux + self.ax*t # resultant x-speed
vy = uy + self.ay*t # resultant y-speed
return StateVector([x+dx, vx, y+dy, vy])
class Point2PointStop(TransitionModel):
r"""Constant acceleration transition model for 2D cartesian coordinates
The platform is assumed to move with constant acceleration between two given cartesian
coordinates.
Motion is determined by the kinematic formulae:
.. math::
v &= u + at \\
v^2 &= u^2 + 2as
Where :math:`u, v, a, t, s` are initial speed, final speed, acceleration, transition time and
distance travelled respectively.
The platform is decelerated to 0 velocity at the destination point and waits for the remaining
duration.
"""
state: State = Property(doc="The initial state, assumed to have x and y cartesian position and"
"velocities")
destination: Tuple[float, float] = Property(doc="Destination coordinates in 2D cartesian"
"coordinates (x, y)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dx = self.destination[0] - self.state.state_vector[0] # x-distance to destination
dy = self.destination[1] - self.state.state_vector[2] # y-distance to destination
ux = self.state.state_vector[1] # initial x-speed
uy = self.state.state_vector[3] # initial y-speed
if dx == 0:
self.ax = 0 # x-acceleration (0 if already at destination x-coord)
else:
self.ax = -(ux**2) / (2*dx)
if dy == 0:
self.ay = 0 # y-acceleration (0 if already at destination y-coord)
else:
self.ay = -(uy**2) / (2*dy)
if self.ax != 0:
self.t = -ux / self.ax # deceleration time
elif self.ay != 0:
self.t = -uy / self.ay # deceleration time (if already at x-coord)
else:
self.t = 0 # at destination so acceleration time is 0
self.start_time = self.state.timestamp
@property
def ndim_state(self):
return 4
def covar(self, **kwargs):
raise NotImplementedError('Covariance not defined')
def pdf(self, state1, state2, **kwargs):
raise NotImplementedError('pdf not defined')
def rvs(self, num_samples=1, **kwargs):
raise NotImplementedError('rvs not defined')
def function(self, state, time_interval, **kwargs):
t = time_interval.total_seconds()
decel_time_remaining = self.t - (state.timestamp - self.start_time).total_seconds()
x = state.state_vector[0]
y = state.state_vector[2]
ux = state.state_vector[1] # initial x-speed
uy = state.state_vector[3] # initial y-speed
if t < decel_time_remaining: # still some deceleration needed
dx = ux*t + (0.5*self.ax)*t**2
| |
<filename>ot/gromov.py
# -*- coding: utf-8 -*-
"""
Gromov-Wasserstein transport method
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
from .bregman import sinkhorn
from .utils import dist, UndefinedParameter
from .optim import cg
def init_matrix(C1, C2, p, q, loss_fun='square_loss'):
"""Return loss matrices and tensors for Gromov-Wasserstein fast computation
Returns the value of \mathcal{L}(C1,C2) \otimes T with the selected loss
function as the loss function of Gromov-Wasserstein discrepancy.
The matrices are computed as described in Proposition 1 in [12]
Where :
* C1 : Metric cost matrix in the source space
* C2 : Metric cost matrix in the target space
* T : A coupling between those two spaces
The square-loss function L(a,b)=|a-b|^2 is read as :
L(a,b) = f1(a)+f2(b)-h1(a)*h2(b) with :
* f1(a)=(a^2)
* f2(b)=(b^2)
* h1(a)=a
* h2(b)=2*b
The kl-loss function L(a,b)=a*log(a/b)-a+b is read as :
L(a,b) = f1(a)+f2(b)-h1(a)*h2(b) with :
* f1(a)=a*log(a)-a
* f2(b)=b
* h1(a)=a
* h2(b)=log(b)
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
Metric cost matrix in the target space
p : ndarray, shape (ns,)
Probability distribution in the source space
q : ndarray, shape (nt,)
Probability distribution in the target space
loss_fun : str, optional
Name of the loss function to use: 'square_loss' (default) or 'kl_loss'
Returns
-------
constC : ndarray, shape (ns, nt)
Constant C matrix in Eq. (6)
hC1 : ndarray, shape (ns, ns)
h1(C1) matrix in Eq. (6)
hC2 : ndarray, shape (nt, nt)
h2(C) matrix in Eq. (6)
References
----------
.. [12] Peyré, Gabriel, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
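Examples
--------
A small numerical check of the square-loss decomposition above (added for
illustration; it is not part of the cited reference):

>>> a, b = 3.0, 5.0
>>> (a - b) ** 2 == a ** 2 + b ** 2 - a * (2 * b)
True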
"""
if loss_fun == 'square_loss':
def f1(a):
return (a**2)
def f2(b):
return (b**2)
def h1(a):
return a
def h2(b):
return 2 * b
elif loss_fun == 'kl_loss':
def f1(a):
return a * np.log(a + 1e-15) - a
def f2(b):
return b
def h1(a):
return a
def h2(b):
return np.log(b + 1e-15)
constC1 = np.dot(np.dot(f1(C1), p.reshape(-1, 1)),
np.ones(len(q)).reshape(1, -1))
constC2 = np.dot(np.ones(len(p)).reshape(-1, 1),
np.dot(q.reshape(1, -1), f2(C2).T))
constC = constC1 + constC2
hC1 = h1(C1)
hC2 = h2(C2)
return constC, hC1, hC2
def tensor_product(constC, hC1, hC2, T):
"""Return the tensor for Gromov-Wasserstein fast computation
The tensor is computed as described in Proposition 1 Eq. (6) in [12].
Parameters
----------
constC : ndarray, shape (ns, nt)
Constant C matrix in Eq. (6)
hC1 : ndarray, shape (ns, ns)
h1(C1) matrix in Eq. (6)
hC2 : ndarray, shape (nt, nt)
h2(C) matrix in Eq. (6)
Returns
-------
tens : ndarray, shape (ns, nt)
\mathcal{L}(C1,C2) \otimes T tensor-matrix multiplication result
References
----------
.. [12] Peyré, Gabriel, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
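Examples
--------
An illustrative shape check only (the values below are arbitrary):

>>> import numpy as np
>>> ns, nt = 4, 3
>>> constC = np.zeros((ns, nt))
>>> hC1, hC2 = np.eye(ns), np.eye(nt)
>>> T = np.full((ns, nt), 1 / (ns * nt))
>>> tensor_product(constC, hC1, hC2, T).shape
(4, 3)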
"""
A = -np.dot(hC1, T).dot(hC2.T)
tens = constC + A
# tens -= tens.min()
return tens
def gwloss(constC, hC1, hC2, T):
"""Return the Loss for Gromov-Wasserstein
The loss is computed as described in Proposition 1 Eq. (6) in [12].
Parameters
----------
constC : ndarray, shape (ns, nt)
Constant C matrix in Eq. (6)
hC1 : ndarray, shape (ns, ns)
h1(C1) matrix in Eq. (6)
hC2 : ndarray, shape (nt, nt)
h2(C) matrix in Eq. (6)
T : ndarray, shape (ns, nt)
Current value of transport matrix T
Returns
-------
loss : float
Gromov Wasserstein loss
References
----------
.. [12] Peyré, Gabriel, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
"""
tens = tensor_product(constC, hC1, hC2, T)
return np.sum(tens * T)
def gwggrad(constC, hC1, hC2, T):
"""Return the gradient for Gromov-Wasserstein
The gradient is computed as described in Proposition 2 in [12].
Parameters
----------
constC : ndarray, shape (ns, nt)
Constant C matrix in Eq. (6)
hC1 : ndarray, shape (ns, ns)
h1(C1) matrix in Eq. (6)
hC2 : ndarray, shape (nt, nt)
h2(C) matrix in Eq. (6)
T : ndarray, shape (ns, nt)
Current value of transport matrix T
Returns
-------
grad : ndarray, shape (ns, nt)
Gromov Wasserstein gradient
References
----------
.. [12] Peyré, Gabriel, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
"""
return 2 * tensor_product(constC, hC1, hC2,
T) # [12] Prop. 2 misses a 2 factor
def update_square_loss(p, lambdas, T, Cs):
"""
Updates C according to the L2 Loss kernel with the S Ts couplings
calculated at each iteration
Parameters
----------
p : ndarray, shape (N,)
Masses in the targeted barycenter.
lambdas : list of float
List of the S spaces' weights.
T : list of S np.ndarray of shape (ns,N)
The S Ts couplings calculated at each iteration.
Cs : list of S ndarray, shape(ns,ns)
Metric cost matrices.
Returns
----------
C : ndarray, shape (nt, nt)
Updated C matrix.
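Notes
-----
As a restatement of the computation below (not part of the original text),
the update is C = (sum_s lambdas[s] * T[s]^T Cs[s] T[s]) / (p p^T), where the
division by the outer product p p^T is taken elementwise.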
"""
tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
for s in range(len(T))])
ppt = np.outer(p, p)
return np.divide(tmpsum, ppt)
def update_kl_loss(p, lambdas, T, Cs):
"""
Updates C according to the KL Loss kernel with the S Ts couplings calculated at each iteration
Parameters
----------
p : ndarray, shape (N,)
Weights in the targeted barycenter.
lambdas : list of the S spaces' weights
T : list of S np.ndarray of shape (ns,N)
The S Ts couplings calculated at each iteration.
Cs : list of S ndarray, shape(ns,ns)
Metric cost matrices.
Returns
----------
C : ndarray, shape (ns,ns)
updated C matrix
"""
tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
for s in range(len(T))])
ppt = np.outer(p, p)
return np.exp(np.divide(tmpsum, ppt))
def gromov_wasserstein(C1, C2, p, q, loss_fun, log=False, armijo=False, **kwargs):
"""
Returns the Gromov-Wasserstein transport between (C1, p) and (C2, q)
The function solves the following optimization problem:
.. math::
GW = \min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}
Where :
- C1 : Metric cost matrix in the source space
- C2 : Metric cost matrix in the target space
- p : distribution in the source space
- q : distribution in the target space
- L : loss function to account for the misfit between the similarity matrices
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
Metric cost matrix in the target space
p : ndarray, shape (ns,)
Distribution in the source space
q : ndarray, shape (nt,)
Distribution in the target space
loss_fun : str
loss function used for the solver either 'square_loss' or 'kl_loss'
max_iter : int, optional
Max number of iterations
tol : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
armijo : bool, optional
If True, the step size of the line-search is found via an Armijo search. Otherwise the closed form is used.
If there are convergence issues, use False.
**kwargs : dict
parameters can be directly passed to the ot.optim.cg solver
Returns
-------
T : ndarray, shape (ns, nt)
Coupling between the two spaces that minimizes:
\sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}
log : dict
Convergence information and loss.
References
----------
.. [12] <NAME>, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
.. [13] <NAME>. Gromov–Wasserstein distances and the
metric approach to object matching. Foundations of computational
mathematics 11.4 (2011): 417-487.
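Examples
--------
A usage sketch only (point counts and values are arbitrary; `dist` is the
helper imported from ot.utils at the top of this module):

>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> xs, xt = rng.randn(5, 2), rng.randn(6, 2)
>>> C1, C2 = dist(xs, xs), dist(xt, xt)
>>> p, q = np.full(5, 1 / 5), np.full(6, 1 / 6)
>>> T = gromov_wasserstein(C1, C2, p, q, 'square_loss')  # doctest: +SKIP
>>> T.shape  # doctest: +SKIP
(5, 6)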
"""
constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)
G0 = p[:, None] * q[None, :]
def f(G):
return gwloss(constC, hC1, hC2, G)
def df(G):
return gwggrad(constC, hC1, hC2, G)
if log:
res, log = cg(p, q, 0, 1, f, df, G0, log=True, armijo=armijo, C1=C1, C2=C2, constC=constC, **kwargs)
log['gw_dist'] = gwloss(constC, hC1, hC2, res)
return res, log
else:
return cg(p, q, 0, 1, f, df, G0, armijo=armijo, C1=C1, C2=C2, constC=constC, **kwargs)
def gromov_wasserstein2(C1, C2, p, q, loss_fun, log=False, armijo=False, **kwargs):
"""
Returns the Gromov-Wasserstein discrepancy between (C1, p) and (C2, q)
The function solves the following optimization problem:
.. math::
GW = \min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}
Where :
- C1 : Metric cost matrix in the source space
- C2 : Metric cost matrix in the target space
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved
# ============================================================================
""" Abstract Writer """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC, abstractmethod
from typing import Tuple, Dict, List
import numpy
from xai.data import explorer
from xai.formatter.report.section import OverviewSection, DetailSection
################################################################################
### Writer Visitor
################################################################################
class Writer(ABC):
"""
The Strategy interface declares operations common to all
supported report output formats.
"""
def __init__(self, *values) -> None:
"""
Abstract Writer
"""
self._values = values
@property
def values(self):
"""Returns keyword-ed variable."""
return self._values
def __str__(self):
return 'Writer:(' + str(self.values) + ')'
@abstractmethod
def out(self):
"""
Output Report
"""
pass
@abstractmethod
def build(self, title: str, overview: OverviewSection,
detail: DetailSection, *, content_table=False):
"""
Build Report
Args:
title(str): header title
overview(OverviewSection): Cover Section of report
detail(DetailSection): Details Section of report
content_table (bool): is content table enabled
default False
"""
pass
################################################################################
### Base Section
################################################################################
@abstractmethod
def add_new_page(self):
"""
Add new page
"""
pass
@abstractmethod
def draw_header(self, text: str, level: int, *, link=None):
"""
Draw Header
Args:
text(str): header text in the report
level(int): header level
link: header link
"""
pass
@abstractmethod
def draw_title(self, text: str, level: int, *, link=None):
"""
Draw Title
Args:
text(str): title in the report
level(int): title type (section or paragraph)
link: title link
"""
pass
@abstractmethod
def draw_paragraph(self, text: str):
"""
Draw Paragraph
Args:
text(str): html text to render in the report
"""
pass
################################################################################
### Basic/Reusable Section
################################################################################
@abstractmethod
def draw_basic_key_value_pairs(self, notes: str, *,
info: list):
"""
Draw key-value pairs information to the report
Args:
notes(str): Explain the block
info (list): list of tuples, or list of (list of tuples) for
multi-level rendering, e.g. to display `model_info`
"""
pass
@abstractmethod
def draw_basic_table(self, notes: str, *,
table_header: list, table_data: list,
col_width: list):
"""
Draw table to the report
Args:
notes(str): Explain the block
table_header (list): list of str
table_data (list): list of str
col_width: list of float,
default: None (evenly divided for the whole page width)
"""
pass
@abstractmethod
def draw_basic_images_with_grid_spec(self, notes: str, *,
image_list, grid_spec):
"""
Draw image blocks with formatted grid specification
Args:
notes(str): Explain the block
image_list (list): the list of image_paths
grid_spec (dict): indicate image size and position
- key: image_name, or index if image_set is a list
- value: (x,y,w,h) position and weight/height of image,
with left top corner of the block as (0,0), unit in mm
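For illustration only (hypothetical file names and sizes):
image_list = ['roc.png', 'pr.png'] with
grid_spec = {0: (0, 0, 80, 60), 1: (80, 0, 80, 60)} places the two
images side by side, each 80 mm wide and 60 mm high.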
"""
pass
################################################################################
### Summary Section
################################################################################
@abstractmethod
def draw_training_time(self, notes: str, *, timing: List[Tuple[str, int]]):
"""
Draw information of timing to the report
Args:
notes(str): Explain the block
timing (:obj:`list` of :obj:`tuple`): list of tuple
(name, time in second)
"""
pass
@abstractmethod
def draw_data_set_summary(self, notes: str, *,
data_summary: List[Tuple[str, int]]):
"""
Draw information of dataset summary to the report
Args:
notes(str): Explain the block
data_summary (:obj:`list` of :obj:`tuple`): list of tuple
(dataset_name, dataset_sample_number)
"""
pass
@abstractmethod
def draw_evaluation_result_summary(self, notes: str, *,
evaluation_result: dict):
"""
Draw information of training performance to the result
Args:
evaluation_result (dict): evaluation metric
- key: metric_name
- value: metric_value: single float value for average/overall metric,
list for class metrics
sample input 1: {'precision': 0.5}, report value directly
sample input 2: {'precision': {'class':[0.5,0.4,0.3],'average':0.5}},
report "average" value
sample input 3: {'precision': {'class':[0.5,0.4,0.3]}},
report unweighted average of the "class" values
notes (str, Optional): explain the block
"""
pass
@abstractmethod
def draw_model_info_summary(self, notes: str, *, model_info: list):
"""
Draw information of model info to the result
Args:
model_info (:obj:`list` of :obj:
`tuple`, Optional): list of tuple (model info attribute, model info value).
Default information include `use case name`, `version`, `use case team`.
notes (str, Optional): explain the block
"""
pass
################################################################################
### Data Section
################################################################################
@abstractmethod
def draw_data_missing_value(self, notes: str, *, missing_count: dict,
total_count: dict, ratio=False):
"""
Draw Missing Data Value Summary Table
Args:
notes(str): Explain the block
missing_count(dict): Missing Count
total_count(dict): Total Count
ratio(bool): True if `missing_value` is the percentage
"""
pass
@abstractmethod
def draw_data_set_distribution(self, notes: str, *,
data_set_distribution: Tuple[str, explorer.CategoricalStats],
max_class_shown=20):
"""
Draw information of distribution on data set
Args:
notes(str): Explain the block
data_set_distribution (tuple: (str,dict)):
- tuple[0] str: label/split name
- tuple[1] CategoricalStats object: `frequency_count` attribute
key - class_name/split_name,
value - class_count/split_count
max_class_shown (int, Optional): maximum number of classes shown
in the figure, default is 20
notes (str, Optional):
explain the block
"""
pass
@abstractmethod
def draw_data_attributes(self, notes: str, *, data_attribute: Dict):
"""
Draw information of data attribute for data fields to the report
Args:
notes(str): Explain the block
data_attribute (:dict of :dict):
-key: data field name
-value: attributes (dict)
- key: attribute name
- value: attribute value
"""
pass
@abstractmethod
def draw_categorical_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.CategoricalStats],
max_values_display=20,
colors=None):
"""
Draw information of field value distribution for categorical type to
the report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :CategoricalStats):
-key: label_name
-value: CategoricalStats object
max_values_display (int): maximum number of values displayed
colors (list): the list of color code for rendering different class
"""
pass
@abstractmethod
def draw_numeric_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.NumericalStats],
force_no_log=False,
x_limit=False,
colors=None):
"""
Draw information of field value distribution for numerical type to
the report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :NumericalStats):
-key: label_name
-value: NumericalStats object
force_no_log (bool): whether to change y-scale to logarithmic
scale for a more balanced view
x_limit (bool): whether the x-axis only displays the required percentile range.
If True, field_distribution should have a
key "x_limit" and value of [x_min, x_max].
colors (list): the list of color code for rendering different class
"""
pass
@abstractmethod
def draw_text_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,explorer.TextStats]):
"""
Draw information of field value distribution for text type to the
report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :TextStats):
-key: label_name
-value: TextStats object
"""
pass
@abstractmethod
def draw_datetime_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.DatetimeStats]):
"""
Draw information of field value distribution for datetime type to the
report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :dict):
-key: label_name
-value (:dict of :DatetimeStats):
Note that in order to render it in a 2D diagram, the resolution has to be ['YEAR','MONTH'].
- 1st level key: year_X(int)
- 1st level value:
- 2nd level key: month_X(int)
- 2nd level value: count of sample in month_X of year_X
"""
pass
################################################################################
### Feature Section
################################################################################
@abstractmethod
def draw_feature_importance(self, notes: str, *,
importance_ranking: List[List],
importance_threshold: float,
maximum_number_feature=20):
"""
Add information of feature importance to the report.
Args:
notes(str): Explain the block
importance_ranking(:list of :list): a list of 2-item lists,
item[0]: score, item[1] feature_name
importance_threshold(float): threshold for displaying the feature
name and score in tables
maximum_number_feature(int): maximum number of features shown in bar-chart diagram
"""
pass
################################################################################
### Feature Shap Values
################################################################################
@abstractmethod
def draw_feature_shap_values(self, notes: str, *, mode: str,
feature_shap_values: List[Tuple[str, List]],
class_id: int,
train_data: numpy.ndarray = None):
"""
Add information of feature shap values to the report.
Args:
notes(str): Explain the block
mode (str): Model mode - classification/regression model
feature_shap_values(:list of :tuple): a list of 2-item tuple,
item[0]: feature name, item[1] shap values on each training samples
class_id(int): the class id for visualization.
train_data(numpy.ndarray): Optional, training data; rows are samples, columns are features.
"""
pass
################################################################################
### Training Section
################################################################################
@abstractmethod
def draw_hyperparameter_tuning(self, notes: str, *,
history: dict, best_idx: str,
search_space=None, benchmark_metric=None,
benchmark_threshold=None,
non_hyperopt_score=None):
"""
Add information of hyperparameter tuning to the report.
Args:
notes(str): Explain the block
history(:dict of dict): a dict of training log dict.
key: iteration index
value: hyperparameter tuning information
Each dict has two keys:
- params: a dict of which key is the parameter name
and value is parameter value
- val_scores: a dict of which key is the metric name
and value is metric value
best_idx(str):
- the best idx based on the benchmark metric, corresponding to the `history` dict key
search_space(:dict): parameter name and | |
token should be
considered as an @ref entity_reference, `False` if the pattern is
not recognised.
@note This call should not verify an entity exits, just that
the format of the string is recognised.
@see @ref entityExists
@see @ref resolveEntityReference
"""
raise NotImplementedError
@abc.abstractmethod
def entityExists(self, entityRefs, context, hostSession):
"""
Called to determine if each @ref entity_reference supplied
points to an entity that exists in the @ref
asset_management_system, and that they can be resolved into
a meaningful string or otherwise queried.
By 'exist' we mean 'is ready to be read'. For example,
entityExists may be called before attempting to read from a
reference that is believed to point to an image sequence, so
that alternatives can be found.
In the future, this may need to be extended to cover a more
complex definition of 'existence' (for example, known to the
system, but not yet finalized). For now however, it should be
assumed to simply mean, 'ready to be consumed', and if only a
placeholder or un-finalized asset is available, `False` should
be returned.
The supplied context's locale may contain information pertinent
to disambiguating this subtle definition of 'exists' in some
cases too, as it better explains the use-case of the call.
@param entityRefs `List[str]` Entity references to query.
@param context Context The calling context.
@param hostSession HostSession The API session.
@return `List[bool]` `True` if the corresponding element in
entityRefs points to an existing entity, `False` if the entity
is not known or ready yet.
"""
raise NotImplementedError
## @}
##
# @name Entity Reference Resolution
#
# The concept of resolution is turning an @ref entity_reference into a
# 'finalized' string. This, ultimately, is anything meaningful to the
# situation. It could be a color space, a directory, a script or image
# sequence. A rule of thumb is that a resolved @ref entity_reference
# should be the string that the application would have anyway, in a
# unmanaged environment. For some kinds of entity - such as a 'Shot', for
# example - there may not be a meaningful string, though often some sensible
# value can be returned.
#
# @{
@abc.abstractmethod
def resolveEntityReference(self, entityRefs, context, hostSession):
"""
Returns the @ref primary_string represented by each given @ref
entity_reference.
If a primary string points to some data, then it should
always be in the form of a valid URL. File paths should be
returned as a `file` scheme URL.
When an @ref entity_reference points to a sequence of files,
the frame, view, etc. substitution tokens should be preserved,
in a sprintf-compatible syntax.
This function should attempt to take into account the current
host/Context to ensure that any other substitution tokens are
presented in a suitable form. The Context should also be
carefully considered to ensure that the access does not violate
any rules of the system - for example, resolving an existing
entity reference for write.
The caller will have first called isEntityReference() on the
supplied strings.
@note You may need to call finalizedEntityVersion() within
this function to ensure any @ref meta_version "meta-versions"
are resolved prior to resolution.
@param entityRefs `List[str]` Entity references to query.
@param context Context The calling context.
@param hostSession HostSession The API session.
@return `List[Union[str,`
exceptions.EntityResolutionError,
exceptions.InvalidEntityReference `]]`
A list containing either the primary string for each
reference; `EntityResolutionError` if a supplied entity
reference does not have a meaningful string representation,
or it is a valid reference format that doesn't exist; or
`InvalidEntityReference` if a supplied entity reference should
not be resolved for that context, for example, if the context
access is `kWrite` and the entity is an existing version.
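As an illustration only (hypothetical scheme and paths), resolving
["myassets://show/shot001/plate?v=2"] for read access might yield
["file:///jobs/show/shot001/plate.%04d.exr"], with the frame token kept
in sprintf form.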
@see @ref entityExists
@see @ref isEntityReference
"""
raise NotImplementedError
def defaultEntityReference(self, specifications, context, hostSession):
"""
Returns an @ref entity_reference considered to be a sensible
default for each of the given specifications and Context. This
is often used in a host to ensure dialogs, prompts or publish
locations default to some sensible value, avoiding the need for
a user to re-enter such information when a Host is being run in
some known environment.
For example, a host may request the default ref for
'ShotSpecification/kWriteMultiple'. If the Manager has some
concept of the 'current sequence' it may wish to return this so
that a 'Create Shots' action starts somewhere meaningful.
@param specifications `List[`
specifications.EntitySpecification `]`
The relevant specifications for the type of entities a host is
about to work with. These should be interpreted in conjunction
with the context to determine the most sensible default.
@param context Context The context the resulting reference
will be used in. When determining a suitable reference to
return, it is important to pay particular attention to the
access pattern. It differentiates between a reference that
will be used for reading or writing, and critically single or
multiple entities.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides access to the openassetio.managerAPI.Host
object representing the process that initiated the API session.
@return `List[str]` An @ref entity_reference or empty string for
each given specification.
"""
return ["" for _ in specifications]
## @}
##
# @name Entity information
#
# There are several common requests for basic, generic information about
# an entity that is assumed to be valid for all entity types.
#
# This suite of methods query information for a supplied @ref
# entity_reference.
#
# @see @ref attributes
#
# @{
@abc.abstractmethod
def entityName(self, entityRefs, context, hostSession):
"""
Returns the name of each entity itself, not including any
hierarchy or classification.
For example:
@li `"Cuttlefish v1"` - for a version of an asset
@li `"seq003"` - for a sequence in a hierarchy
@param entityRefs `List[str]` Entity references to query.
@param context Context The calling context.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides access to the openassetio.managerAPI.Host
object representing the process that initiated the API session.
@return `List[str]` Strings containing any valid characters for
the manager's implementation.
"""
raise NotImplementedError
@abc.abstractmethod
def entityDisplayName(self, entityRefs, context, hostSession):
"""
Returns an unambiguous, humanised display name for each entity.
The display name may want to consider the host, and any other
relevant Context information to form a display name for an
entity that can uniquely identify the entity in that context.
For example:
@li `"dive / build / cuttlefish / model / v1"` - for a version
of an asset in an 'open recent' menu.
@li `"Sequence 003 [ Dive / Episode 1 ]"` - for a sequence in
a hierarchy, as a window title.
@param entityRefs `List[str]` Entity references to query.
@param context Context The calling context.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides access to the openassetio.managerAPI.Host
object representing the process that initiated the API session.
@return `List[Union[str,` exceptions.InvalidEntityReference `]]`
For each given entity, either a string containing any valid
characters for the @ref asset_management_system's
implementation; or an `InvalidEntityReference` if the supplied
reference is not recognised by the asset management system.
"""
raise NotImplementedError
@abc.abstractmethod
def getEntityAttributes(self, entityRefs, context, hostSession):
"""
Retrieve @ref attributes for each given entity.
It may be required to bridge between certain 'first-class'
properties of your asset management system and the well-known
OpenAssetIO attributes. For example, if the asset system
represents a 'Shot' with 'cutIn' and 'cutOut' properties or
accessors, these should be remapped to the
@ref openassetio.constants.kField_FrameIn and Out attributes as
appropriate.
@warning See @ref setEntityAttributes for important notes on
attributes and its role in the system.
@param entityRefs `List[str]` Entity references to query.
@param context Context The calling context.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides | |
is of type OvalDefinition
Returns None if no definitions could be found
@rtype: List
@return: All definitions in the OVAL document or None if none were found
"""
root = self.getDocumentRoot()
if not root:
return None
defroot = root.find("def:definitions", OvalDocument.NS_DEFAULT)
if defroot is None:
return None
element_list = list(defroot)
if not element_list:
return None
return [OvalDefinition(element) for element in element_list]
def getTests(self):
"""
Returns a list of all tests in this OvalDocument where each list item is of type OvalTest
Returns None if no tests could be found
@rtype: List
@return: All tests in the OVAL document or None if none were found
"""
root = self.getDocumentRoot()
if not root:
return None
testroot = root.find("def:tests", OvalDocument.NS_DEFAULT)
if testroot is None:
return None
element_list = list(testroot)
if not element_list:
return None
return [OvalTest(element) for element in element_list]
def getObjects(self):
"""
Returns a list of all objects in this OvalDocument where each list item is of type OvalObject
Returns None if no objects could be found
@rtype: List
@return: All objects in the OVAL document or None if none were found
"""
root = self.getDocumentRoot()
if not root:
return None
objectroot = root.find("def:objects", OvalDocument.NS_DEFAULT)
if objectroot is None:
return None
element_list = list(objectroot)
if not element_list:
return None
return [OvalObject(element) for element in element_list]
def getStates(self):
"""
Returns a list of all states in this OvalDocument where each list item is of type OvalState
Returns None if no states could be found
@rtype: List
@return: All states in the OVAL document or None if none were found
"""
root = self.getDocumentRoot()
if not root:
return None
stateroot = root.find("def:states", OvalDocument.NS_DEFAULT)
if stateroot is None:
return None
element_list = list(stateroot)
if not element_list:
return None
return [OvalState(element) for element in element_list]
def getVariables(self):
"""
Returns a list of all variables in this OvalDocument where each list item is of type OvalVariable
Returns None if no variables could be found
@rtype: List
@return: All variables in the OVAL document or None if none were found
"""
root = self.getDocumentRoot()
if not root:
return None
varroot = root.find("def:variables", OvalDocument.NS_DEFAULT)
if varroot is None:
return None
element_list = list(varroot)
if not element_list:
return None
return [OvalVariable(element) for element in element_list]
def getElementByID(self, ovalid):
"""
Uses the ovalid argument to determine what type of element is being referenced and locate that element
in the OVAL ElementTree.
Returns an OvalElement of the appropriate class (OvalDefinition, OvalTest, ...)
or None if there is no ElementTree or if a matching item could not be found
@rtype: OvalElement
@return: The located element as the appropriate OvalElement subclass, or None if no matching element was found.
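Example (hypothetical OVAL ID):
document.getElementByID("oval:org.example:def:100") returns the matching
OvalDefinition, or None if no element with that ID is present.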
"""
if not ovalid:
return None
root = self.getDocumentRoot()
if not root:
return None
try:
oval_type = OvalElement.getElementTypeFromOvalID(ovalid)
except Exception:
return None
if oval_type == OvalDefinition.DEFINITION:
return self.id_to_definition[ovalid]
elif oval_type == OvalDefinition.TEST:
return self.id_to_test[ovalid]
elif oval_type == OvalDefinition.OBJECT:
return self.id_to_object[ovalid]
elif oval_type == OvalDefinition.STATE:
return self.id_to_state[ovalid]
elif oval_type == OvalDefinition.VARIABLE:
return self.id_to_variable[ovalid]
else:
return None
def addElement(self, element, replace=True):
"""
Adds the element to the ElementTree for this OVAL document
The element argument must be of type OvalElement
This method uses the OVALID of the element to determine what type of element it is
and if an existing element with that OVALID already exists.
This method will also create the necessary structure (i.e. adding <definitions>, <tests>, etc.)
if the ElementTree does not already contain it.
By default this method will replace an existing item with the same OVALID, but this behavior can
be overridden by changing the option second argument to a value of "False"
Returns True on success, otherwise False
@rtype: boolean
@return: True if the element was added to the document, otherwise False
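Example usage (hypothetical element, shown for illustration only):
document.addElement(OvalTest(test_element)) adds the test, replacing any
existing element with the same OVALID, while
document.addElement(OvalTest(test_element), replace=False) leaves an
existing element untouched and returns False.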
"""
if not element or element is None:
return False
if not self.tree or self.tree is None:
return False
ovalid = element.getId()
if not ovalid:
return False
root = self.tree.getroot()
if not root:
root = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}oval_definitions")
self.tree._setroot(root)
# If replace has been set to False, then we want to exit with no changes
# when an element with this OVALID already appears in the document
if not replace:
existing = self.getElementByID(ovalid)
if existing:
return False
try:
oval_type = OvalElement.getElementTypeFromOvalID(ovalid)
except Exception:
return False
# Depending on the ID type, find the parent for it or create that parent if it doesn't exist
# Then append the current element
if oval_type == OvalDefinition.DEFINITION:
parent = root.find("def:definitions", OvalDocument.NS_DEFAULT)
if parent is None:
parent = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}definitions")
root.append(parent)
parent.append(element.getElement())
self.id_to_definition[ovalid] = element
return True
elif oval_type == OvalDefinition.TEST:
parent = root.find("def:tests", OvalDocument.NS_DEFAULT)
if parent is None:
parent = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}tests")
root.append(parent)
parent.append(element.getElement())
self.id_to_test[ovalid] = element
return True
elif oval_type == OvalDefinition.OBJECT:
parent = root.find("def:objects", OvalDocument.NS_DEFAULT)
if parent is None:
parent = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}objects")
root.append(parent)
parent.append(element.getElement())
self.id_to_object[ovalid] = element
return True
elif oval_type == OvalDefinition.STATE:
parent = root.find("def:states", OvalDocument.NS_DEFAULT)
if parent is None:
parent = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}states")
root.append(parent)
parent.append(element.getElement())
self.id_to_state[ovalid] = element
return True
elif oval_type == OvalDefinition.VARIABLE:
parent = root.find("def:variables", OvalDocument.NS_DEFAULT)
if parent is None:
parent = Element("{" + OvalDocument.NS_DEFAULT.get("def") + "}variables")
root.append(parent)
self.id_to_variable[ovalid] = element
parent.append(element.getElement())
return True
else:
return False
# --------------------- END OF OvalDocument class ----------------------------
class OvalGenerator(object):
"""
Contains information about this OvalDocument, such as the schema version, the product that produced it, and when it was produced
"""
def __init__(self, element):
self.element = element
def getProduct(self):
"""
Gets the value of the product element
"""
if self.element is None:
return None
# child = self.element.find("{http://oval.mitre.org/XMLSchema/oval-common-5}product_name")
child = self.element.find("oval:product_name", OvalDocument.NS_OVAL)
if child is None:
return None
else:
return child.text
def get_element(self):
return self.element
def setProduct(self, product):
"""
Sets a value for the product element. If a product element does not already exist, one will be created
"""
if self.element is None:
return False
if product is None:
return False
child = self.element.find("oval:product_name", OvalDocument.NS_OVAL)
if child is not None:
child.text = product
else:
child = Element("{" + OvalDocument.NS_OVAL.get("oval") + "}product_name")  # oval-common namespace, so getProduct() can find it
child.text = product
self.element.append(child)
def getSchemaVersion(self):
"""
Gets the value of the schema_version element
"""
if self.element is None:
return None
child = self.element.find("oval:schema_version", OvalDocument.NS_OVAL)
if child is not None:
return child.text
else:
return None
def setSchemaVersion(self, version):
"""
Sets a value for the schema_version element. If that element does not exist, one will be created.
"""
if self.element is None:
return False
if version is None:
return False
child = self.element.find("oval:schema_version", OvalDocument.NS_OVAL)
if child is not None:
child.text = version
else:
child = Element("{" + OvalDocument.NS_OVAL.get("oval") + "}schema_version")  # oval-common namespace, so getSchemaVersion() can find it
child.text = version
self.element.append(child)
def getTimestamp(self):
"""
Gets the value of the timestamp element
"""
if self.element is None:
return None
child = self.element.find("oval:timestamp", OvalDocument.NS_OVAL)
if child is not None:
return child.text
else:
return None
def setTimestamp(self, timestamp):
"""
Sets a value for the timestamp element. If that element does not exist, one will be created.
If the timestamp argument is set to None, the timestamp will be set to the current time.
"""
if self.element is None:
return False
if not timestamp or timestamp is None:
now = datetime.date.today()
timestamp = now.strftime(OvalDocument.TIME_FORMAT)
child = self.element.find("oval:timestamp", OvalDocument.NS_OVAL)
if child is not None:
child.text = timestamp
else:
child = Element("{" + OvalDocument.NS_OVAL.get("oval") + "}timestamp")
child.text = timestamp
self.element.append(child)
def getExtra(self, name, namespace=None):
"""
Gets the value of the first child element of the generator where the tag name matches 'name'
If the namespace argument is not provided, it will be assumed that the child element does not have a namespace.
"""
if self.element is None:
return None
if not name:
return None
if namespace is not None:
child = self.element.find(name, namespace)
else:
child = self.element.find(name)
if child is not None:
return child.text
else:
return None
def setExtra(self, name, value, namespace=None):
"""
Sets the value of the first child
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import turtle
import time
from PIL import Image
from tkinter import tix
class App:
s="""self.screen.clearscreen()
self.screen.listen()
self.draw = turtle.RawTurtle(self.screen)
self.draw.pu()
self.screen.onclick(self.listen)
self.draw.ondrag(self.dragging)
self.k=""
self.sav=0
self.bh=""
self.file=""
self.i=0
self.jj=1
self.jj1=0
self.fgh=0
self.func=""
self.func_name=""
self.functions=[]
self.keylinks={}
self.headings={"Object1":0}
self.objects={"Object1":self.draw}
self.index=1
self.stam=0
self.redos=[]#
"""
s1=s
k=""
sav=1
bh=""
file=""
i=0
jj=1
jj1=0
fgh=0
func=""
func_name=""
functions=[]
keylinks={}
headings={"Object1":0}
index=1
stam=0
redos=[]
COLOURS =['snow', 'ghost white', 'white smoke', 'gainsboro', 'floral white', 'old lace',
'linen', 'antique white', 'papaya whip', 'blanched almond', 'bisque', 'peach puff',
'navajo white', 'lemon chiffon', 'mint cream', 'azure', 'alice blue', 'lavender',
'lavender blush', 'misty rose', 'dark slate gray', 'dim gray', 'slate gray',
'light slate gray', 'gray', 'light grey', 'midnight blue', 'navy', 'cornflower blue', 'dark slate blue',
'slate blue', 'medium slate blue', 'light slate blue', 'medium blue', 'royal blue', 'blue',
'dodger blue', 'deep sky blue', 'sky blue', 'light sky blue', 'steel blue', 'light steel blue',
'light blue', 'powder blue', 'pale turquoise', 'dark turquoise', 'medium turquoise', 'turquoise',
'cyan', 'light cyan', 'cadet blue', 'medium aquamarine', 'aquamarine', 'dark green', 'dark olive green',
'dark sea green', 'sea green', 'medium sea green', 'light sea green', 'pale green', 'spring green',
'lawn green', 'medium spring green', 'green yellow', 'lime green', 'yellow green',
'forest green', 'olive drab', 'dark khaki', 'khaki', 'pale goldenrod', 'light goldenrod yellow',
'light yellow', 'yellow', 'gold', 'light goldenrod', 'goldenrod', 'dark goldenrod', 'rosy brown',
'indian red', 'saddle brown', 'sandy brown',
'dark salmon', 'salmon', 'light salmon', 'orange', 'dark orange',
'coral', 'light coral', 'tomato', 'orange red', 'red', 'hot pink', 'deep pink', 'pink', 'light pink',
'pale violet red', 'maroon', 'medium violet red', 'violet red',
'medium orchid', 'dark orchid', 'dark violet', 'blue violet', 'purple', 'medium purple',
'thistle', 'snow2', 'snow3',
'snow4', 'seashell2', 'seashell3', 'seashell4', 'AntiqueWhite1', 'AntiqueWhite2',
'AntiqueWhite3', 'AntiqueWhite4', 'bisque2', 'bisque3', 'bisque4', 'PeachPuff2',
'PeachPuff3', 'PeachPuff4', 'NavajoWhite2', 'NavajoWhite3', 'NavajoWhite4',
'LemonChiffon2', 'LemonChiffon3', 'LemonChiffon4', 'cornsilk2', 'cornsilk3',
'cornsilk4', 'ivory2', 'ivory3', 'ivory4', 'honeydew2', 'honeydew3', 'honeydew4',
'LavenderBlush2', 'LavenderBlush3', 'LavenderBlush4', 'MistyRose2', 'MistyRose3',
'MistyRose4', 'azure2', 'azure3', 'azure4', 'SlateBlue1', 'SlateBlue2', 'SlateBlue3',
'SlateBlue4', 'RoyalBlue1', 'RoyalBlue2', 'RoyalBlue3', 'RoyalBlue4', 'blue2', 'blue4',
'DodgerBlue2', 'DodgerBlue3', 'DodgerBlue4', 'SteelBlue1', 'SteelBlue2',
'SteelBlue3', 'SteelBlue4', 'DeepSkyBlue2', 'DeepSkyBlue3', 'DeepSkyBlue4',
'SkyBlue1', 'SkyBlue2', 'SkyBlue3', 'SkyBlue4', 'LightSkyBlue1', 'LightSkyBlue2',
'LightSkyBlue3', 'LightSkyBlue4', 'SlateGray1', 'SlateGray2', 'SlateGray3',
'SlateGray4', 'LightSteelBlue1', 'LightSteelBlue2', 'LightSteelBlue3',
'LightSteelBlue4', 'LightBlue1', 'LightBlue2', 'LightBlue3', 'LightBlue4',
'LightCyan2', 'LightCyan3', 'LightCyan4', 'PaleTurquoise1', 'PaleTurquoise2',
'PaleTurquoise3', 'PaleTurquoise4', 'CadetBlue1', 'CadetBlue2', 'CadetBlue3',
'CadetBlue4', 'turquoise1', 'turquoise2', 'turquoise3', 'turquoise4', 'cyan2', 'cyan3',
'cyan4', 'DarkSlateGray1', 'DarkSlateGray2', 'DarkSlateGray3', 'DarkSlateGray4',
'aquamarine2', 'aquamarine4', 'DarkSeaGreen1', 'DarkSeaGreen2', 'DarkSeaGreen3',
'DarkSeaGreen4', 'SeaGreen1', 'SeaGreen2', 'SeaGreen3', 'PaleGreen1', 'PaleGreen2',
'PaleGreen3', 'PaleGreen4', 'SpringGreen2', 'SpringGreen3', 'SpringGreen4',
'green2', 'green3', 'green4', 'chartreuse2', 'chartreuse3', 'chartreuse4',
'OliveDrab1', 'OliveDrab2', 'OliveDrab4', 'DarkOliveGreen1', 'DarkOliveGreen2',
'DarkOliveGreen3', 'DarkOliveGreen4', 'khaki1', 'khaki2', 'khaki3', 'khaki4',
'LightGoldenrod1', 'LightGoldenrod2', 'LightGoldenrod3', 'LightGoldenrod4',
'LightYellow2', 'LightYellow3', 'LightYellow4', 'yellow2', 'yellow3', 'yellow4',
'gold2', 'gold3', 'gold4', 'goldenrod1', 'goldenrod2', 'goldenrod3', 'goldenrod4',
'DarkGoldenrod1', 'DarkGoldenrod2', 'DarkGoldenrod3', 'DarkGoldenrod4',
'RosyBrown1', 'RosyBrown2', 'RosyBrown3', 'RosyBrown4', 'IndianRed1', 'IndianRed2',
'IndianRed3', 'IndianRed4', 'sienna1', 'sienna2', 'sienna3', 'sienna4', 'burlywood1',
'burlywood2', 'burlywood3', 'burlywood4', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'tan1',
'tan2', 'tan4', 'chocolate1', 'chocolate2', 'chocolate3', 'firebrick1', 'firebrick2',
'firebrick3', 'firebrick4', 'brown1', 'brown2', 'brown3', 'brown4', 'salmon1', 'salmon2',
'salmon3', 'salmon4', 'LightSalmon2', 'LightSalmon3', 'LightSalmon4', 'orange2',
'orange3', 'orange4', 'DarkOrange1', 'DarkOrange2', 'DarkOrange3', 'DarkOrange4',
'coral1', 'coral2', 'coral3', 'coral4', 'tomato2', 'tomato3', 'tomato4', 'OrangeRed2',
'OrangeRed3', 'OrangeRed4', 'red2', 'red3', 'red4', 'DeepPink2', 'DeepPink3', 'DeepPink4',
'HotPink1', 'HotPink2', 'HotPink3', 'HotPink4', 'pink1', 'pink2', 'pink3', 'pink4',
'LightPink1', 'LightPink2', 'LightPink3', 'LightPink4', 'PaleVioletRed1',
'PaleVioletRed2', 'PaleVioletRed3', 'PaleVioletRed4', 'maroon1', 'maroon2',
'maroon3', 'maroon4', 'VioletRed1', 'VioletRed2', 'VioletRed3', 'VioletRed4',
'magenta2', 'magenta3', 'magenta4', 'orchid1', 'orchid2', 'orchid3', 'orchid4', 'plum1',
'plum2', 'plum3', 'plum4', 'MediumOrchid1', 'MediumOrchid2', 'MediumOrchid3',
'MediumOrchid4', 'DarkOrchid1', 'DarkOrchid2', 'DarkOrchid3', 'DarkOrchid4',
'purple1', 'purple2', 'purple3', 'purple4', 'MediumPurple1', 'MediumPurple2',
'MediumPurple3', 'MediumPurple4', 'thistle1', 'thistle2', 'thistle3', 'thistle4',
'gray1', 'gray2', 'gray3', 'gray4', 'gray5', 'gray6', 'gray7', 'gray8', 'gray9', 'gray10',
'gray11', 'gray12', 'gray13', 'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19',
'gray20', 'gray21', 'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28',
'gray29', 'gray30', 'gray31', 'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37',
'gray38', 'gray39', 'gray40', 'gray42', 'gray43', 'gray44', 'gray45', 'gray46', 'gray47',
'gray48', 'gray49', 'gray50', 'gray51', 'gray52', 'gray53', 'gray54', 'gray55', 'gray56',
'gray57', 'gray58', 'gray59', 'gray60', 'gray61', 'gray62', 'gray63', 'gray64', 'gray65',
'gray66', 'gray67', 'gray68', 'gray69', 'gray70', 'gray71', 'gray72', 'gray73', 'gray74',
'gray75', 'gray76', 'gray77', 'gray78', 'gray79', 'gray80', 'gray81', 'gray82', 'gray83',
'gray84', 'gray85', 'gray86', 'gray87', 'gray88', 'gray89', 'gray90', 'gray91', 'gray92',
'gray93', 'gray94', 'gray95', 'gray97', 'gray98', 'gray99']
def __init__(self,master):
self.master=master
b=tix.Balloon(self.master)
self.tabcon=ttk.Notebook(self.master)
self.pw=Frame(self.tabcon)
self.tabcon.add(self.pw,text="Actions")
self.pl=Frame(self.tabcon)
self.tabcon.add(self.pl,text="Time")
self.nb=Frame(self.tabcon)
self.tabcon.add(self.nb,text="Loop")
self.ia=Frame(self.tabcon)
self.tabcon.add(self.ia,text="Objects")
self.fg=Frame(self.tabcon)
self.tabcon.add(self.fg,text="functions")
self.df=Frame(self.tabcon)
self.tabcon.add(self.df,text="key links")
self.yu=Frame(self.tabcon)
self.tabcon.add(self.yu,text="Screen")
self.tabcon.grid(row=0,column=0,sticky="n")
self.canvas = turtle.ScrolledCanvas(master = self.master)
self.canvas.grid(row=0,column=1)
self.screen=turtle.TurtleScreen(self.canvas)
self.draw = turtle.RawTurtle(self.screen)
self.screen.listen()
self.draw.pu()
self.objects={"Object1":self.draw}
self.screen.onclick(self.listen)
self.draw.ondrag(self.dragging)
Button(self.pw,text="move upwards",command=self.up).grid()
Button(self.pw,text="move downwards",command=self.down).grid()
Button(self.pw,text="move right",command=self.right).grid()
Button(self.pw,text="move left",command=self.left).grid()
Button(self.pw,text="move forward",command=self.fwd).grid(row=4,column=0)
Button(self.pw,text="move backward",command=self.bwd).grid(row=5,column=0)
self.deg=Entry(self.pw)
self.deg.grid(row=6,column=1)
Button(self.pw,text="Rotate left",command=self.rotlt).grid(row=6,column=0)
self.deg1=Entry(self.pw)
self.deg1.grid(row=7,column=1)
Button(self.pw,text="Rotate right",command=self.rotrt).grid(row=7,column=0)
Button(self.pw,text="Show object",command=self.show).grid(row=8,column=0)
Button(self.pw,text="Hide object",command=self.hide).grid(row=9,column=0)
Button(self.pw,text="stamp",command=self.stamp).grid(row=10,column=0)
Button(self.pw,text="clear previous stamp",command=self.clprestmp).grid(row=11)
Button(self.pw,text="clear all stamps",command=self.clallstmp).grid(row=12)
self.gotopos=Entry(self.pw)
self.gotopos.grid(row=13,column=1)
Button(self.pw,text="goto position",command=self.goto).grid(row=13,column=0)
self.circpar=Entry(self.pw)
self.circpar.grid(row=14,column=1)
Button(self.pw,text="circle",command=self.circ).grid(row=14,column=0)
b.bind_widget(self.circpar, msg="""Draw a circle with given radius. The center is radius units left of the turtle; extent – an angle – determines which
part of the circle is drawn. If extent is not given, draw the entire circle. If extent is not a full circle, one endpoint
of the arc is the current pen position. Draw the arc in counterclockwise direction if radius is positive, otherwise in
clockwise direction. Finally the direction of the turtle is changed by the amount of extent. As the circle is approximated
by an inscribed regular polygon, steps determines the number of steps to use. If not given, it will be calculated automatically.
May be used to draw regular polygons. Enter the three parameters one after the other separated by commas. radius is compulsory.""")
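# For example (illustrative values only): typing "50,180,20" in this entry box
# results in self.draw.circle(50, 180, 20), i.e. a half-circle of radius 50
# approximated with 20 steps.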
Button(self.pw,text="pen down",command=self.pendown).grid()
Button(self.pw,text="pen up",command=self.penup).grid()
Button(self.pw,text="Choose pen colour",command=lambda:self.colourchoose("pencolour")).grid()
self.wo=Entry(self.pl)
self.wo.grid(row=0,column=1)
Button(self.pl,text="Wait for",command=self.wa).grid(row=0,column=0)
self.delay=Entry(self.pl)
self.delay.grid(row=1,column=1)
Button(self.pl,text="set animation delay",command=self.dela).grid(row=1,column=0)
self.speed=Entry(self.pl)
self.speed.grid(row=2,column=1)
Button(self.pl,text="set object speed",command=self.spee).grid(row=2,column=0)
b.bind_widget(self.speed,msg="enter integer from 0 to 10. 0 means no animation.")
self.lo=Entry(self.nb)
self.lo.grid(row=0,column=1)
Label(self.nb,text="Loop length").grid(row=0,column=0)
Button(self.nb,text="Start loop",command=self.lop).grid(row=1,column=0)
Button(self.nb,text="End loop",command=self.loz).grid(row=2,column=0)
Button(self.ia,text="Object"+str(self.index),command=lambda:self.chobj("Object1")).grid(row=0,column=0)
self.chs=Button(self.ia,text="Change shape of current object",command=lambda:self.chsh())
self.chs.grid(row=1,column=0)
self.addt=Button(self.ia,text="Add object",command=lambda:self.addobj())
self.addt.grid(row=self.jj+1,column=0)
self.curob=Label(self.ia,text="current object is Object1")
self.curob.grid(row=self.jj+2)
self.stf=Button(self.fg,text="Start Creating Function",command=lambda:self.stafunc())
self.stf.grid(row=self.jj1)
self.stf1=Button(self.fg,text="Stop Creating Function",command=lambda:self.stofunc())
self.stf1.grid(row=self.jj1+1)
func_name=""
func=""
self.keymenu=Menubutton(self.df,text="keys",relief=RAISED)
self.keymenu.grid(row=self.fgh)
self.keymenu.menu = Menu(self.keymenu,tearoff=0)
self.keymenu["menu"]= self.keymenu.menu
keys=['space','enter','<KEY>y','z','Up','Down','Left','Right','1','2','3','4','5','6','7','8','9','0','Shift']
for j in keys:
self.keymenu.menu.add_command(label=j,command=lambda j=j:self.key(j))
self.fulk=Entry(self.df)
self.fulk.grid(row=self.fgh,column=1)
self.keylink=Button(self.df,text="add keylink",command=lambda:self.addkeylink())
self.keylink.grid(row=self.fgh,column=2)
Button(self.yu,text="Stop animating",command=self.stoani).grid()
Button(self.yu,text="Start animating",command=self.staani).grid()
Button(self.yu,text="choose bgpic",command=self.bgpic).grid()
Button(self.yu,text="choose bgcolour",command=lambda:self.colourchoose("bgcolour")).grid()
self.menubar=Menu(self.master)
self.file = Menu(self.menubar, tearoff=0)
self.file.add_command(label="New",command=lambda:self.new())
self.file.add_command(label="Open",command=lambda:self.ope())
self.file.add_command(label="Save",command=lambda:self.save())
self.file.add_command(label="Exit", command=self.master.quit)
self.menubar.add_cascade(label="File", menu=self.file)
self.edit = Menu(self.menubar, tearoff=0)
self.edit.add_command(label="Undo",command=lambda:self.undo())
self.menubar.add_cascade(label="Edit", menu=self.edit)
self.helpa = Menu(self.menubar, tearoff=0)
self.helpa.add_command(label="About")
self.menubar.add_cascade(label="Help", menu=self.helpa)
self.run=Menu(self.menubar,tearoff=0)
self.run.add_command(label="Run",command=lambda:self.starun())
self.menubar.add_cascade(label="Run",menu=self.run)
self.master.config(menu=self.menubar)
def listen(self,x,y):
self.screen.listen()
print(x,y)
def dragging(self,x, y):
if(self.k!=""):
self.bh+=self.k+f"self.draw.ondrag(None)\n{self.k}self.draw.goto({x},{y})\n{self.k}self.draw.ondrag(self.dragging)\n"
elif(self.func!=""):
self.func+=f" self.draw.goto({x},{y})\n"
else:
self.draw.ondrag(None)
self.s+=f"self.draw.goto({x},{y})#\n"
self.draw.goto(x, y)
self.draw.ondrag(self.dragging)
self.sav=0
def up(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.sety(self.draw.ycor()+15)\n"
elif(self.func!=""):
self.func+=" self.draw.sety(self.draw.ycor()+15)\n"
else:
self.draw.sety(self.draw.ycor()+15)
self.s+="self.draw.sety(self.draw.ycor()+15)#\n"
self.sav=0
def down(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.sety(self.draw.ycor()-15)\n"
elif(self.func!=""):
self.func+=" self.draw.sety(self.draw.ycor()-15)\n"
else:
self.draw.sety(self.draw.ycor()-15)
self.s+="self.draw.sety(self.draw.ycor()-15)#\n"
self.sav=0
def right(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.setx(self.draw.xcor()+15)\n"
elif(self.func!=""):
self.func+=" self.draw.setx(self.draw.xcor()+15)\n"
else:
self.draw.setx(self.draw.xcor()+15)
self.s+="self.draw.setx(self.draw.xcor()+15)#\n"
self.sav=0
def left(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.setx(self.draw.xcor()-15)\n"
elif(self.func!=""):
self.func+=" self.draw.setx(self.draw.xcor()-15)\n"
else:
self.draw.setx(self.draw.xcor()-15)
self.s+="self.draw.setx(self.draw.xcor()-15)#\n"
self.sav=0
def fwd(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.fd(15)\n"
elif(self.func!=""):
self.func+=" self.draw.fd(15)\n"
else:
self.draw.fd(15)
self.s+="self.draw.fd(15)#\n"
self.sav=0
def bwd(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.bk(15)\n"
elif(self.func!=""):
self.func+=" self.draw.bk(15)\n"
else:
self.draw.bk(15)
self.s+="self.draw.bk(15)#\n"
self.sav=0
def rotlt(self):
try:
int(self.deg.get())
if(self.i==0):
if(self.k!=""):
self.bh+=self.k+"self.draw.lt(int("+self.deg.get()+"))\n"+self.k+"self.angle+=int("+self.deg.get()+")\n"
elif(self.func!=""):
self.func+=" self.draw.lt(int("+self.deg.get()+"))\n"+" self.angle+=int("+self.deg.get()+")\n"
else:
self.s+="self.draw.lt(int("+self.deg.get()+"))#\n"
self.draw.lt(int(self.deg.get()))
self.headings[self.curob["text"][18:]]+=int(self.deg.get())
else:
if(self.k!=""):
self.bh+=f"""{self.k}im=Image.open('"""+self.file+f"""')
{self.k}im=im.convert('RGBA')
{self.k}self.headings[self.curob["text"][18:]]+=int("""+self.deg.get()+f""")
{self.k}im=im.rotate(self.headings[self.curob["text"][18:]],expand=1)
{self.k}dst_im = Image.new("RGBA",im.size,"white")
{self.k}dst_im.paste( im, (0,0),im)
{self.k}dst_im.save("a.gif",transparency=0)
{self.k}self.screen.addshape("a.gif")
{self.k}self.draw.shape("a.gif")
"""
self.bh+=self.k+"self.draw.lt(int("+self.deg.get()+"))\n"
elif(self.func_name!=""):
self.func+=""" im=Image.open('"""+self.file+"""')
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=int("""+self.deg.get()+""")
im=im.rotate("""+str(self.headings[self.curob["text"][18:]])+""",expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
"""
self.func+=" self.draw.lt(int("+self.deg.get()+"))\n"
else:
im=Image.open(self.file)
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=int(self.deg.get())
im=im.rotate(self.headings[self.curob["text"][18:]],expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
self.s+="""im=Image.open('"""+self.file+"""')
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=int("""+self.deg.get()+""")
im=im.rotate("""+str(self.headings[self.curob["text"][18:]])+""",expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
"""
self.s+="self.draw.lt(int("+self.deg.get()+"))#\n"
self.draw.lt(int(self.deg.get()))
self.sav=0
except:
pass
def rotrt(self):
try:
int(self.deg1.get())
if(self.i==0):
if(self.k!=""):
self.bh+=self.k+"self.draw.rt(int("+self.deg1.get()+"))\n"+self.k+"self.angle+=int("+self.deg.get()+")\n"
elif(self.func!=""):
self.func+=" self.draw.rt(int("+self.deg1.get()+"))\n"+" self.angle+=int("+self.deg.get()+")\n"
else:
self.s+="self.draw.rt(int("+self.deg1.get()+"))\n"
self.draw.rt(int(self.deg1.get()))
self.headings[self.curob["text"][18:]]+=360-int(self.deg.get())
else:
if(self.k!=""):
self.bh+=f"""{self.k}im=Image.open('"""+self.file+f"""')
{self.k}im=im.convert('RGBA')
{self.k}self.headings[self.curob["text"][18:]]+=360-int("""+self.deg1.get()+f""")
{self.k}im=im.rotate(self.headings[self.curob["text"][18:]],expand=1)
{self.k}dst_im = Image.new("RGBA",im.size,"white")
{self.k}dst_im.paste( im, (0,0),im)
{self.k}dst_im.save("a.gif",transparency=0)
{self.k}self.screen.addshape("a.gif")
{self.k}self.draw.shape("a.gif")
"""
self.bh+=self.k+"self.draw.rt(int("+self.deg1.get()+"))\n"
elif(self.func_name!=""):
self.func+=""" im=Image.open('"""+self.file+"""')
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=360-int("""+self.deg1.get()+""")
im=im.rotate("""+str(self.headings[self.curob["text"][18:]])+""",expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
"""
self.func+=" self.draw.rt(int("+self.deg1.get()+"))\n"
else:
im=Image.open(self.file)
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=360-int(self.deg1.get())
im=im.rotate(self.headings[self.curob["text"][18:]],expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
self.s+="""im=Image.open('"""+self.file+"""')
im=im.convert('RGBA')
self.headings[self.curob["text"][18:]]+=360-int("""+self.deg1.get()+""")
im=im.rotate("""+str(self.headings[self.curob["text"][18:]])+""",expand=1)
dst_im = Image.new("RGBA",im.size,"white")
dst_im.paste( im, (0,0),im)
dst_im.save("a.gif",transparency=0)
self.screen.addshape("a.gif")
self.draw.shape("a.gif")
"""
self.s+="self.draw.rt(int("+self.deg1.get()+"))#\n"
self.draw.rt(int(self.deg1.get()))
self.sav=0
except:
raise
def show(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.st()\n"
elif(self.func!=""):
self.func+=" self.draw.st()\n"
else:
self.draw.st()
self.s+="self.draw.st()#\n"
self.sav=0
def hide(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.ht()\n"
elif(self.func!=""):
self.func+=" self.draw.st()\n"
else:
self.draw.ht()
self.s+="self.draw.ht()#\n"
self.sav=0
def stamp(self):
if(self.k!=""):
self.bh+=self.k+"self.stam=self.draw.stamp()\n"
elif(self.func!=""):
self.func+=" self.stam=self.draw.stamp()\n"
else:
self.stam=self.draw.stamp()
self.s+="self.stam=self.draw.stamp()#\n"
self.sav=0
def clprestmp(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.clearstamp(self.stam)\n"
elif(self.func!=""):
self.func+=" self.draw.clearstamp(self.stam)\n"
else:
self.draw.clearstamp(self.stam)
self.s+="self.draw.clearstamp(self.stam)#\n"
self.sav=0
def clallstmp(self):
if(self.k!=""):
self.bh+=self.k+"self.draw.clearstamps()\n"
elif(self.func!=""):
self.func+=" self.draw.clearstamps()\n"
else:
self.draw.clearstamps()
self.s+="self.draw.clearstamps()#\n"
self.sav=0
def goto(self):
import random
try:
float(self.gotopos.get().split(',')[0])
if(self.k!=""):
if(self.gotopos.get()=="random"):
x=random.randint(0,list(self.screen.screensize())[0])
y=random.randint(0,list(self.screen.screensize())[1])
self.bh+=self.k+f"self.draw.goto({x},{y})\n"
else:
self.bh+=self.k+f"self.draw.goto(float({self.gotopos.get().split(',')[0]}),float({self.gotopos.get().split(',')[1]}))\n"
elif(self.func!=""):
if(self.gotopos.get()=="random"):
x=random.randint(0,list(self.screen.screensize())[0])
y=random.randint(0,list(self.screen.screensize())[1])
self.func+=f" self.draw.goto({x},{y})\n"
else:
self.func+=f" self.draw.goto(float({self.gotopos.get().split(',')[0]}),float({self.gotopos.get().split(',')[1]}))\n"
else:
if(self.gotopos.get()=="random"):
x=random.randint(0,list(self.screen.screensize())[0])
y=random.randint(0,list(self.screen.screensize())[1])
self.draw.goto(x,y)
self.s+=f"self.draw.goto({x},{y})#\n"
else:
self.draw.goto(float(self.gotopos.get().split(",")[0]),float(self.gotopos.get().split(",")[1]))
self.s+=f"self.draw.goto(float({self.gotopos.get().split(',')[0]}),float({self.gotopos.get().split(',')[1]}))#\n"
self.sav=0
except:
pass
def circ(self):
try:
int(self.circpar.get().split(',')[0])
if(self.k!=""):
if(len(self.circpar.get().split(','))==3):
self.bh+=self.k+f"self.draw.circle(int({self.circpar.get().split(',')[0]}),int({self.circpar.get().split(',')[1]}),int({self.circpar.get().split(',')[2]}))\n"
elif(len(self.circpar.get().split(','))==2):
self.bh+=self.k+f"self.draw.circle(int({self.circpar.get().split(',')[0]}),int({self.circpar.get().split(',')[1]}))\n"
elif(len(self.circpar.get().split(','))==1):
self.bh+=self.k+f"self.draw.circle(int({self.circpar.get().split(',')[0]}))\n"
elif(self.func!=""):
if(len(self.circpar.get().split(','))==3):
self.func+=f" self.draw.circle(int({self.circpar.get().split(',')[0]}),int({self.circpar.get().split(',')[1]}),int({self.circpar.get().split(',')[2]}))\n"
elif(len(self.circpar.get().split(','))==2):
self.func+=f" self.draw.circle(int({self.circpar.get().split(',')[0]}),int({self.circpar.get().split(',')[1]}))\n"
| |
            # Here we can choose different algorithms
# _get_split_mse _get_split_info
for split in unique), key=lambda x: x[0])
return mse, feature, split, split_avg
def _choose_category_point(self, X: List[List[str]], y: List[Num],
idx: List[int], feature: int):
"""Iterate each xi and classify x, y into two parts,
and the best category point is the xi when we get minimum info or mse.
Parameters:
X {list} -- 2d list with str
y {list} -- 1d list object with int or float
idx {list} -- indexes, 1d list object with int
feature {int} -- Feature number
Returns:
tuple -- The best choice of mse, feature, category point and average
could be None
"""
        # The feature cannot be split if there is only one unique element.
unique = set([X[i][feature] for i in idx])
if len(unique) == 1:
return None
# If there is only one category left, None should be returned
# In case of empty split
# unique.remove(min(unique))
# We don't need this for categorical situation
# Get split point which has min mse
mse, category_idx, split_avg = min(
(self._get_category_mse(X, y, idx, feature, category)
# Here we can choose different algorithms
# _get_category_mse _get_category_info
for category in unique), key=lambda x: x[0])
# logger.debug(split_avg)
return mse, feature, category_idx, split_avg
def _detect_feature_type(self, x: list) -> int:
"""
To determine the type of the feature
:param x: 1d list with int, float or str
:return: 0 or 1, 0 represents continuous, 1 represents discrete
"""
for item in x:
if item is not None:
return 1 if type(item) == str else 0
def _get_column(self, x: list, i: int) -> list:
return [item[i] for item in x]
def _choose_feature(self, X: list, y: List[Num], idx: List[int]):
"""Choose the feature which has minimum mse or minimal info.
Parameters:
X {list} -- 2d list with int, float or str
y {list} -- 1d list with int or float
idx {list} -- indexes, 1d list object with int
Returns:
tuple -- (feature number, classify point or split point,
average, idx_classify)
could be None
"""
m = len(X[0])
# x[0] selects the first row
# Compare the mse of each feature and choose best one.
split_rets = []
for i in range(m):
if self.feature_types[i]:
item = self._choose_category_point(X, y, idx, i)
else:
item = self._choose_split_point(X, y, idx, i)
if item is not None:
split_rets.append(item)
# If it is None, it will not be considered as the chosen feature
        # Terminate if no feature can be split
if not split_rets: # split_rets == []
return None
_, feature, split, split_avg = min(split_rets, key=lambda x: x[0])
# Get split idx into two pieces and empty the idx.
idx_split = [[], []]
# it contains different groups, and produces idx for next step
while idx:
i = idx.pop()
# logger.debug(i)
xi = X[i][feature]
if self.feature_types[feature]:
if xi == split:
idx_split[0].append(i)
else:
idx_split[1].append(i)
else:
if xi < split:
idx_split[0].append(i)
else:
idx_split[1].append(i)
return feature, split, split_avg, idx_split
def _expr2literal(self, expr: list) -> str:
"""Auxiliary function of print_rules.
Parameters:
expr {list} -- 1D list like [Feature, op, split]
            Op: In the continuous case, -1 means less than, 1 means equal or greater than.
                In the discrete case, -1 means equal, 1 means not equal.
Returns:
str
"""
feature, op, split = expr
if type(split) == float or type(split) == int:
op = ">=" if op == 1 else "<"
return "Feature%d %s %.4f" % (feature, op, split)
if type(split) == str:
op = "!=" if op == 1 else "=="
return "Feature%d %s %s" % (feature, op, split)
def _get_rules(self):
"""Get the rules of all the decision tree leaf nodes.
        Print the rules with a breadth-first search over the tree:
        leaves at shallower positions are printed first, then from
        left to right.
Expr: 1D list like [Feature, op, split]
Rule: 2D list like [[Feature, op, split], score]
"""
que = [[self.root, []]]
self.rules = []
# Breadth-First Search
while que:
nd, exprs = que.pop(0)
# Generate a rule when the current node is leaf node
if not(nd.left or nd.right):
# Convert expression to text
literals = list(map(self._expr2literal, exprs))
self.rules.append([literals, nd.score])
# Expand when the current node has left child
if nd.left:
rule_left = copy.copy(exprs)
rule_left.append([nd.feature, -1, nd.split])
que.append([nd.left, rule_left])
# Expand when the current node has right child
if nd.right:
rule_right = copy.copy(exprs)
rule_right.append([nd.feature, 1, nd.split])
que.append([nd.right, rule_right])
# logger.debug(self.rules)
def fit(self, X: list, y: list, max_depth: int =5, min_samples_split: int =2):
"""Build a regression decision tree.
Note:
            At least one column in X must have more than 2 unique elements.
            y cannot consist of a single repeated value.
Parameters:
X {list} -- 2d list object with int, float or str
y {list} -- 1d list object with int or float
            max_depth {int} -- The maximum depth of the tree. (default: {5})
min_samples_split {int} -- The minimum number of samples required
to split an internal node (default: {2})
"""
        # max_depth reflects the height of the tree: at most how many decision
        # steps we will take.
        # min_samples_split determines how many times a feature can be split;
        # the smaller min_samples_split is, the more splits are allowed.
# Initialize with depth, node, indexes
self.root = Node()
que = [[0, self.root, list(range(len(y)))]]
# logger.debug(que)
# Breadth-First Search
        # The tree is built level by level, so breadth-first search is used.
depth = 0
m = len(X[0])
self.feature_types = [self._detect_feature_type(self._get_column(X, i))
for i in range(m)]
logger.debug(self.feature_types)
while que:
depth, nd, idx = que.pop(0)
# Terminate loop if tree depth is more than max_depth
            # Initially, que is a list with a single element; if no new elements
            # are appended, the loop runs only once before que becomes empty.
if depth == max_depth:
break
# Stop split when number of node samples is less than
# min_samples_split or Node is 100% pure.
            if len(idx) < min_samples_split or len(set(
                    map(lambda i: y[i], idx))) == 1:
continue
# Stop split if no feature has more than 2 unique elements
feature_rets = self._choose_feature(X, y, idx)
if feature_rets is None:
continue
# if feature_rets is None, it means that for X's with these idx,
# the split should be stopped
# Split
nd.feature, nd.split, split_avg, idx_split = feature_rets
nd.left = Node(split_avg[0])
nd.right = Node(split_avg[1])
que.append([depth+1, nd.left, idx_split[0]])
que.append([depth+1, nd.right, idx_split[1]])
# Update tree depth and rules
self.height = depth
self._get_rules()
def print_rules(self):
"""Print the rules of all the regression decision tree leaf nodes.
"""
for i, rule in enumerate(self.rules):
literals, score = rule
print("Rule %d: " % i, ' | '.join(
literals) + ' => split_hat %.4f' % score)
def _predict(self, row: list) -> float:
"""Auxiliary function of predict.
Arguments:
row {list} -- 1D list with int, float or str
Returns:
int or float -- prediction of yi
"""
nd = self.root
while nd.left and nd.right:
if self.feature_types[nd.feature]:
# categorical split
if row[nd.feature] == nd.split:
nd = nd.left
else:
nd = nd.right
else:
# continuous split
if row[nd.feature] < nd.split:
nd = nd.left
else:
nd = nd.right
return nd.score
# nd.score must be float?
def predict(self, X: list) -> List[float]:
"""Get the prediction of y.
        Prediction in batch.
Arguments:
X {list} -- 2d list object with int, float or str
Returns:
list -- 1d list object with int or float
"""
return [self._predict(Xi) for Xi in X]
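
# Minimal usage sketch (added for illustration). It assumes only the
# RegressionTree class defined above and a tiny hand-made dataset with one
# continuous and one categorical feature; it is not executed on import and is
# not one of the benchmark tests below.
def _example_regression_tree():
    X = [
        [1.0, "red"],
        [2.0, "red"],
        [3.0, "blue"],
        [4.0, "blue"],
    ]
    y = [1.0, 1.2, 3.1, 3.3]
    reg = RegressionTree()
    reg.fit(X=X, y=y, max_depth=2, min_samples_split=2)
    reg.print_rules()
    # Each prediction is the average of the training targets in the matched leaf.
    return reg.predict(X)
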
@run_time
def test_continuous_continuous():
"""test: x is continous, and y is continuous"""
print("Tesing the accuracy of RegressionTree...")
# Load data
X, y = load_boston_house_prices()
# Split data randomly, train set rate 70%
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=10)
# Train model
reg = RegressionTree()
reg.fit(X=X_train, y=y_train, max_depth=4)
# Show rules
reg.print_rules()
# Model accuracy
get_r2(reg, X_test, y_test)
@run_time
def test_arbitrary_continuous():
"""test: x is continuous or categorical, and y is continuous"""
print("Tesing the accuracy of RegressionTree...")
# Load data
X, y = load_bike_sharing_data()
logger.debug(X[0])
logger.debug([max(y), sum(y)/len(y), min(y)])
# Split data randomly, train set rate 70%
X_train, X_test, y_train, | |
(isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
        # We save the current modeling results
bestModelingResults = []
bestModelingResults.append(matrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
bestModelingResults.append(allAccuracies)
# We recreate some things to apply the Matrix method in the permutation
# section
rowLengthOfBothMatrixes = len(self.y_samplesList)
currentMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
temporalRow.append(1)
for currentIndependentVariable in range(0, numberOfIndependentVariables):
temporalRow.append(self.x_samplesList[row][0]**(currentIndependentVariable+1))
currentMatrix_x.append(temporalRow)
originalMatrix_x = currentMatrix_x
from .MortrackML_Library import Combinations
possibleCombinations = []
for n in range (0, len(originalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
if (evtfbmip == True):
# ----------------------------------------------------------------------------------------------- #
# ----- We now get all possible combinations/permutations with the elements of our equation ----- #
# ----------------------------------------------------------------------------------------------- #
customizedPermutations = combinations.getCustomizedPermutationList()
customizedPermutations.pop(0) # We remove the null value
customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
for actualPermutation in range(0, len(customizedPermutations)):
newOriginalMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
for column in range(0, len(customizedPermutations[actualPermutation])):
temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
newOriginalMatrix_x.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
# We define a variable to save the search patterns in original matrix x
possibleCombinations = []
for n in range (0, len(newOriginalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
                    # We assign the current arrangement of the matrix x variables
                    # that we want to study, in order to evaluate its resulting
                    # regression coefficients
for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
for column in range(0, len(newOriginalMatrix_x[0])):
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == column):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
                    # We get the transposed matrix of matrix X. NOTE: we copy
                    # matrix x into a temporary variable because, remember, child
                    # and parent inheritance is ignored in Python when using classes
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
                    # WE GET MATRIX A. NOTE: we copy matrix x into a temporary
                    # variable because, remember, child and parent inheritance is
                    # ignored in Python when using classes
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
                    # WE GET MATRIX g. NOTE: we copy matrix x into a temporary
                    # variable because, remember, child and parent inheritance is
                    # ignored in Python when using classes
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = self.y_samplesList
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
                    # We get matrix b, which will contain the coefficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(newOriginalMatrix_x[0])):
trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
newNumberOfIndependentVariables = len(currentMatrix_x[0])
predictedData = []
for row in range(0, len(self.y_samplesList)):
temporalRow = []
actualIc = currentMatrix_b[0][0]
for currentIndependentVariable in range(0, (newNumberOfIndependentVariables-1)):
actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][0]**(currentIndependentVariable+1)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(self.y_samplesList)
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = self.y_samplesList[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
                    # We save the current modeling results if they are better
                    # than the current best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n")
# We include all the reports of all the models studied to the reporting
# variable that contains the report of the best fitting model and we
# then return it
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getMultiplePolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method returns the best fitting model of a dataset to predict its
behavior through a Multiple Polynomial Regression that may have any number
    of independent variables (x). This method obtains a model through the
following equation format:
y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[-1.745717777706403e-08],
[0],
[0.07581354676648289],
[-0.00104662847289827],
[3.942075523087618e-06],
[-14.202436859894078],
[0.670002091817878],
[-0.009761974914994198],
[-5.8006065221068606e-15]]
accuracyFromTraining =
91.33822971744071
predictedData =
[[14.401799310251064],
[10.481799480368835],
[7.578466505722503],
[13.96195814877683],
[10.041958318894615],
[7.1386253442482825],
[15.490847097061135],
[11.57084726717892],
[8.667514292532587],
[18.073281006823265],
[14.15328117694105],
[11.249948202294718],
[20.794074729782523],
[16.874074899900307],
[13.970741925253975],
[22.73804311765818],
[18.818043287775964],
[15.914710313129632]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getMultiplePolynomialRegression(self, orderOfThePolynomial, evtfbmip=False, isClassification=True):
# We import the libraries we want to use and we create the class we
# use from it
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
# We define the variables to use within our code
numberOfIndependentVariables = len(self.x_samplesList[0])
rowLengthOfBothMatrixes = len(self.y_samplesList)
matrix_x = []
# MATRIX X MATHEMATICAL PROCEDURE to add the 1's in the first column of
# each row and to add the additional columns that will represent the
# polynomials that we want to get according to the input value of
# this method's argument "orderOfThePolynomial"
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
temporalRow.append(1)
for actualIndependentVariable in range(0, numberOfIndependentVariables):
xMultiplicationsResult = 1
for actualOrder in range(0, | |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 15:42:14 2020
@author: pierr
"""
import scipy.special as sp
import math as ma
import pandas as pd
import numpy as np
import scipy.stats as st
from scipy.optimize import minimize
def sim_gam_par(n, k, α, θ):
"""
Sample from a Gamma-Pareto model.
Parameters
----------
n : int
sample size.
k : float
shape parameter of the Gamma distribution.
α : float
        Tail index of the Pareto distribution.
    θ : float
        Threshold parameter.
Returns
-------
array
        A sample drawn from the Gamma-Pareto distribution.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_gam_par(1000, k, α, θ)
"""
β = θ / (k + α)
r = α*sp.gamma(k)* sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k) / \
(1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))
gamma_rv = st.gamma(k)
par_rv = st.pareto(α)
binom_rv = st.binom(1, r)
par_rvs = θ * par_rv.rvs(size = n)
binom_rvs = binom_rv.rvs(size = n)
gamma_rvs = β * gamma_rv.ppf(sp.gammainc(k, θ / β) *\
np.random.uniform(size = n))
return(binom_rvs * gamma_rvs + (1 - binom_rvs) * par_rvs)
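
# Illustrative sketch (added, not part of the original module): draw a sample
# from the Gamma-Pareto model and check that the empirical share of points
# below the threshold θ is close to the mixing weight r defined in sim_gam_par.
def _demo_sim_gam_par(n=10000, k=0.5, α=0.5, θ=5.0):
    X = sim_gam_par(n, k, α, θ)
    return np.mean(X < θ)
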
def logp_gam_par(X):
"""
Likelihood function of the Gamma-Pareto model.
Parameters
----------
X : Array
Insurance losses.
Returns
-------
function
Allows the evaluation of the likelihood in the parameters provided the
data.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_gam_par(100, k, α, θ)
logp = logp_gam_par(X)
logp(np.array([k, α, θ]))
costFn = lambda parms: -logp(parms)
bnds = ((0, None), (0, None), (0, None))
θ0 = (1, 1, 1)
minRes = minimize(costFn, θ0,bounds=bnds)
minRes
"""
# parms = particles.to_numpy()[4]
def logp(parms):
k, α, θ = tuple(parms)
if np.all(parms > 0):
β = θ / (k + α)
r = α*sp.gamma(k)* sp.gammainc(k,θ / β) * np.exp(k+α)*(k+α)**(-k) / \
(1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))
if β > 0 and r > 0 and r < 1:
X1 = X[X < θ]
X2 = X[X >= θ]
F1 = sp.gammainc(k, θ / β)
return(len(X1) * (np.log(r) - np.log(F1) - np.log(sp.gamma(k)) - \
k * np.log(β)) - sum(X1) / β +\
(k-1) * sum(np.log(X1)) + len(X2) *(np.log(1-r) +\
np.log(α) + α * np.log(θ)) - (α + 1) * sum(np.log(X2))
)
else:
return(-np.inf)
else:
return(-np.inf)
return logp
def logd_gam_par(parms):
"""
density function of the Gamma-Pareto model.
Parameters
----------
parms : ndArray
particles.
Returns
-------
function
Allows the evaluation of the density functions for multiple parameter
values.
"""
k, α, θ = parms[:,0], parms[:,1], parms[:,2]
β = θ / (k + α)
r = α*sp.gamma(k)* sp.gammainc(k,θ / β) * np.exp(k+α)*(k+α)**(-k) / \
(1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))
F1 = sp.gammainc(k, θ / β)
def logd(x):
res = np.zeros(len(α))
        s = np.logical_and.reduce([β > 0, r > 0, r < 1])  # elementwise AND of all three conditions
s1 = np.logical_and(s, x < θ)
s2 = np.logical_and(s, x >= θ)
res1 = np.log(r[s1]) - np.log(F1[s1]) - np.log(sp.gamma(k[s1])) - \
k[s1] * np.log(β[s1]) - x / β[s1] + (k[s1]-1) * np.log(x)
res2 = (np.log(1-r[s2]) + np.log(α[s2]) + α[s2] * \
np.log(θ[s2])) - (α[s2] + 1) * np.log(x)
res[np.where(s1)] = res1
res[np.where(s2)] = res2
res[np.where(np.invert(s))] = -np.inf
return(res)
return logd
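
# Illustrative sketch (added): logd_gam_par is vectorised over particles, so a
# whole cloud of parameter draws can be scored at a single loss value x in one
# call; the uniform ranges below are arbitrary placeholders.
def _demo_logd_gam_par(x=2.0, n_particles=100):
    parms = np.column_stack([
        np.random.uniform(0.1, 2.0, n_particles),   # k
        np.random.uniform(0.1, 2.0, n_particles),   # α
        np.random.uniform(1.0, 10.0, n_particles),  # θ
    ])
    logd = logd_gam_par(parms)
    return logd(x)  # array of log-densities, one per particle
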
def sim_wei_par(n, k, α, θ):
"""
Sample from a Weibull-Pareto model.
Parameters
----------
n : int
sample size.
k : float
shape parameter of the Weibull distribution.
α : float
        Tail index of the Pareto distribution.
    θ : float
        Threshold parameter.
Returns
-------
array
A sample drawn from the Weibull-Pareto distribution.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_wei_par(1000, k, α, θ)
"""
β = (k / (k + α))**(1 / k) * θ
r = (α / θ)*(1 - np.exp(-(k + α) / k))\
/ (α / θ + (k / θ)*np.exp(-(k + α) / k))
weib_rv = st.weibull_min(k)
par_rv = st.pareto(α)
binom_rv = st.binom(1, r)
par_rvs = θ * par_rv.rvs(size = n)
binom_rvs = binom_rv.rvs(size = n)
weib_rvs = β * weib_rv.ppf(weib_rv.cdf(θ / β) *\
np.random.uniform(size = n))
return(binom_rvs * weib_rvs + (1 - binom_rvs) * par_rvs)
def logp_wei_par(X):
"""
Likelihood function of the Weibull-Pareto model.
Parameters
----------
X : Array
Insurance losses.
Returns
-------
function
Allows the evaluation of the likelihood in the parameters provided the
data.
Example
-------
k, α, θ = 1/2, 1/2, 5
X = sim_wei_par(1000, k, α, θ)
logp = logp_wei_par(X)
    logp(np.array([k, α, θ]))
costFn = lambda parms: -logp(parms)
bnds = ((0, None), (0, None), (0, None))
θ0 = (1, 1, 1)
minRes = minimize(costFn, θ0,bounds=bnds)
minRes
"""
# parms = particles.to_numpy()[4]
def logp(parms):
k, α, θ = tuple(parms)
if np.all(parms > 0):
β = (k / (k + α))**(1 / k) * θ
r = (α / θ)*(1 - np.exp(-(k + α) / k)) / (α / θ + (k / θ) *\
np.exp(-(k+α)/k))
if β > 0 and r > 0 and r < 1:
X1 = X[X < θ]
X2 = X[X >= θ]
F1 = 1 - np.exp(-(θ / β)**k)
return(len(X1) * \
( np.log(r) + np.log(k) - k * np.log(β) ) + \
(k-1) * sum(np.log(X1)) - sum( (X1/ β)**k ) -\
len(X1) * np.log(F1) + len(X2) *(np.log(1-r) +\
np.log(α) + α * np.log(θ)) - (α + 1) * sum(np.log(X2))
)
else:
return(-np.inf)
else:
return(-np.inf)
return logp
def logd_wei_par(parms):
"""
density function of the Weibull-Pareto model.
Parameters
----------
parms : ndArray
particles.
Returns
-------
function
Allows the evaluation of the density functions for multiple parameter
values.
"""
k, α, θ = parms[:,0], parms[:,1], parms[:,2]
β = (k / (k + α))**(1 / k) * θ
r = (α / θ)*(1 - np.exp(-(k + α) / k)) / (α / θ + (k / θ) * \
np.exp(-(k+α)/k))
F1 = 1 - np.exp(-(θ / β)**k)
def logd(x):
res = np.zeros(len(α))
        s = np.logical_and.reduce([β > 0, r > 0, r < 1])  # elementwise AND of all three conditions
s1 = np.logical_and(s, x < θ)
s2 = np.logical_and(s, x >= θ)
res1 = (np.log(r[s1]) + np.log(k[s1]) - k[s1] * np.log(β[s1])) + \
(k[s1]-1) * np.log(x) - (x/ β[s1]) ** k[s1] - \
np.log(F1[s1])
res2 = (np.log(1-r[s2]) + np.log(α[s2]) + α[s2] * \
np.log(θ[s2])) - (α[s2] + 1) * np.log(x)
res[np.where(s1)] = res1
res[np.where(s2)] = res2
res[np.where(np.invert(s))] = - np.inf
return(res)
return logd
def phi(z):
"""
Cdf of unit normal distribution
Parameters
----------
z : Float
Returns
-------
CDF of unit normal distribution
"""
return( 1 / 2 * (1 + sp.erf(z /np.sqrt(2))))
def sim_lnorm_par(n, σ, α, θ):
"""
Sample from a lognormal-Pareto model.
Parameters
----------
n : int
sample size.
σ : float
shape parameter of the lognormal distribution.
α : float
Tail index of the Pareto distribution.
θ: float
Threshold parameter
Returns
-------
array
A sample drawn from the lognormal-Pareto distribution.
Example
-------
n, σ, α, θ =10, 1/2, 1/2, 5
X = sim_lnorm_par(n, σ, α, θ)
"""
μ = np.log(θ) - α * σ**2
r = (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) ) / \
(α * σ *np.sqrt(2* ma.pi) *phi(α * σ) + np.exp(-(α*σ)**2 / 2))
lnorm_rv = st.lognorm(s = σ, scale = np.exp(μ))
par_rv = st.pareto(α)
binom_rv = st.binom(1, r)
par_rvs = θ * par_rv.rvs(size = n)
binom_rvs = binom_rv.rvs(size = n)
lnorm_rvs = lnorm_rv.ppf(lnorm_rv.cdf(θ) *\
np.random.uniform(size = n))
return(binom_rvs * lnorm_rvs + (1 - binom_rvs) * par_rvs)
def logp_lnorm_par(X):
"""
Likelihood function of the lognormal-Pareto model.
Parameters
----------
X : Array
Insurance losses.
Returns
-------
function
Allows the evaluation of the likelihood in the parameters provided the
data.
Example
-------
n, σ, α, θ =100, 1/2, 1/2, 5
X = sim_lnorm_par(n, σ, α, θ)
logp = logp_lnorm_par(X)
logp(np.array([σ, α, θ]))
costFn = lambda parms: -logp(parms)
bnds = ((0, None), (0, None), (0, None))
θ0 = (1, 1, 3)
minRes = minimize(costFn, θ0,bounds=bnds)
minRes
"""
def logp(parms):
σ, α, θ = tuple(parms)
if np.all(parms > 0):
| |
"""Calculates the same set of injections for a set of networks.
Based on the old calculate_injections.py and gwbench's multi_network.py example script, this processes the injections (e.g. in data_raw_injections/) in union for each network in a set and saves the results (e.g. in data_processed_injections/). This is faster than the previous implementation if detectors are shared between the networks because the detector responses are only calculated once.
Usage:
See the example in run_unified_injections.py.
License:
BSD 3-Clause License
Copyright (c) 2022, <NAME>.
All rights reserved except for those for the gwbench code which remain reserved
by <NAME>; the gwbench code is included in this repository for convenience.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typing import List, Set, Dict, Tuple, Optional, Union
from numpy.typing import NDArray
import os
import numpy as np
from gwbench import network
from gwbench.basic_relations import f_isco_Msolar
from useful_functions import (
without_rows_w_nan,
parallel_map,
HiddenPrints,
PassEnterExit,
)
from generate_injections import filter_bool_for_injection, fisco_obs_from_Mc_eta
from network_subclass import NetworkExtended
def multi_network_results_for_injection(
network_specs: List[List[str]],
inj: NDArray[np.float64],
base_params: Dict[str, Union[int, float]],
wf_dict: Dict[str, Union[str, Optional[Dict[str, str]], bool, int]],
deriv_dict: Dict[
str,
Union[
str,
Tuple[str, ...],
List[Set[str]],
bool,
Optional[Dict[str, Union[float, str, int]]],
],
],
misc_settings_dict: Dict[str, Optional[int]],
debug: bool = False,
) -> Dict[str, Tuple[float, ...]]:
"""Returns the benchmark as a dict of tuples for a single injection using the inj and base_params and the settings dicts through the networks in network_specs.
If a single network fails an injection, then the unified results will save it as a np.nan in all networks so that the universe of injections is the same between each network. TODO: check that this doesn't bias the results away from loud sources that we care about.
Args:
network_specs: Networks to pass to gwbench's multi-network pipeline.
inj: Injection parameters for each injection, e.g. chirp mass and luminosity distance.
base_params: Common parameters among injections, e.g. time of coalesence.
wf_dict: Waveform dictionary of model name and options, also contains the science case string.
deriv_dict: Derivative options dictionary.
misc_settings_dict: Options for gwbench, e.g. whether to account for Earth's rotation about its axis.
debug: Whether to debug.
Returns:
Dict[str, Tuple[float]]: Keys are repr(network_spec). Each value is (redshift, SNR, logMc err, logDL err, eta err, iota err, 90%-credible sky-area in sqr degrees) or a tuple of seven np.nan's if the injection failed in any network.
"""
output_if_injection_fails = dict(
(
(repr(network_spec), tuple(np.nan for _ in range(7)))
for network_spec in network_specs
)
)
varied_keys = [
"Mc",
"eta",
"chi1x",
"chi1y",
"chi1z",
"chi2x",
"chi2y",
"chi2z",
"DL",
"iota",
"ra",
"dec",
"psi",
"z",
]
varied_params = dict(zip(varied_keys, inj))
z = varied_params.pop("z")
inj_params = dict(**base_params, **varied_params)
# subtlety, if V+ (or aLIGO) is present in any network, then f is truncated for V+ for all networks (since f is shared below). TODO: figure out how common this is
aLIGO_or_Vplus_used = ("aLIGO" in deriv_dict["unique_tecs"]) or (
"V+" in deriv_dict["unique_tecs"]
)
if not filter_bool_for_injection(
inj,
misc_settings_dict["redshifted"],
wf_dict["coeff_fisco"],
wf_dict["science_case"],
aLIGO_or_Vplus_used=aLIGO_or_Vplus_used,
debug=debug,
):
return output_if_injection_fails
fmin, fmax = 5.0, wf_dict["coeff_fisco"] * fisco_obs_from_Mc_eta(
inj_params["Mc"],
inj_params["eta"],
redshifted=misc_settings_dict["redshifted"],
z=z,
)
if aLIGO_or_Vplus_used:
fmax_bounds = (11, 1024)
else:
fmax_bounds = (6, 1024)
fmax = float(max(min(fmax, fmax_bounds[1]), fmax_bounds[0]))
# df linearly transitions from 1/16 Hz (fine from B&S2022) to 10 Hz (coarse to save computation time)
df = ((fmax - fmax_bounds[0]) / (fmax_bounds[1] - fmax_bounds[0])) * 10 + (
(fmax_bounds[1] - fmax) / (fmax_bounds[1] - fmax_bounds[0])
) * 1 / 16
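    # Worked example of the transition above: with fmax_bounds = (6, 1024),
    # fmax = 6 Hz gives df = 1/16 Hz and fmax = 1024 Hz gives df = 10 Hz;
    # intermediate values interpolate linearly between the two.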
f = np.arange(fmin, fmax + df, df)
# passing parameters to gwbench, hide stdout (i.e. prints) if not debugging, stderr should still show up
if not debug:
entry_class = HiddenPrints
else:
entry_class = PassEnterExit
with entry_class():
# precalculate the unique components (detector derivatives and PSDs) common among all networks
# calculate the unique detector response derivatives
loc_net_args = (
network_specs,
f,
inj_params,
deriv_dict["deriv_symbs_string"],
wf_dict["wf_model_name"],
wf_dict["wf_other_var_dic"],
deriv_dict["conv_cos"],
deriv_dict["conv_log"],
misc_settings_dict["use_rot"],
misc_settings_dict["num_cores"],
)
if not deriv_dict["numerical_over_symbolic_derivs"]:
unique_loc_net = network.unique_locs_det_responses(*loc_net_args)
else:
# update eta if too close to its maximum value for current step size, https://en.wikipedia.org/wiki/Chirp_mass#Definition_from_component_masses
eta_max = 0.25
deriv_dict["numerical_deriv_settings"]["step"] = min(
deriv_dict["numerical_deriv_settings"]["step"],
(eta_max - inj_params["eta"]) / 10,
)
unique_loc_net = network.unique_locs_det_responses(
*loc_net_args,
deriv_dict["numerical_deriv_settings"]["step"],
deriv_dict["numerical_deriv_settings"]["method"],
deriv_dict["numerical_deriv_settings"]["order"],
deriv_dict["numerical_deriv_settings"]["n"],
)
# get the unique PSDs for the various detector technologies
unique_tec_net = network.unique_tecs(network_specs, f)
# perform the analysis of each network from the unique components
multi_network_results_dict = dict()
for i, network_spec in enumerate(network_specs):
# if net only exists here, then the subclass is pointless. re-factor to justify using subclass
net = network.Network(network_spec)
# get the correct network from the unique components calculated above. this avoids the need to .set_net_vars, .set_wf_vars, .setup_ant_pat_lpf_psds, .calc_det_responses, .calc_det_responses_derivs_num/sym,
net.get_det_responses_psds_from_locs_tecs(unique_loc_net, unique_tec_net)
# calculate the network SNRs
net.calc_snrs(only_net=misc_settings_dict["only_net"])
# calculate the Fisher and covariance matrices, then error estimates
net.calc_errors(only_net=misc_settings_dict["only_net"])
# calculate the 90%-credible sky area (in [deg]^2)
net.calc_sky_area_90(only_net=misc_settings_dict["only_net"])
# TODO: if using gwbench 0.7, still introduce a limit on net.cond_num based on machine precision errors that mpmath is blind to
# if the FIM is zero, then the condition number is NaN and matrix is ill-conditioned (according to gwbench). TODO: try catching this by converting warnings to errors following <https://stackoverflow.com/questions/5644836/in-python-how-does-one-catch-warnings-as-if-they-were-exceptions#30368735> --> 54 and 154 converged in a second run
if not net.wc_fisher:
# unified injection rejection so that cosmological resampling can be uniform across networks, this now means that the number of injections is equal to that of the weakest network in the set but leads to a better comparison
if debug:
print(
f"Rejected injection for {network_spec} and, therefore, all networks in the multi-network because of ill-conditioned FIM ({net.fisher}) with condition number ({net.cond_num}) greater than 1e15"
)
return dict(
(repr(network_spec_2), tuple(np.nan for _ in range(7)))
for network_spec_2 in network_specs
)
# multi_network_results_dict[repr(network_spec)] = tuple(
# np.nan for _ in range(7)
# )
else:
# convert sigma_cos(iota) into sigma_iota
abs_err_iota = abs(net.errs["cos_iota"] / np.sin(inj_params["iota"]))
multi_network_results_dict[repr(network_spec)] = (
z,
net.snr,
net.errs["log_Mc"],
net.errs["log_DL"],
net.errs["eta"],
abs_err_iota,
net.errs["sky_area_90"],
)
return multi_network_results_dict
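
# Rough usage sketch (added for illustration only). The waveform model,
# derivative symbols, and technology labels below are placeholders, not values
# prescribed by this repository -- see run_unified_injections.py for the real
# configuration. Only dictionary keys actually read by
# multi_network_results_for_injection are filled in.
def _example_single_injection_call(network_specs, inj, base_params):
    # inj is expected to be a 14-element array ordered like varied_keys above.
    wf_dict = {
        "wf_model_name": "tf2",   # placeholder waveform model name
        "wf_other_var_dic": None,
        "science_case": "BNS",    # placeholder science-case label
        "coeff_fisco": 4,         # placeholder cutoff multiplier
    }
    deriv_dict = {
        "deriv_symbs_string": "Mc eta DL iota ra dec psi",  # placeholder symbols
        "conv_cos": ("dec", "iota"),
        "conv_log": ("Mc", "DL"),
        "unique_tecs": ["CE-40", "CE-20"],  # placeholder technology labels
        "numerical_over_symbolic_derivs": False,
        "numerical_deriv_settings": None,   # unused for symbolic derivatives
    }
    misc_settings_dict = {
        "redshifted": True,
        "use_rot": True,
        "num_cores": None,
        "only_net": True,
    }
    return multi_network_results_for_injection(
        network_specs, inj, base_params,
        wf_dict, deriv_dict, misc_settings_dict, debug=True,
    )
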
def multi_network_results_for_injections_file(
results_file_name: str,
network_specs: List[List[str]],
injections_file: str,
num_injs_per_redshift_bin: int,
process_injs_per_task: Optional[int],
base_params: Dict[str, Union[int, float]],
wf_dict: Dict[str, Union[str, Optional[Dict[str, str]], bool, int]],
deriv_dict: Dict[
str,
Union[
str,
Tuple[str, ...],
List[Set[str]],
bool,
Optional[Dict[str, Union[float, str, int]]],
],
],
misc_settings_dict: Dict[str, Optional[int]],
data_path: str = "./data_processed_injections/task_files/",
debug: int = False,
) -> None:
"""Runs the injections in the given file through the given set of networks and saves them as a .npy file.
    Benchmarks the first process_injs_per_task injections from injections_file (plus base_params) for each of the networks in network_specs, using the science_case and other settings in the three dicts provided. Saves the results as a .npy file named results_file_name at data_path, with shape (number of surviving injections, 7) and columns (redshift, SNR, logMc err, logDL err, eta err, iota err, 90%-credible sky-area in sqr degrees).
Args:
results_file_name: Output .npy filename template for | |
widgets.MenuBox(
"menu_funcionario_vinculado_{0}".format(doce[2]),
I(_class="fas fa-ellipsis-v"),
widgets.MenuOption("Visualizar", **{
"_class": "botao_visualizar_funcionario wave_on_click",
"_data-id_funcionario": doce[2],
}),
onOpen=self.bind_menu_docente
),
**{"drag_and_drop": False}
)
)
html_botao_falta = A(
I(_class="fas fa-calendar-check"),
**{
"_class": "botao_faltas icon_button",
"_title": "Faltas dos Alunos",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_href": window.PhanterPWA.XWAY("registro-de-faltas", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
)
self.lista_de_disciplinas = None
if data_turma.lista_de_disciplinas is not None:
self.lista_de_disciplinas = data_turma.lista_de_disciplinas
if "Educação Infantil" in data_turma.ensinos:
if len(data_turma.ensinos) > 1:
html_botao_falta = DIV(
I(_class="fas fa-calendar-check"),
**{
"_class": "botao_faltas_modal icon_button",
"_title": "Faltas dos Alunos",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_data-tem_educacao_infantil": True,
"_href": window.PhanterPWA.XWAY("registro-de-faltas", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
)
else:
html_botao_falta = DIV(
I(_class="fas fa-calendar-check"),
**{
"_class": "botao_faltas_modal icon_button",
"_title": "Faltas dos Alunos",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_data-tem_educacao_infantil": False,
"_href": window.PhanterPWA.XWAY("registro-de-faltas", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
)
card = DIV(
LABEL(data_turma.turma, " (", data_turma.quant_alunos, " Alunos)", _for="phanterpwa-card-panel-control-{0}".format(data_turma.id)),
DIV(
DIV(
DIV(
DIV(
H3("Corpo Discente"),
table,
_class="p-row"
),
corpo_docente,
_class="phanterpwa-card-panel-control-content"
),
DIV(
DIV(
I(_class="fas fa-user-graduate"),
**{
"_class": "botao_alunos icon_button actived",
"_title": "Adicione alunos à turma",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id
}
),
A(
I(_class="fas fa-table"),
**{
"_class": "botao_diario_notas icon_button",
"_title": "Diário de Notas da Turma",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_disabled": disabled,
"_href": window.PhanterPWA.XWAY("diario-de-notas", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
),
A(
I(_class="fas fa-clipboard-list"),
**{
"_class": "botao_ficha_avaliativa icon_button",
"_title": "Ficha Avaliativa",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_disabled": disabled,
"_href": window.PhanterPWA.XWAY("ficha-avaliativa", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
),
html_botao_falta,
A(
I(_class="fas fa-calculator"),
**{
"_class": "icon_button",
"_title": "Total de Faltas",
"_href": window.PhanterPWA.XWAY("frequencia", "total-de-faltas", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
),
DIV(
I(_class="fas fa-chalkboard-teacher"),
**{
"_class": "botao_professores icon_button",
"_title": "Disciplinas e seus respectivos professores",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id
}
),
DIV(
I(_class="fas fa-clock"),
**{
"_class": "botao_horario icon_button",
"_title": "Horário da Turma",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
"_disabled": "disabled"
}
),
DIV(
I(_class="fas fa-sort-alpha-up"),
**{
"_class": "botao_auto_ordernar icon_button",
"_title": "Ordenar alunos automaticamente",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
}
),
DIV(
I(_class="fas fa-award"),
**{
"_class": "botao_definir_resultados icon_button",
"_title": "Definir os resultados finais dos alunos",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id,
}
),
A(
I(_class="fas fa-print"),
**{
"_class": "botao_imprimir_lista_alunos_turma icon_button",
"_title": "Imprimir lista de alunos",
"_href": window.PhanterPWA.XWAY("imprimir", "turma", self.id_escola, self.ano_letivo, data_turma.id, **{"_retornar": "turma-especifica"})
}
),
_class="phanterpwa-card-panel-control-buttons"
),
_class="phanterpwa-card-panel-control-wrapper has_buttons"
),
_class="phanterpwa-card-panel-control-container"
),
_id="phanterpwa-card-panel-control-{0}".format(data_turma.id),
_class="phanterpwa-card-panel-control"
)
return card
def bind_menu_docente(self):
jQuery(
".botao_visualizar_funcionario"
).off(
"click.botao_visualizar_funcionario"
).on(
"click.botao_visualizar_funcionario",
lambda: self.get_visualizar_funcionario(this)
)
def binds_menu_aluno(self):
jQuery(
".botao_visualizar_aluno"
).off(
"click.botao_visualizar_aluno"
).on(
"click.botao_visualizar_aluno",
lambda: self.get_visualizar_aluno(this)
)
jQuery(
".botao_remover_matricula_da_turma"
).off(
"click.botao_remover_matricula"
).on(
"click.botao_remover_matricula",
lambda: self.modal_confirmar_remover_matricula_da_turma(this)
)
jQuery(
".botao_deletar_matricula"
).off(
"click.botao_revogar_matricula"
).on(
"click.botao_revogar_matricula",
lambda: self.modal_confirmar_deletar_matricula(this)
)
jQuery(
".botao_aluno_desistente"
).off(
"click.botao_aluno_desistente"
).on(
"click.botao_aluno_desistente",
lambda: self.abrir_modal_desistencia(this)
)
jQuery(
".botao_transferir_aluno"
).off(
"click.botao_transferir_aluno"
).on(
"click.botao_transferir_aluno",
lambda: self.abrir_modal_transferencia(this)
)
jQuery(
".botao_documentos"
).off(
"click.botao_documentos"
).on(
"click.botao_documentos",
lambda: self.abrir_modal_documentos(this)
)
def binds_painel_da_turma(self):
jQuery(
".botao_alunos"
).off(
"click.add_alunos"
).on(
"click.add_alunos",
lambda: self.get_alunos_remanejar(this)
)
jQuery(
".botao_professores"
).off(
"click.modal_disciplinas_professores"
).on(
"click.modal_disciplinas_professores",
lambda: self.get_disciplinas_professores(this)
)
jQuery(
".botao_auto_ordernar"
).off(
"click.botao_auto_ordernar"
).on(
"click.botao_auto_ordernar",
lambda: self.ordenagem_automatica(this)
)
jQuery(
".botao_definir_resultados"
).off(
"click.botao_definir_resultados"
).on(
"click.botao_definir_resultados",
lambda: self.modal_definir_resultados(this)
)
jQuery(
".botao_faltas_modal"
).off(
"click.botao_faltas_modal"
).on(
"click.botao_faltas_modal",
lambda: self.abrir_modal_faltas(this)
)
def ordenagem_automatica(self, widget_instance):
id_turma = jQuery(widget_instance).data("id_turma")
window.PhanterPWA.ApiServer.PUT(
"api",
"turma",
self.id_escola,
self.ano_letivo,
"auto-ordenar",
id_turma,
onComplete=lambda: self.get_dados_turma()
)
def after_drop(self, ev, el):
t_ordem = list()
id_turma = jQuery(el).data("id_turma")
id_matricula = jQuery(el).data("id_matricula")
jQuery(
"#phanterpwa-card-panel-control-{0}".format(id_turma)
).find(
".phanterpwa-widget-table-data"
).each(lambda: t_ordem.append(
jQuery(this).data("id_matricula")
))
lista_ordem = []
for li in t_ordem:
if li not in lista_ordem:
lista_ordem.append(li)
window.PhanterPWA.GET(
"api",
"signforms",
"phanterpwa-form-turma",
onComplete=lambda data, ajax_status: self.depois_de_assinar_ordenacao_aluno(
data, ajax_status, id_turma, id_matricula, lista_ordem)
)
def update_turma(self, data, ajax_status, id_turma, json):
json = data.responseJSON
if ajax_status == "success":
for x in json.data.turmas:
card = self.xml_card(x, x.id)
card.html_to("#turma_e_turmas_{0}".format(x.id))
self.binds_painel_da_turma()
def depois_de_assinar_ordenacao_aluno(self, data, ajax_status, id_turma, id_matricula, lista_de_matriculas):
json = data.responseJSON
if ajax_status == "success":
formdata = __new__(FormData())
formdata.append(
"csrf_token",
json.csrf
)
formdata.append(
"matriculas",
JSON.stringify(lista_de_matriculas)
)
window.PhanterPWA.ApiServer.PUT(**{
'url_args': ["api", "turma", self.id_escola, self.ano_letivo, "ordenar", id_turma, id_matricula],
'form_data': formdata,
'onComplete': lambda: self.get_dados_turma()
})
def get_alunos_remanejar(self, widget_instance):
id_turma = jQuery(widget_instance).data("id_turma")
window.PhanterPWA.GET(
"api",
"turma",
self.id_escola,
self.ano_letivo,
"remanejar",
id_turma,
onComplete=lambda data, ajax_status: self.modal_add_alunos(data, ajax_status, id_turma)
)
def modal_add_alunos(self, data, ajax_status, id_turma):
json = data.responseJSON
if ajax_status == "success":
data_turma = json.data
table = XTABLE(
"remanejar-table-{0}".format(data_turma.id_turma),
XTRH(
"remanejar-table-head-{0}".format(data_turma.id_turma),
"Foto",
"Nome",
"Data de Nascimento",
"Turma Atual",
"Endereço"
)
)
cont_alunos = 0
titulo_adc = False
for x in data_turma.alunos_sem_turmas:
cont_alunos += 1
data_de_nascimento_formated = ""
if x.alunos.data_nasc is not None and x.alunos.data_nasc is not js_undefined:
data_de_nascimento_formated = validations.format_iso_date_datetime(
x.alunos.data_nasc, "dd/MM/yyyy", "date"
)
endereco_imagem_aluno = "/static/{0}/images/user.png".format(
window.PhanterPWA.VERSIONING
)
if x.alunos.foto3x4 is not None and x.alunos.foto3x4 is not js_undefined:
endereco_imagem_aluno = "{0}/api/alunos/{1}/image".format(
window.PhanterPWA.ApiServer.remote_address,
x.alunos.foto3x4
)
if not titulo_adc:
table.append(TR(TD("ALUNOS SEM TURMA", _colspan=5, _style="text-align: center; background-color: #d5d5d5;")))
titulo_adc = True
table.append(
XTRD(
"turma-table-data-remaneja-sem_turma-{0}".format(x.alunos.id),
TD(
DIV(IMG(_src=endereco_imagem_aluno, _style="width:25px; height:25px; border-radius: 100%;")),
),
TD(
x.alunos.aluno,
),
TD(
data_de_nascimento_formated,
),
I(_class="fas fa-times-circle", _style="color: red;"),
TD(
x.alunos.endereco,
),
**{
"drag_and_drop": False,
"_class": "linha_add_aluno_remanejar link",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id_turma,
"_data-id_aluno": x.alunos.id,
"_data-id_serie": x.series.id,
"_data-id_matricula": x.matriculas.id
}
)
)
titulo_alun = False
for x in data_turma.alunos_outras_turmas:
cont_alunos += 1
data_de_nascimento_formated = ""
if x.alunos.data_nasc is not None and x.alunos.data_nasc is not js_undefined:
data_de_nascimento_formated = validations.format_iso_date_datetime(
x.alunos.data_nasc, "dd/MM/yyyy", "date"
)
endereco_imagem_aluno = "/static/{0}/images/user.png".format(
window.PhanterPWA.VERSIONING
)
if x.alunos.foto3x4 is not None and x.alunos.foto3x4 is not js_undefined:
endereco_imagem_aluno = "{0}/api/alunos/{1}/image".format(
window.PhanterPWA.ApiServer.remote_address,
x.alunos.foto3x4
)
if not titulo_alun:
table.append(TR(TD("ALUNOS EM OUTRA TURMA", _colspan=5, _style="text-align: center; background-color: #d5d5d5;")))
titulo_alun = True
table.append(
XTRD(
"turma-table-data-remanejar-tem_turma-{0}".format(x.alunos.id),
TD(
DIV(IMG(_src=endereco_imagem_aluno, _style="width:25px; height:25px; border-radius: 100%;")),
),
TD(
x.alunos.aluno,
),
TD(
data_de_nascimento_formated,
),
TD(
x.turmas.turma
),
TD(
x.alunos.endereco,
),
**{
"drag_and_drop": False,
"_class": "linha_add_aluno_remanejar link",
"_data-id_escola": self.id_escola,
"_data-id_ano_letivo": self.ano_letivo,
"_data-id_turma": data_turma.id_turma,
"_data-id_aluno": x.alunos.id,
"_data-id_serie": x.series.id,
"_data-id_matricula": x.matriculas.id
}
)
)
if cont_alunos > 0:
content = DIV(
table,
forms.FormWidget(
"turma",
"id_turma",
**{
"value": data_turma.id_turma,
"type": "hidden",
"form": "turma",
}
),
_class="p-row"
)
self.modal_remanejar = modal.Modal(
"#modal_turma_case",
**{
"title": CONCATENATE(
DIV("REMANEJAR ALUNOS PARA A TURMA ", STRONG(str(data_turma.turma).upper())),
DIV("Basta clicar sobre o(a) aluno(a) para ele(a) ser remanejado(a)",
_style="color: red; font-size: 0.7em;"),
),
"content": content,
"form": "turma",
"header_height": 65,
}
)
self.modal_remanejar.open()
jQuery(".linha_add_aluno_remanejar").off(
"click.adicionar_aluno_na_turma"
).on(
"click.adicionar_aluno_na_turma",
lambda: self._on_click_add_aluno_turma(this)
)
forms.SignForm("#form-turma", after_sign=lambda: forms.ValidateForm("#form-turma"))
else:
window.PhanterPWA.flash("Não há alunos matriculados para a série da turma")
def _on_click_add_aluno_turma(self, widget_instance):
id_matricula = jQuery(widget_instance).data("id_matricula")
id_turma = jQuery(widget_instance).data("id_turma")
jQuery(widget_instance).fadeOut()
form_remanejar = jQuery("#form-turma")[0]
form_remanejar = __new__(FormData(form_remanejar))
window.PhanterPWA.ApiServer.PUT(**{
'url_args': ["api", "turma", self.id_escola, self.ano_letivo, "remanejar", id_turma, id_matricula],
'form_data': form_remanejar,
'onComplete': lambda data, ajax_status:
self.update_turma(data, ajax_status, id_turma)
})
def modal_confirmar_remover_matricula_da_turma(self, el):
id_matricula = jQuery(el).data("id_matricula")
content = DIV(
P("Atenção, o(a) aluno(a) sairá da turma permanentemente."
" Se ele possuir notas, faltas, pareceres, etc. Ou seja, ",
"informações dele vinculados a esta turma, tudo será perdido."),
P("Tem certeza que deseja remover esta matrícula?"),
_class="p-row"
)
footer = DIV(
forms.FormButton(
"remover_matricula_da_turma_sim",
"Sim",
_class="btn-autoresize wave_on_click waves-phanterpwa"
),
forms.FormButton(
"remover_matricula_da_turma_nao",
"Não",
_class="btn-autoresize wave_on_click waves-phanterpwa"
),
_class='phanterpwa-form-buttons-container'
)
self.modal_remover_matricula_da_turma = modal.Modal(
"#modal_remover_matricula_da_turma_detalhe_container",
**{
"title": "Retirar aluno da turma",
"content": content,
"footer": footer,
"header_height": 50,
"footer_height": 65,
"form": "remover_matricula_da_turma"
}
)
self.modal_remover_matricula_da_turma.open()
jQuery("#phanterpwa-widget-form-form_button-remover_matricula_da_turma_sim").off(
"click.adicionar_remover_matricula_da_turma_sim"
).on(
"click.adicionar_remover_matricula_da_turma_sim",
lambda: self._on_click_remover_matricula_da_turma(id_matricula)
)
jQuery("#phanterpwa-widget-form-form_button-remover_matricula_da_turma_nao").off(
"click.adicionar_remover_matricula_da_turma_nao"
).on(
"click.adicionar_remover_matricula_da_turma_nao",
lambda: self.modal_remover_matricula_da_turma.close()
)
def _on_click_remover_matricula_da_turma(self, id_matricula):
window.PhanterPWA.PUT(
"api",
"matricula",
"remover-da-turma",
self.id_escola,
self.ano_letivo,
id_matricula,
onComplete=self.depois_de_deletar_ou_retirar
)
self.modal_remover_matricula_da_turma.close()
def modal_confirmar_deletar_matricula(self, el):
id_matricula = jQuery(el).data("id_matricula")
content = DIV(
P("Atenção, a matrícula do aluno será deletada permanentemente, se ",
"o mesmo possuir notas, faltas, ficha individual, boletim, etc."
" Tudo isto será perdido, inclusive o mesmo sairá da turma permanentemente."),
P("Tem certeza que deseja deletar esta matrícula?"),
_class="p-row"
)
footer = DIV(
forms.FormButton(
"deletar_matricula_sim",
"Sim",
_class="btn-autoresize wave_on_click waves-phanterpwa"
),
forms.FormButton(
"deletar_matricula_nao",
"Não",
_class="btn-autoresize wave_on_click waves-phanterpwa"
),
_class='phanterpwa-form-buttons-container'
)
self.modal_deletar_matricula = modal.Modal(
"#modal_deletar_matricula_detalhe_container",
**{
"title": "Deletar Matrícula do(a) aluno(a)",
"content": content,
"footer": footer,
"header_height": 50,
"footer_height": 65,
"form": "deletar_matricula"
}
)
self.modal_deletar_matricula.open()
jQuery("#phanterpwa-widget-form-form_button-deletar_matricula_sim").off(
"click.adicionar_deletar_matricula_sim"
).on(
"click.adicionar_deletar_matricula_sim",
lambda: self._on_click_deletar_matricula(id_matricula)
)
jQuery("#phanterpwa-widget-form-form_button-deletar_matricula_nao").off(
"click.adicionar_deletar_matricula_nao"
).on(
"click.adicionar_deletar_matricula_nao",
lambda: self.modal_deletar_matricula.close()
)
def _on_click_deletar_matricula(self, id_matricula):
window.PhanterPWA.DELETE(
"api",
"matricula",
"deletar",
self.id_escola,
self.ano_letivo,
id_matricula,
onComplete=self.depois_de_deletar_ou_retirar
)
self.modal_deletar_matricula.close()
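    # Note (sketch, not from this file): assuming PhanterPWA's PUT/DELETE helpers
    # join their positional arguments into a URL path, the two calls above would
    # target routes roughly like
    #     PUT    /api/matricula/remover-da-turma/<id_escola>/<ano_letivo>/<id_matricula>
    #     DELETE /api/matricula/deletar/<id_escola>/<ano_letivo>/<id_matricula>
    # The exact URL scheme depends on how window.PhanterPWA builds requests and is
    # not confirmed here.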
def depois_de_deletar_ou_retirar(self, data, ajax_status):
if ajax_status == "success":
self.get_dados_turma()
def get_disciplinas_professores(self, widget_instance):
id_turma = jQuery(widget_instance).data("id_turma")
window.PhanterPWA.GET(
"api",
"turma",
self.id_escola,
self.ano_letivo,
"disciplinas-professores",
id_turma,
onComplete=lambda data, ajax_status: self.modal_disciplinas_professores(data, ajax_status, id_turma)
)
def modal_disciplinas_professores(self, data, ajax_status, id_turma):
json = data.responseJSON
if ajax_status == "success":
disciplinas_turmas = json.data
if disciplinas_turmas.disciplinas is not None:
table = XTABLE(
"disciplinas-professores-table-{0}".format(id_turma),
import numpy as np
import os
import time
import shutil
import netifaces
import traceback
import logging
from hpbandster.core.nameserver import NameServer, nic_name_to_host
from hpbandster.core.result import logged_results_to_HBS_result
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
from autoPyTorch.pipeline.base.pipeline import Pipeline
from autoPyTorch.pipeline.nodes import MetricSelector
from autoPyTorch.utils.config.config_option import ConfigOption, to_bool
from autoPyTorch.utils.config.config_condition import ConfigCondition
from autoPyTorch.core.hpbandster_extensions.bohb_ext import BOHBExt
from autoPyTorch.core.hpbandster_extensions.hyperband_ext import HyperBandExt
from autoPyTorch.core.worker import AutoNetWorker
from autoPyTorch.components.training.budget_types import BudgetTypeTime, BudgetTypeEpochs, BudgetTypeTrainingTime
import copy
class OptimizationAlgorithm(SubPipelineNode):
def __init__(self, optimization_pipeline_nodes):
"""OptimizationAlgorithm pipeline node.
        It will either run the optimization algorithm (BOHB or Hyperband, as defined in the config) or start workers
Each worker will run the provided optimization_pipeline and will return the output
of the pipeline_result_node to the optimization algorithm
Train:
The optimization_pipeline will get the following inputs:
{hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type}
The pipeline_result_node has to provide the following outputs:
- 'loss': the optimization value (minimize)
- 'info': dict containing info for the respective training process
Predict:
The optimization_pipeline will get the following inputs:
{pipeline_config, X}
The pipeline_result_node has to provide the following outputs:
- 'Y': result of prediction for 'X'
Note: predict will not call the optimization algorithm
Arguments:
            optimization_pipeline {Pipeline} -- pipeline that will be optimized (hyperparameter)
            pipeline_result_node {PipelineNode} -- pipeline node that provides the results of the optimization_pipeline
"""
super(OptimizationAlgorithm, self).__init__(optimization_pipeline_nodes)
self.algorithms = {"bohb": BOHBExt,
"hyperband": HyperBandExt}
self.budget_types = dict()
self.budget_types["time"] = BudgetTypeTime
self.budget_types["epochs"] = BudgetTypeEpochs
self.budget_types["training_time"] = BudgetTypeTrainingTime
def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, result_loggers, dataset_info, shutdownables, refit=None):
"""Run the optimization algorithm.
Arguments:
pipeline_config {dict} -- The configuration of the pipeline.
X_train {array} -- The data
Y_train {array} -- The data
X_valid {array} -- The data
Y_valid {array} -- The data
result_loggers {list} -- List of loggers that log the result
dataset_info {DatasetInfo} -- Object with information about the dataset
shutdownables {list} -- List of objects that need to shutdown when optimization is finished.
Keyword Arguments:
refit {dict} -- dict containing information for refitting. None if optimization run should be started. (default: {None})
Returns:
dict -- Summary of optimization run.
"""
logger = logging.getLogger('autonet')
res = None
run_id, task_id = pipeline_config['run_id'], pipeline_config['task_id']
# Use tensorboard logger
if pipeline_config['use_tensorboard_logger'] and not refit:
import tensorboard_logger as tl
directory = os.path.join(pipeline_config['result_logger_dir'], "worker_logs_" + str(task_id))
os.makedirs(directory, exist_ok=True)
tl.configure(directory, flush_secs=5)
# Only do refitting
if (refit is not None):
logger.info("Start Refitting")
loss_info_dict = self.sub_pipeline.fit_pipeline(
hyperparameter_config=refit["hyperparameter_config"], pipeline_config=pipeline_config,
X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
budget=refit["budget"], rescore=refit["rescore"], budget_type=self.budget_types[pipeline_config['budget_type']],
optimize_start_time=time.time(), refit=True, hyperparameter_config_id=None, dataset_info=dataset_info)
logger.info("Done Refitting")
return {'optimized_hyperparameter_config': refit["hyperparameter_config"],
'budget': refit['budget'],
'loss': loss_info_dict['loss'],
'info': loss_info_dict['info']}
# Start Optimization Algorithm
try:
ns_credentials_dir, tmp_models_dir, network_interface_name = self.prepare_environment(pipeline_config)
# start nameserver if not on cluster or on master node in cluster
if task_id in [1, -1]:
NS = self.get_nameserver(run_id, task_id, ns_credentials_dir, network_interface_name)
ns_host, ns_port = NS.start()
if task_id != 1 or pipeline_config["run_worker_on_master_node"]:
self.run_worker(pipeline_config=pipeline_config, run_id=run_id, task_id=task_id, ns_credentials_dir=ns_credentials_dir,
network_interface_name=network_interface_name, X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
dataset_info=dataset_info, shutdownables=shutdownables)
# start BOHB if not on cluster or on master node in cluster
res = None
if task_id in [1, -1]:
self.run_optimization_algorithm(pipeline_config=pipeline_config, run_id=run_id, ns_host=ns_host,
ns_port=ns_port, nameserver=NS, task_id=task_id, result_loggers=result_loggers,
dataset_info=dataset_info, logger=logger)
res = self.parse_results(pipeline_config)
except Exception as e:
print(e)
traceback.print_exc()
finally:
            self.clean_up(pipeline_config, tmp_models_dir, ns_credentials_dir)
if res:
return res
return {'optimized_hyperparameter_config': dict(), 'budget': 0, 'loss': float('inf'), 'info': dict()}
def predict(self, pipeline_config, X):
"""Run the predict pipeline.
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
X {array} -- The data
Returns:
dict -- The predicted values in a dictionary
"""
result = self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X)
return {'Y': result['Y']}
# OVERRIDE
def get_pipeline_config_options(self):
options = [
ConfigOption("run_id", default="0", type=str, info="Unique id for each run."),
ConfigOption("task_id", default=-1, type=int, info="ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally."),
ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys()), info="Algorithm to use for config sampling."),
ConfigOption("budget_type", default="time", type=str, choices=list(self.budget_types.keys())),
ConfigOption("min_budget", default=lambda c: self.budget_types[c["budget_type"]].default_min_budget, type=float, depends=True, info="Min budget for fitting configurations."),
ConfigOption("max_budget", default=lambda c: self.budget_types[c["budget_type"]].default_max_budget, type=float, depends=True, info="Max budget for fitting configurations."),
ConfigOption("max_runtime",
default=lambda c: ((-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) * c["max_budget"])
if c["budget_type"] == "time" else float("inf"),
type=float, depends=True, info="Total time for the run."),
ConfigOption("num_iterations",
default=lambda c: (-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1)
if c["budget_type"] == "epochs" else float("inf"),
type=float, depends=True, info="Number of successive halving iterations."),
ConfigOption("eta", default=3, type=float, info='eta parameter of Hyperband.'),
ConfigOption("min_workers", default=1, type=int),
ConfigOption("working_dir", default=".", type="directory"),
ConfigOption("network_interface_name", default=self.get_default_network_interface_name(), type=str),
ConfigOption("memory_limit_mb", default=1000000, type=int),
ConfigOption("use_tensorboard_logger", default=False, type=to_bool),
ConfigOption("run_worker_on_master_node", default=True, type=to_bool),
ConfigOption("use_pynisher", default=True, type=to_bool)
]
return options
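    # Worked example (sketch) for the depends-on defaults above, with eta=3,
    # min_budget=100 and max_budget=900:
    #   -int(np.log(100 / 900) / np.log(3)) + 1 = -int(-2.0) + 1 = 3
    #   budget_type == "time"   -> max_runtime defaults to 3 * 900 = 2700, num_iterations to inf
    #   budget_type == "epochs" -> num_iterations defaults to 3, max_runtime to inf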
# OVERRIDE
def get_pipeline_config_conditions(self):
def check_runtime(pipeline_config):
return pipeline_config["budget_type"] != "time" or pipeline_config["max_runtime"] >= pipeline_config["max_budget"]
return [
ConfigCondition.get_larger_equals_condition("max budget must be greater than or equal to min budget", "max_budget", "min_budget"),
ConfigCondition("When time is used as budget, the max_runtime must be larger than the max_budget", check_runtime)
]
def get_default_network_interface_name(self):
"""Get the default network interface name
Returns:
str -- The default network interface name
"""
try:
return netifaces.gateways()['default'][netifaces.AF_INET][1]
except:
return 'lo'
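    # For reference (values hypothetical): netifaces.gateways() returns a mapping
    # along the lines of {'default': {netifaces.AF_INET: ('192.168.1.1', 'eth0')}},
    # so ['default'][netifaces.AF_INET][1] is the interface name ('eth0' here);
    # 'lo' is the fallback when no default gateway can be determined.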
def prepare_environment(self, pipeline_config):
"""Create necessary folders and get network interface name
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
Returns:
            tuple -- path to created directories and network interface name
"""
if not os.path.exists(pipeline_config["working_dir"]) and pipeline_config['task_id'] in [1, -1]:
try:
os.mkdir(pipeline_config["working_dir"])
except:
pass
tmp_models_dir = os.path.join(pipeline_config["working_dir"], "tmp_models_" + str(pipeline_config['run_id']))
ns_credentials_dir = os.path.abspath(os.path.join(pipeline_config["working_dir"], "ns_credentials_" + str(pipeline_config['run_id'])))
network_interface_name = self.get_nic_name(pipeline_config)
if os.path.exists(tmp_models_dir) and pipeline_config['task_id'] in [1, -1]:
shutil.rmtree(tmp_models_dir) # not used right now
if os.path.exists(ns_credentials_dir) and pipeline_config['task_id'] in [1, -1]:
shutil.rmtree(ns_credentials_dir)
return ns_credentials_dir, tmp_models_dir, network_interface_name
def clean_up(self, pipeline_config, tmp_models_dir, ns_credentials_dir):
"""Remove created folders
Arguments:
pipeline_config {dict} -- The pipeline config
tmp_models_dir {[type]} -- The path to the temporary models (not used right now)
ns_credentials_dir {[type]} -- The path to the nameserver credentials
"""
if pipeline_config['task_id'] in [1, -1]:
# Delete temporary files
if os.path.exists(tmp_models_dir):
shutil.rmtree(tmp_models_dir)
if os.path.exists(ns_credentials_dir):
shutil.rmtree(ns_credentials_dir)
def get_nameserver(self, run_id, task_id, ns_credentials_dir, network_interface_name):
"""Get the namesever object
Arguments:
run_id {str} -- The id of the run
task_id {int} -- An id for the worker
ns_credentials_dir {str} -- Path to ns credentials
network_interface_name {str} -- The network interface name
Returns:
NameServer -- The NameServer object
"""
if not os.path.isdir(ns_credentials_dir):
try:
os.mkdir(ns_credentials_dir)
except:
pass
return NameServer(run_id=run_id, nic_name=network_interface_name, working_directory=ns_credentials_dir)
def get_optimization_algorithm_instance(self, config_space, run_id, pipeline_config, ns_host, ns_port, loggers, previous_result=None):
"""Get an instance of the optimization algorithm
Arguments:
config_space {ConfigurationSpace} -- The config space to optimize.
run_id {str} -- An Id for the current run.
pipeline_config {dict} -- The configuration of the pipeline.
ns_host {str} -- Nameserver host.
ns_port {int} -- Nameserver port.
loggers {list} -- Loggers to log the results.
Keyword Arguments:
previous_result {Result} -- A previous result to warmstart the search (default: {None})
Returns:
Master -- An optimization algorithm.
"""
optimization_algorithm = self.algorithms[pipeline_config["algorithm"]]
kwargs = {"configspace": config_space, "run_id": run_id,
"eta": pipeline_config["eta"], "min_budget": pipeline_config["min_budget"], "max_budget": pipeline_config["max_budget"],
"host": ns_host, "nameserver": ns_host, "nameserver_port": ns_port,
"result_logger": combined_logger(*loggers),
"ping_interval": 10**6,
"working_directory": pipeline_config["working_dir"],
"previous_result": previous_result}
hb = optimization_algorithm(**kwargs)
return hb
def parse_results(self, pipeline_config):
"""Parse the results of the optimization run
Arguments:
pipeline_config {dict} -- The configuration of the pipeline.
Raises:
RuntimeError: An Error occurred when parsing the results.
Returns:
dict -- Dictionary summarizing the results
"""
try:
res = logged_results_to_HBS_result(pipeline_config["result_logger_dir"])
id2config = res.get_id2config_mapping()
incumbent_trajectory = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False)
except Exception as e:
raise RuntimeError("Error parsing results. Check results.json and output for more details. An empty results.json is usually caused by a misconfiguration of AutoNet.")
if (len(incumbent_trajectory['config_ids']) == 0):
return dict()
final_config_id = incumbent_trajectory['config_ids'][-1]
final_budget = incumbent_trajectory['budgets'][-1]
best_run = [r for r in res.get_runs_by_id(final_config_id) if r.budget == final_budget][0]
return {'optimized_hyperparameter_config': id2config[final_config_id]['config'],
'budget': final_budget,
'loss': best_run.loss,
'info': best_run.info}
def run_worker(self, pipeline_config, run_id, task_id, ns_credentials_dir, network_interface_name,
X_train, Y_train, X_valid, Y_valid, dataset_info, shutdownables):
""" Run the AutoNetWorker
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
run_id {str} -- An id for the run
task_id {int} -- An id for the worker
ns_credentials_dir {str} -- path to nameserver credentials
network_interface_name {str} -- the name of the network interface
X_train {array} -- The data
Y_train {array} -- The data
X_valid {array} -- The data
Y_valid {array} -- The data
            dataset_info {DatasetInfo} -- Object with information about the dataset
            shutdownables {list} -- List of objects that need to shutdown when optimization is finished.
        """
batch_shape=(num_batches,))
else:
raise ValueError('Unknown rotation augmentation : ' +
cfg.augment_rotation)
# Scale
# Choose random scales for each example
min_s = cfg.augment_scale_min
max_s = cfg.augment_scale_max
if cfg.augment_scale_anisotropic:
s = tf.random.uniform((num_batches, 3), minval=min_s, maxval=max_s)
else:
s = tf.random.uniform((num_batches, 1), minval=min_s, maxval=max_s)
symmetries = []
for i in range(3):
if cfg.augment_symmetries[i]:
symmetries.append(
tf.round(tf.random.uniform((num_batches, 1))) * 2 - 1)
else:
symmetries.append(tf.ones([num_batches, 1], dtype=tf.float32))
s *= tf.concat(symmetries, 1)
# Create N x 3 vector of scales to multiply with stacked_points
stacked_scales = tf.gather(s, batch_inds)
# Apply scales and Noise
if not is_test:
stacked_points = stacked_points * stacked_scales
noise = tf.random.normal(tf.shape(stacked_points),
stddev=cfg.augment_noise)
stacked_points = stacked_points + noise
return stacked_points, s, R
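    # Example of the symmetry handling above (sketch): with
    # cfg.augment_symmetries == [True, False, False], each cloud's x scale is
    # multiplied by a random sign in {-1, +1}, so roughly half of the clouds get
    # mirrored along x while y and z keep their sign. The scale tensor starts as
    # (num_batches, 3) when augment_scale_anisotropic is set, otherwise
    # (num_batches, 1), and multiplying by the three symmetry columns broadcasts
    # it to (num_batches, 3).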
def segmentation_inputs(self,
stacked_points,
stacked_features,
point_labels,
stacks_lengths,
batch_inds,
object_labels=None):
cfg = self.cfg
# Batch weight at each point for loss (inverse of stacks_lengths for each point)
min_len = tf.reduce_min(stacks_lengths, keepdims=True)
batch_weights = tf.cast(min_len, tf.float32) / tf.cast(
stacks_lengths, tf.float32)
stacked_weights = tf.gather(batch_weights, batch_inds)
# Starting radius of convolutions
r_normal = cfg.first_subsampling_dl * cfg.conv_radius
# Starting layer
layer_blocks = []
# Lists of inputs
input_points = []
input_neighbors = []
input_pools = []
input_upsamples = []
input_batches_len = []
# Loop over the blocks
for block_i, block in enumerate(cfg.architecture):
# Get all blocks of the layer
if not ('pool' in block or 'strided' in block or
'global' in block or 'upsample' in block):
layer_blocks += [block]
continue
# Convolution neighbors indices
deform_layer = False
if layer_blocks:
# Convolutions are done in this layer, compute the neighbors with the good radius
if np.any(['deformable' in blck for blck in layer_blocks]):
r = r_normal * cfg.deform_radius / cfg.conv_radius
deform_layer = True
else:
r = r_normal
conv_i = tf_batch_neighbors(stacked_points, stacked_points,
stacks_lengths, stacks_lengths, r)
else:
                # This layer only performs pooling, so no neighbors are required
conv_i = tf.zeros((0, 1), dtype=tf.int32)
# Pooling neighbors indices
# If end of layer is a pooling operation
if 'pool' in block or 'strided' in block:
# New subsampling length
dl = 2 * r_normal / cfg.conv_radius
# Subsampled points
pool_p, pool_b = tf_batch_subsampling(stacked_points,
stacks_lengths, dl)
# Radius of pooled neighbors
if 'deformable' in block:
r = r_normal * cfg.deform_radius / cfg.conv_radius
deform_layer = True
else:
r = r_normal
# Subsample indices
pool_i = tf_batch_neighbors(pool_p, stacked_points, pool_b,
stacks_lengths, r)
# Upsample indices (with the radius of the next layer to keep wanted density)
up_i = tf_batch_neighbors(stacked_points, pool_p,
stacks_lengths, pool_b, 2 * r)
else:
                # No pooling at the end of this layer, so no pooling indices are required
pool_i = tf.zeros((0, 1), dtype=tf.int32)
pool_p = tf.zeros((0, 3), dtype=tf.float32)
pool_b = tf.zeros((0,), dtype=tf.int32)
up_i = tf.zeros((0, 1), dtype=tf.int32)
# Reduce size of neighbors matrices by eliminating furthest point
# TODO :
conv_i = self.big_neighborhood_filter(conv_i, len(input_points))
pool_i = self.big_neighborhood_filter(pool_i, len(input_points))
up_i = self.big_neighborhood_filter(up_i, len(input_points) + 1)
# Updating input lists
input_points += [stacked_points]
input_neighbors += [conv_i]
input_pools += [pool_i]
input_upsamples += [up_i]
input_batches_len += [stacks_lengths]
# New points for next layer
stacked_points = pool_p
stacks_lengths = pool_b
# Update radius and reset blocks
r_normal *= 2
layer_blocks = []
# Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block:
break
# Return inputs
        # Batch unstacking (with first layer indices for optional classif loss)
stacked_batch_inds_0 = self.stack_batch_inds(input_batches_len[0])
        # Batch unstacking (with last layer indices for optional classif loss)
stacked_batch_inds_1 = self.stack_batch_inds(input_batches_len[-1])
if object_labels is None:
# list of network inputs
li = input_points + input_neighbors + input_pools + input_upsamples
li += [
stacked_features, stacked_weights, stacked_batch_inds_0,
stacked_batch_inds_1
]
li += [point_labels]
return li
else:
# Object class ind for each point
stacked_object_labels = tf.gather(object_labels, batch_inds)
# list of network inputs
li = input_points + input_neighbors + input_pools + input_upsamples
li += [
stacked_features, stacked_weights, stacked_batch_inds_0,
stacked_batch_inds_1
]
li += [point_labels, stacked_object_labels]
return li
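    # Layout of the returned list (derived from the code above): with L
    # subsampling layers it holds
    #   [points_1..points_L, neighbors_1..neighbors_L, pools_1..pools_L,
    #    upsamples_1..upsamples_L, features, weights, batch_inds_0,
    #    batch_inds_1, point_labels]
    # i.e. 4 * L + 5 tensors, plus stacked_object_labels when object_labels
    # is given.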
def transform(self,
stacked_points,
stacked_colors,
point_labels,
stacks_lengths,
point_inds,
cloud_inds,
is_test=False):
"""
[None, 3], [None, 3], [None], [None]
"""
cfg = self.cfg
# Get batch indice for each point
batch_inds = self.get_batch_inds(stacks_lengths)
# Augment input points
stacked_points, scales, rots = self.augment_input(
stacked_points, batch_inds, is_test)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1),
dtype=tf.float32)
# Get coordinates and colors
stacked_original_coordinates = stacked_colors[:, :3]
stacked_colors = stacked_colors[:, 3:]
# Augmentation : randomly drop colors
if cfg.in_features_dim in [4, 5]:
num_batches = batch_inds[-1] + 1
s = tf.cast(
tf.less(tf.random.uniform((num_batches,)), cfg.augment_color),
tf.float32)
stacked_s = tf.gather(s, batch_inds)
stacked_colors = stacked_colors * tf.expand_dims(stacked_s, axis=1)
# Then use positions or not
if cfg.in_features_dim == 1:
pass
elif cfg.in_features_dim == 2:
stacked_features = tf.concat(
(stacked_features, stacked_original_coordinates[:, 2:]), axis=1)
elif cfg.in_features_dim == 3:
stacked_features = stacked_colors
elif cfg.in_features_dim == 4:
stacked_features = tf.concat((stacked_features, stacked_colors),
axis=1)
elif cfg.in_features_dim == 5:
stacked_features = tf.concat(
(stacked_features, stacked_original_coordinates[:, 2:],
stacked_colors),
axis=1)
elif cfg.in_features_dim == 7:
stacked_features = tf.concat(
(stacked_features, stacked_original_coordinates,
stacked_colors),
axis=1)
else:
raise ValueError(
                'Only accepted input dimensions are 1, 2, 3, 4, 5 and 7 (without and with rgb/xyz)'
)
# Get the whole input list
input_list = self.segmentation_inputs(stacked_points, stacked_features,
point_labels, stacks_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots]
input_list += [point_inds, cloud_inds]
return input_list
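    # Quick reference for cfg.in_features_dim as handled above:
    #   1 -> constant 1 only      4 -> 1 + colors
    #   2 -> 1 + absolute z       5 -> 1 + absolute z + colors
    #   3 -> colors only          7 -> 1 + original xyz + colors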
def inference_begin(self, data):
attr = {'split': 'test'}
self.inference_data = self.preprocess(data, attr)
num_points = self.inference_data['search_tree'].data.shape[0]
self.possibility = np.random.rand(num_points) * 1e-3
self.test_probs = np.zeros(shape=[num_points, self.cfg.num_classes],
dtype=np.float16)
self.pbar = tqdm(total=self.possibility.shape[0])
self.pbar_update = 0
def inference_preprocess(self):
flat_inputs, point_inds, stacks_lengths = self.transform_inference(
self.inference_data)
self.test_meta = {}
self.test_meta['inds'] = point_inds
self.test_meta['lens'] = stacks_lengths
return flat_inputs
def inference_end(self, results):
results = tf.reshape(results, (-1, self.cfg.num_classes))
results = tf.nn.softmax(results, axis=-1)
results = results.cpu().numpy()
test_smooth = 0.98
inds = self.test_meta['inds']
l = 0
r = 0
for len in self.test_meta['lens']:
r += len
self.test_probs[inds[l:r]] = self.test_probs[
inds[l:r]] * test_smooth + (1 - test_smooth) * results[l:r]
l += len
self.pbar.update(self.possibility[self.possibility > 0.5].shape[0] -
self.pbar_update)
self.pbar_update = self.possibility[self.possibility > 0.5].shape[0]
if np.min(self.possibility) > 0.5:
self.pbar.close()
reproj_inds = self.inference_data['proj_inds']
predict_scores = self.test_probs[reproj_inds]
inference_result = {
'predict_labels': np.argmax(predict_scores, 1),
'predict_scores': predict_scores
}
self.inference_result = inference_result
return True
else:
return False
def transform_inference(self, data):
cfg = self.cfg
p_list = []
c_list = []
pl_list = []
pi_list = []
ci_list = []
n_points = 0
points = np.array(data['search_tree'].data)
while (n_points < cfg.batch_limit):
cloud_ind = 0
point_ind = int(np.argmin(self.possibility))
center_point = points[point_ind, :].reshape(1, -1)
pick_point = center_point.copy()
input_inds = data['search_tree'].query_radius(pick_point,
r=cfg.in_radius)[0]
n = input_inds.shape[0]
n_points += n
dists = np.sum(np.square(
(points[input_inds] - pick_point).astype(np.float32)),
axis=1)
tuckeys = np.square(1 - dists / np.square(cfg.in_radius))
tuckeys[dists > np.square(cfg.in_radius)] = 0
self.possibility[input_inds] += tuckeys
input_points = points[input_inds].copy() - pick_point
feat = data['feat']
t_normalize = self.cfg.get('t_normalize', {})
input_points, feat = trans_normalize(input_points, feat,
t_normalize)
if feat is None:
coords = input_points
else:
coords = np.hstack((input_points, feat[input_inds]))
coords[:, 2] += pick_point[:, 2]
if len(data['label'][input_inds].shape) == 2:
input_labels = data['label'][input_inds][:, 0]
else:
input_labels = data['label'][input_inds]
if n > 0:
p_list += [input_points]
c_list += [coords]
pl_list += [input_labels]
pi_list += [input_inds]
ci_list += [cloud_ind]
stacked_points = np.concatenate(p_list, axis=0),
stacked_colors = np.concatenate(c_list, axis=0),
point_labels = np.concatenate(pl_list, axis=0),
stacks_lengths = np.array([tp.shape[0] for tp in p_list],
dtype=np.int32),
point_inds = np.concatenate(pi_list, axis=0),
cloud_inds = np.array(ci_list, dtype=np.int32)
input_list = self.transform(
tf.convert_to_tensor(np.array(stacked_points[0], dtype=np.float32)),
tf.convert_to_tensor(np.array(stacked_colors[0], dtype=np.float32)),
tf.convert_to_tensor(np.array(point_labels[0], dtype=np.int32)),
tf.convert_to_tensor(np.array(stacks_lengths[0], dtype=np.int32)),
tf.convert_to_tensor(np.array(point_inds[0], dtype=np.int32)),
tf.convert_to_tensor(np.array(cloud_inds, dtype=np.int32)),
is_test=True)
return input_list, np.array(point_inds[0]), np.array(stacks_lengths[0])
def preprocess(self, data, attr):
cfg = self.cfg
points = data['point'][:, 0:3]
if 'label' not in data.keys() or data['label'] is None:
labels = np.zeros((points.shape[0],), dtype=np.int32)
else:
labels = np.array(data['label'], dtype=np.int32).reshape((-1,))
split = attr['split']
if 'feat' not in data.keys() or data['feat'] is None:
feat = None
else:
feat = np.array(data['feat'], dtype=np.float32)
data = dict()
if (feat is None):
sub_points, sub_labels = DataProcessing.grid_subsampling(
points, labels=labels, grid_size=cfg.first_subsampling_dl)
sub_feat = None
else:
sub_points, sub_feat, sub_labels = DataProcessing.grid_subsampling(
points,
features=feat,
labels=labels,
grid_size=cfg.first_subsampling_dl)
search_tree = KDTree(sub_points)
data['point'] = sub_points
data['feat'] = sub_feat
data['label'] = sub_labels
data['search_tree'] = search_tree
if split in ["test", "testing"]:
proj_inds = np.squeeze(
search_tree.query(points, return_distance=False))
proj_inds = proj_inds.astype(np.int32)
data['proj_inds'] = proj_inds
return data
def get_batch_gen(self, dataset, steps_per_epoch=None, batch_size=1):
cfg = self.cfg
if dataset.read_data(0)[0]['feat'] is None:
dim_features = 3
else:
            dim_features
#-------------------------------------------------------------------------------
# Name: Image functions
# Purpose: Car damage analysis project
#
# Author: kol
#
# Created: 17.01.2020
# Copyright: (c) kol 2020
# Licence: MIT
#-------------------------------------------------------------------------------
import cv2
import numpy as np
from pathlib import Path
from imutils.perspective import order_points
from skimage import measure
from skimage import draw
from copy import deepcopy
from scipy.ndimage.filters import gaussian_filter
from matplotlib import colors as plt_colors
from scipy.spatial import distance
from random import randint
from gr.utils import get_image_area
def random_colors(n):
"""Returns n random colors"""
rr = []
for i in range(n):
r = randint(0,255)
g = randint(0,255)
b = randint(0,255)
rr.extend([(r,g,b)])
return rr
def gradient_colors(colors, n):
"""Returns color gradient of length n from colors[0] to colors[1]"""
if len(colors) < 2:
raise ValueError("Two colors required to compute gradient")
if n < 2:
raise ValueError("Gradient length must be greater than 1")
c = np.linspace(0, 1, n)[:, None, None]
x = np.array([colors[0]])
y = np.array([colors[1]])
g = y + (x - y) * c
return g.astype(x.dtype)
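# Illustrative call (sketch); note that, as the code is written, the first entry
# equals colors[1] and the last equals colors[0]:
#   gradient_colors(((255, 0, 0), (0, 0, 255)), 3)
#   -> array of shape (3, 1, 3): [[[0, 0, 255]], [[127, 0, 127]], [[255, 0, 0]]]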
def color_to_cv_color(name):
"""Convert color with given name to OpenCV color"""
mp_rgb = plt_colors.to_rgb(name)
cv_bgr = [c * 255 for c in reversed(mp_rgb)]
return cv_bgr
def rgba_to_rgb(rgba):
"""Convert RGBA color to RGB color and mask"""
assert rgba.shape[-1] == 4
rgb = np.empty((rgba.shape[0], rgba.shape[1], 3), dtype=rgba.dtype)
r, g, b, m = rgba[:,:,0], rgba[:,:,1], rgba[:,:,2], rgba[:,:,3]
rgb[:,:,0] = r
rgb[:,:,1] = g
rgb[:,:,2] = b
return [rgb, m]
def rgb_to_rgba(rgb, fill=1):
"""Convert RGB color to RGBA color"""
assert rgb.shape[-1] == 3
rgba = np.full((rgb.shape[0], rgb.shape[1], 4), fill, dtype=rgb.dtype)
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
rgba[:,:,0] = r
rgba[:,:,1] = g
rgba[:,:,2] = b
return rgba
def ensure_numeric_color(color, gradients=None, max_colors=None):
"""Ensures color is numeric and, if a vector requested, have specified length.
The function performs the following conversions:
if 'random' color name is specified, generates requred number of random colors,
if 'gradient' color name is speciefied, computes a gradient vector,
translates color from textual name ('red') to RGB value,
if a list is provided, performs checks on its elements and
guarantees it has required number of elements.
Parameters:
color Either a color name, RGB value or special 'random' and 'gradient' strings
Could also be a list of names or RGB values
gradients Array-like of 2 colors to generate a gradient from.
                    Required if `color` == `gradient`
        max_colors  Requested number of colors (see return value).
Required if `color` == `gradient` or `random`.
Returns:
If `color` is a name or RGB value, result will be a vector of
this color with `max_color` length or single value if `max_color` is None
If `color` is `random`, result will be a list of colors with `max_color` length
If `color` is `gradient`, result will be a list of colors for computed gradient
If `color` is list, result will be a list with `max_colors` or original length
with all elements converted as they are single values
"""
ret_color = None
if color is None:
raise ValueError("Color not specified")
if type(color) == list:
# List of colors
# Assume all items are of the same type
# Make up a list of required length
if len(color) == 0:
raise ValueError("Empty color list")
if max_colors is None:
ret_color = color
else:
ret_color = color * int(np.ceil(max_colors / len(color)))
ret_color = ret_color[0:max_colors]
elif color == 'random':
# Random color
ret_color = random_colors(max_colors if max_colors is not None else 1)
if max_colors is None:
ret_color = ret_color[0]
elif color == "gradient":
# Gradient color
if gradients is None or max_colors is None:
raise ValueError("Cannot determine gradient of a single color")
else:
if len(gradients) < 2:
raise ValueError("Two colors required to compute gradient")
gc = (ensure_numeric_color(gradients[0]), ensure_numeric_color(gradients[1]))
ret_color = gradient_colors(gc, max_colors)
elif type(color) == str:
# Named color
ret_color = color_to_cv_color(color)
if max_colors is not None:
ret_color = [ret_color]
else:
# Should be a color
ret_color = color
if max_colors is not None:
ret_color = [ret_color]
return ret_color
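# Usage sketch (return values approximate; BGR ordering comes from color_to_cv_color):
#   ensure_numeric_color('red')                              -> [0.0, 0.0, 255.0]
#   ensure_numeric_color('random', max_colors=3)             -> list of three random (r, g, b) tuples
#   ensure_numeric_color('gradient', gradients=('red', 'blue'), max_colors=5)
#                                                            -> 5-step gradient array between the two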
# Modified version of imutils.four_point_transform() function
# author: <NAME>
# website: http://www.pyimagesearch.com
def four_point_transform(image, pts, inverse=False):
"""Perform 4-point transformation or reverses it"""
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
d = np.min(rect)
if d >= 0:
(tl, tr, br, bl) = rect
else:
# Correct all rectangle points to be greater than or equal to 0
corrected_rect = deepcopy(rect)
d = abs(d)
for r in corrected_rect:
r[0] += d
r[1] += d
(tl, tr, br, bl) = corrected_rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
if inverse:
M = np.linalg.pinv(M)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
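# Usage sketch (hypothetical corner points of a skewed document in an image):
#   pts = np.array([[73, 239], [356, 117], [475, 265], [187, 443]], dtype="float32")
#   top_down = four_point_transform(image, pts)
# Passing inverse=True applies the pseudo-inverse of the same homography, mapping
# the rectified view back toward the original perspective.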
def clahe(img):
"""Apply CLAHE filter for luminocity equalization"""
# Convert to LAB color space
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# Split channels
l, a, b = cv2.split(lab)
# Apply CLAHE to l_channel
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
# Merge back and convert to RGB color space
merged = cv2.merge((cl,a,b))
final = cv2.cvtColor(merged, cv2.COLOR_LAB2BGR)
return final
def gauss_filter(img):
"""Apply Gaussian filter"""
s = 2
w = 5
t = (((w - 1)/2)-0.5)/s
return gaussian_filter(img, sigma=s, truncate=t)
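# The truncate value maps the window size w to a kernel radius: scipy uses
# radius = int(truncate * sigma + 0.5), so here
#   t = (((5 - 1) / 2) - 0.5) / 2 = 0.75  ->  radius = int(0.75 * 2 + 0.5) = 2,
# i.e. an effective 5x5 Gaussian window with sigma = 2.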
# Align two images
# Taken from https://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
# Author <NAME>
def align_images(im1, im2, warp_mode=cv2.MOTION_TRANSLATION, debug=False):
"""Algin two images.
warp_mode is either cv2.MOTION_TRANSLATION for affine transformation or
cv2.MOTION_HOMOGRAPHY for perspective one.
    Note that under OpenCV < 3.4.0, if images cannot be aligned, the function fails,
    crashing the calling program (unless it uses a global exception hook)"""
# Convert images to grayscale
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
if debug:
cv2.imshow("Image 1 gray", im1_gray)
cv2.imshow("Image 2 gray", im2_gray)
# Find size of image1
sz = im1.shape
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
if warp_mode == cv2.MOTION_HOMOGRAPHY:
warp_matrix = np.eye(3, 3, dtype=np.float32)
else :
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
    number_of_iterations = 1000
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
    termination_eps = 1e-10
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix,
motionType=warp_mode,
criteria=criteria)
try:
if warp_mode == cv2.MOTION_HOMOGRAPHY :
# Use warpPerspective for Homography
im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1],sz[0]),
flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
else :
# Use warpAffine for Translation, Euclidean and Affine
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]),
flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
if debug:
cv2.imshow('Aligned image', im2_aligned)
return im2_aligned
except:
# Alignment unsuccessfull
return im2
def get_diff(im1, im2, align=False, debug=False):
"""Get difference of two images"""
    # Align images
if align:
im2 = align_images(im1, im2, cv2.MOTION_HOMOGRAPHY, debug)
# Make up grays
gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Calculate scrore and difference
(score, diff) = measure.compare_ssim(gray1, gray2, full=True, gaussian_weights=True)
return score, diff
def draw_contour(contour_img,
contour,
color,
gradient_colors=("red", "blue"),
min_size=10,
filled=True,
compute_mask=False):
""" Draw a contour as optionally fill it with color.
Parameters:
contour_img An image to draw on
contour A list of contour points as returned by cv2.findContour() or
measure.find_contours()
        color A color to draw contour
'can_be_none'],
"['fortnite']['exec']['friend_remove']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_join']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_leave']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_confirm']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_kick']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_promote']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_update']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_update']": [list, str, 'can_be_none'],
"['fortnite']['exec']['party_member_disconnect']": [list, str, 'can_be_none'],
"['discord']": [dict],
"['discord']['enabled']": [bool, 'select_bool'],
"['discord']['token']": [str],
"['discord']['owner']": [list, str, 'can_be_none'],
"['discord']['channels']": [list, str],
"['discord']['status']": [str],
"['discord']['status_type']": [str, 'select_status'],
"['discord']['chat_max']": [int, 'can_be_none'],
"['discord']['chat_max_for']": [list, str, 'multiple_select_user_type', 'can_be_none'],
"['discord']['command_enable_for']": [list, str, 'multiple_select_user_type', 'can_be_none'],
"['discord']['blacklist']": [list, str],
"['discord']['whitelist']": [list, str],
"['discord']['prefix']": [list, str, 'can_be_none'],
"['discord']['exec']": [dict],
"['discord']['exec']['ready']": [list, str, 'can_be_none'],
"['ng_words']": [list, dict, 'ng_words_config'],
"['ng_word_for']": [list, str, 'multiple_select_user_type', 'can_be_none'],
"['ng_word_operation']": [list, str, 'multiple_select_user_operation', 'can_be_none'],
"['ng_word_reply']": [str, 'can_be_none'],
"['relogin_in']": [int, 'can_be_none'],
"['search_max']": [int, 'can_be_none'],
"['no_logs']": [bool, 'select_bool'],
"['loglevel']": [str, 'select_loglevel'],
"['discord_log']": [str, 'can_be_none'],
"['omit_over2000']": [bool, 'select_bool'],
"['skip_if_overflow']": [bool, 'select_bool'],
"['case_insensitive']": [bool, 'select_bool'],
"['convert_kanji']": [bool, 'select_bool']
}
self.ng_names_config_tags = {
"['matchmethod']": [str, 'select_matchmethod'],
"['word']": [str]
}
self.ng_words_config_tags = {
"['count']": [int],
"['matchmethod']": [str, 'select_matchmethod'],
"['words']": [list, str]
}
self.commands = None
tags = [list, str, 'can_be_multiple']
tags2 = [list, str, 'can_be_multiple', 'lambda x: len(x) > 0']
self.commands_tags = {
**{
"['whitelist_commands']": tags,
"['user_commands']": tags,
"['prefix_to_item_search']": [bool, 'select_bool'],
"['command']": tags2,
"['ng_word']": tags2,
"['most']": tags2,
"['user']": tags2,
"['whitelist']": tags2,
"['blacklist']": tags2,
"['owner']": tags2,
"['bot']": tags2,
"['null']": tags2,
"['operation_kick']": tags2,
"['operation_chatban']": tags2,
"['operation_remove']": tags2,
"['operation_block']": tags2,
"['operation_blacklist']": tags2,
"['add']": tags2,
"['remove']": tags2,
"['true']": tags2,
"['false']": tags2,
"['accept']": tags2,
"['decline']": tags2,
"['me']": tags2,
"['public']": tags2,
"['friends_allow_friends_of_friends']": tags2,
"['friends']": tags2,
"['private_allow_friends_of_friends']": tags2,
"['private']": tags2,
"['outfit']": tags2,
"['backpack']": tags2,
"['pickaxe']": tags2,
"['save']": tags2,
"['load']": tags2,
"['commands']": [dict]
},
**{
f"['commands']['{command}']": tags
for command in self.all_commands.keys()
}
}
self.custom_commands = None
self.custom_commands_tags = {
"['run_when']": [str, 'select_run_when'],
"['commands']": [list, dict, 'custom_commands_config']
}
self.custom_commands_config_tags = {
"['word']": [str],
"['allow_for']": [list, str, 'multiple_select_user_type', 'can_be_none'],
"['run']": [list, str, 'can_be_multiple']
}
self.replies = None
self.replies_tags = {
"['run_when']": [str, 'select_run_when'],
"['prefix_to_replies']": [bool, 'select_bool'],
"['replies']": [list, dict, 'replies_config']
}
self.replies_config_tags = {
"['matchmethod']": [str, 'select_matchmethod'],
"['word']": [str],
"['reply']": [list, str, 'can_be_multiple', 'accept_empty'],
"['ct']": [int, 'can_be_none']
}
self.cosmetic_presets = None
self.config_item_pattern = re.compile(
r"<Item name='(?P<name>.+)' "
r"id='(?P<id>.+)' "
r"path='(?P<path>.+)'>"
)
self.config_playlist_pattern = re.compile(
r"<Playlist name='(?P<name>.+)' "
r"id='(?P<id>.+)'>"
)
self.config_variant_pattern = re.compile(
r"<Variant name='(?P<name>.+)' "
r"channel='(?P<channel>.+)' "
r"tag='(?P<tag>.+)'>"
)
self.http = HTTPClient(aiohttp.ClientSession())
self.webhook = None
self.discord_client = None
@property
def loaded_clients(self) -> List[Client]:
return [client for client in self.clients if client.is_ready()]
@property
def loaded_client_ids(self) -> List[Client]:
return [client.user.id for client in self.loaded_clients]
def add_command(self, command: Command) -> None:
if not isinstance(command, Command):
raise TypeError(f'command argument must be instance of {Command.__name__}')
if command.name in self.all_commands:
raise ValueError(f"Command '{command.name}' is already registered")
self.all_commands[command.name] = command
for client in self.clients:
client.add_command(command)
def is_error(self) -> bool:
return (
self.error_config
or self.error_commands
or self.error_custom_commands
or self.error_replies
)
def get_device_auth_details(self) -> dict:
if self.isfile('device_auths'):
return self.load_json('device_auths')
else:
return {}
def store_device_auth_details(self, email: str, details: dict) -> None:
existing = self.get_device_auth_details()
existing[email.lower()] = details
self.save_json('device_auths', existing)
def get_refresh_tokens(self) -> dict:
if self.isfile('refresh_tokens'):
return self.load_json('refresh_tokens')
else:
return {}
def store_refresh_token(self, email: str, refresh_token: str) -> None:
existing = self.get_refresh_tokens()
existing[email.lower()] = refresh_token
self.save_json('refresh_tokens', existing)
def get_cosmetic_presets(self) -> dict:
if self.isfile('cosmetic_presets'):
return self.load_json('cosmetic_presets')
else:
return {}
async def store_cosmetic_presets(self, account_id: str, details: dict) -> None:
existing = self.get_cosmetic_presets()
existing[account_id] = details
self.save_json('cosmetic_presets', existing)
def get_command_stats(self) -> dict:
if self.isfile('command_stats'):
return self.load_json('command_stats')
else:
return {}
def store_command_stats(self) -> None:
self.save_json('command_stats', self.command_stats)
def convert_td(self, td: datetime.timedelta) -> Tuple[int, int, int, int]:
m, s = divmod(td.seconds, 60)
h, m = divmod(m, 60)
return td.days, h, m, s
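    # Example (sketch): convert_td(datetime.timedelta(days=1, seconds=3725))
    # returns (1, 1, 2, 5), i.e. 1 day, 1 hour, 2 minutes, 5 seconds.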
def isfile(self, key: str, force_file: Optional[bool] = False) -> bool:
if self.mode == 'repl' and not force_file:
if db.get(key) is None:
return False
else:
if not os.path.isfile(f'{key}.json'):
return False
return True
def remove(self, key: str, force_file: Optional[bool] = False) -> None:
if self.mode == 'repl' and not force_file:
try:
del db[key]
except KeyError as e:
raise FileNotFoundError from e
else:
os.remove(f'{key}.json')
async def aremove(self, key: str, force_file: Optional[bool] = False) -> None:
if self.mode == 'repl' and not force_file:
try:
del db[key]
except KeyError as e:
raise FileNotFoundError from e
else:
await aremove(f'{key}.json')
def rename(self, key_src: str, key_dst: str, force_file: Optional[bool] = False) -> None:
if self.mode == 'repl' and not force_file:
try:
db[key_dst] = db[key_src]
del db[key_src]
except KeyError as e:
raise FileNotFoundError from e
else:
os.rename(f'{key_src}.json', f'{key_dst}.json')
def load_json(self, key: str, force_file: Optional[bool] = False) -> Union[dict, list]:
if self.mode == 'repl' and not force_file:
data = db[key]['value']
if isinstance(data, str):
return json.loads(db[key]['value'])
return data
else:
try:
with open(f'{key}.json', encoding='utf-8') as f:
data = f.read()
except UnicodeDecodeError:
try:
with open(f'{key}.json', encoding='utf-8-sig') as f:
data = f.read()
except UnicodeDecodeError:
with open(f'{key}.json', encoding='shift_jis') as f:
data = f.read()
return json.loads(data)
async def aload_json(self, key: str, force_file: Optional[bool] = False) -> Union[dict, list]:
if self.mode == 'repl' and not force_file:
data = db[key]['value']
if isinstance(data, str):
return json.loads(db[key]['value'])
return data
else:
try:
async with aopen(f'{key}.json', encoding='utf-8') as f:
data = await f.read()
except UnicodeDecodeError:
try:
async with aopen(f'{key}.json', encoding='utf-8-sig') as f:
data = await f.read()
except UnicodeDecodeError:
async with aopen(f'{key}.json', encoding='shift_jis') as f:
data = await f.read()
return json.loads(data)
def save_json(self, key: str, value: Union[dict, list],
force_file: Optional[bool] = False,
compact: Optional[bool] = False) -> None:
if self.mode == 'repl' and not force_file:
db[key] = {
'last_edited': self.utcnow(),
'value': json.dumps(
value,
ensure_ascii=False,
cls=MyJSONEncoder
)
}
else:
with open(f'{key}.json', 'w', encoding='utf-8') as f:
if compact:
json.dump(
value,
f,
ensure_ascii=False,
cls=MyJSONEncoder
)
else:
json.dump(
value,
f,
indent=4,
ensure_ascii=False,
cls=MyJSONEncoder
)
def dumps(self, data: Union[dict, list]) -> str:
return json.dumps(
data,
ensure_ascii=False,
cls=MyJSONEncoder
)
def get_last_edited(self, key: str, force_file: Optional[bool] = False) -> datetime.datetime:
if self.mode == 'repl' and not force_file:
return datetime.datetime.fromisoformat(db[key]['last_edited'])
else:
stat = os.stat(f'{key}.json')
return datetime.datetime.fromtimestamp(stat.st_mtime)
def is_not_edited_for(self, key: str, td: datetime.timedelta, force_file: Optional[bool] = False) -> bool:
last_edited = self.get_last_edited(key, force_file=force_file)
if last_edited < (datetime.datetime.utcnow() - td):
return True
return False
def l(self, key: str, *args: tuple, default: Optional[str] = '', **kwargs: dict) -> LocalizedText:
return LocalizedText(self, ['main', key], default, *args, **kwargs)
def send(self, content: Any,
user_name: Optional[str] = None,
color: Optional[Callable] = None,
add_p: Optional[Union[Callable, List[Callable]]] = None,
add_d: Optional[Union[Callable, List[Callable]]] = None,
file: Optional[io.IOBase] = None) -> Optional[str]:
file = file or sys.stdout
content = str(content)
color = color or (lambda x: x)
add_p = (add_p if isinstance(add_p, list) else [add_p or (lambda x: x)])
add_d = (add_d if isinstance(add_d, list) else [add_d or (lambda x: x)])
if file == sys.stderr:
add_d.append(self.discord_error)
if not self.config['no_logs'] if self.config else True:
text = content
for func in add_p:
text = func(text)
print(color(text), file=file)
if self.webhook:
content = discord.utils.escape_markdown(content)
name = user_name or 'Fortnite-LobbyBot'
text = content
for func in add_d:
text = func(text)
self.webhook.send(text, name)
def time(self, text: str) -> str:
return f'[{self.now()}] {text}'
def discord_error(self, text: str) -> str:
texts = []
for line in text.split('\n'):
texts.append(f'> {line}')
return '\n'.join(texts)
def debug_message(self, text: str) -> str:
return f'```\n{text}\n```'
def format_exception(self, exc: Optional[Exception] = None) -> str:
if exc is not None:
return ''.join(list(traceback.TracebackException.from_exception(exc).format()))
return traceback.format_exc()
def print_exception(self, exc: Optional[Exception] = None) -> None:
if exc is not None:
self.send(
''.join(['Ignoring exception\n']
+ list(traceback.TracebackException.from_exception(exc).format())),
file=sys.stderr
)
else:
self.send(
traceback.format_exc(),
file=sys.stderr
)
def debug_print_exception(self, exc: Optional[Exception] = None) -> None:
if self.config is not None and self.config['loglevel'] == 'debug':
self.print_exception(exc)
def now(self) -> str:
return datetime.datetime.now().strftime('%H:%M:%S')
def utcnow(self) -> str:
return datetime.datetime.utcnow().strftime('%H:%M:%S')
def strftime(self, dt: datetime.datetime) -> str:
dt = dt.astimezone(get_localzone())
if dt.hour >= 12 and self.config['lang'] == 'en':
dt -= datetime.timedelta(hours=12)
return f"{dt.strftime('%H:%M PM')}"
return f"{dt.strftime('%H:%M')}"
def str_to_bool(self, text: str) -> bool:
if text.lower() == 'true':
return True
elif text.lower() == 'false':
return False
raise ValueError(f"{text!r} does not match to any of True, False")
def get_list_index(self, data: list, index: int, default: Optional[Any] = None) -> Any:
return data[index] if data[index:index + 1] else default
def eval_format(self, text: str, variables: dict) -> str:
return self.formatter.format(text, **variables)
def eval_dict(self, data: dict, keys: list) -> str:
text = ''
for key in keys:
text += f"[{repr(key)}]"
return text
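    # Quick examples (sketch) for the two helpers above:
    #   get_list_index([10, 20, 30], 1)             -> 20
    #   get_list_index([10, 20, 30], 5, default=0)  -> 0   (an out-of-range slice is empty)
    #   eval_dict({}, ['fortnite', 'email'])        -> "['fortnite']['email']"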
# Add the list of possible values to the field som_nature
map_predefined_vals_to_fld(self.l_vertex, "som_typologie_nature", self.typo_nature_som)
# Add the list of possible values to the field som_precision_rattachement
map_predefined_vals_to_fld(self.l_vertex, "som_precision_rattachement", self.precision_class, 0, 1)
# Add the list of possible values to the field som_precision_rattachement
map_predefined_vals_to_fld(self.l_vertex, "som_representation_plane", self.ellips_acronym, 0, 2)
# Add the list of possible values to the field lim_typologie_nature
map_predefined_vals_to_fld(self.l_edge, "lim_typologie_nature", self.typo_nature_lim)
# Add the list of possible values to the field som_delimitation_publique
false_true_lst = [('False', 'Faux'), ('True', 'Vrai')]
map_predefined_vals_to_fld(self.l_vertex, "som_delimitation_publique", false_true_lst, 0, 1)
# Add the list of possible values to the field lim_delimitation_publique
map_predefined_vals_to_fld(self.l_edge, "lim_delimitation_publique", false_true_lst, 0, 1)
# >>
# Then, start editing mode..
for idx, layer in enumerate(self.layers):
if not layer.isEditable():
layer.startEditing()
if idx == 0:
self.iface.setActiveLayer(layer)
self.projComboBox.setDisabled(False)
self.permalinkCmb.setDisabled(True)
self.downloadPushButton.setDisabled(True)
self.resetPushButton.setDisabled(False)
self.uploadPushButton.setDisabled(False)
self.downloadPushButton.clicked.disconnect(self.on_downloaded)
# self.permalinkLineEdit.returnPressed.disconnect(self.on_downloaded)
self.resetPushButton.clicked.connect(self.on_reset)
self.uploadPushButton.clicked.connect(self.on_uploaded)
# Activate the scale limitation for the canvas
self.canvas.scaleChanged.connect(self.limit_cvs_scale)
self.downloaded.emit()
return True
def reset(self):
"""Remove RFU layers."""
# Save (virtually) the changes in the layers
# (to avoid alert messages when removing the layers)
for layer in self.layers :
if isinstance(layer, QgsVectorLayer):
if layer.isEditable():
self.iface.setActiveLayer(layer)
layer.commitChanges()
# Remove RFU layers
try:
self.project.removeMapLayers([
self.l_vertex.id(), self.l_edge.id(), self.l_bbox.id()])
except:
return
# Remove eliminated lines layer
if self.project.mapLayersByName(elimedge_lname):
el_lyr = self.project.mapLayersByName(elimedge_lname)[0]
self.iface.setActiveLayer(el_lyr)
el_lyr.commitChanges()
self.project.removeMapLayers([el_lyr.id()])
# Reset variable
self.precision_class = []
self.ellips_acronym = []
self.dflt_ellips_acronym = None
self.nature = []
self.typo_nature_lim = []
self.typo_nature_som = []
self.auth_creator = []
self.l_vertex = None
self.l_edge = None
self.layers = [self.l_vertex, self.l_edge]
self.edges_added = {}
self.vertices_added = {}
self.edges_removed = {}
self.vertices_removed = {}
self.edges_modified = {}
self.vertices_modified = {}
self.tol_same_pt = 0.0
        # Reset the ComboBox that contains the authorized projections
self.projComboBox.clear()
self.projComboBox.setDisabled(True)
        # Load permalinks into the permalink combobox
self.load_permalinks()
self.permalinkCmb.setDisabled(False)
# self.permalinkLineEdit.returnPressed.connect(self.on_downloaded)
self.downloadPushButton.setDisabled(False)
self.downloadPushButton.clicked.connect(self.on_downloaded)
self.resetPushButton.setDisabled(True)
self.resetPushButton.clicked.disconnect(self.on_reset)
self.uploadPushButton.setDisabled(True)
self.uploadPushButton.clicked.disconnect(self.on_uploaded)
return True
def upload(self, enr_api_dossier=None, commentaire=None):
"""Upload data to Géofoncier REST API.
On success returns the log messages (Array).
"""
# Set XML document
root = EltTree.Element(r"rfu")
first_vtx_kept = True
first_edge_kept = True
# Add to our XML document datasets which have been changed
if self.vertices_added:
for fid in self.vertices_added:
# Check if vertex is out of the bbox
to_export = check_vtx_outofbbox(self.vertices_added_ft[fid], self.ft_bbox)
if to_export:
tools.xml_subelt_creator(root, "sommet",
data=self.vertices_added[fid],
action=r"create")
# If vertex is out of the bbox
else:
                    # Create a new layer to store the vertices that were not exported
if first_vtx_kept:
if layer_exists(vtx_outofbbox_lname, self.project):
vtx_outofbbox_lyr = self.project.mapLayersByName(vtx_outofbbox_lname)[0]
else:
vtx_outofbbox_lyr = create_vtx_outofbbox_lyr()
# Add the vertex to this layer
if not vtx_outofbbox_lyr.isEditable():
vtx_outofbbox_lyr.startEditing()
vtx_outofbbox_lyr.addFeature(self.vertices_added_ft[fid])
first_vtx_kept = False
if self.edges_added:
for fid in self.edges_added:
# Check if edge is out of the bbox
to_export = check_edge_outofbbox(self.edges_added_ft[fid], self.ft_bbox)
if to_export:
tools.xml_subelt_creator(root, "limite",
data=self.edges_added[fid],
action=r"create")
# If edge is out of the bbox
else:
                    # Create a new layer to store the edges that were not exported
if first_edge_kept:
if layer_exists(edge_outofbbox_lname, self.project):
edge_outofbbox_lyr = self.project.mapLayersByName(edge_outofbbox_lname)[0]
else:
edge_outofbbox_lyr = create_edge_outofbbox_lyr()
# Add the edge to this layer
if not edge_outofbbox_lyr.isEditable():
edge_outofbbox_lyr.startEditing()
edge_outofbbox_lyr.addFeature(self.edges_added_ft[fid])
first_edge_kept = False
if self.vertices_removed:
for fid in self.vertices_removed:
tools.xml_subelt_creator(root, "sommet",
data=self.vertices_removed[fid],
action=r"delete")
if self.edges_removed:
for fid in self.edges_removed:
tools.xml_subelt_creator(root, "limite",
data=self.edges_removed[fid],
action=r"delete")
if self.vertices_modified:
for fid in self.vertices_modified:
tools.xml_subelt_creator(root, "sommet",
data=self.vertices_modified[fid],
action=r"update")
if self.edges_modified:
for fid in self.edges_modified:
tools.xml_subelt_creator(root, "limite",
data=self.edges_modified[fid],
action=r"update")
# Create a new changeset Id
changeset_id = self.create_changeset(enr_api_dossier=enr_api_dossier, commentaire=commentaire)
# Add changeset value in our XML document
root.attrib[r"changeset"] = changeset_id
# Send data
edit = self.conn.edit(self.zone, EltTree.tostring(root))
if edit.code != 200:
edit_read = edit.read()
# DEBUG
# urlresp_to_file(edit_read)
err_tree = EltTree.fromstring(edit_read)
msgs_log = []
for log in err_tree.iter(r"log"):
msgs_log.append("%s: %s" % (log.attrib["type"], log.text))
raise Exception(msgs_log)
tree = EltTree.fromstring(edit.read())
err = tree.find(r"./erreur")
if err:
debug_msg('DEBUG', "erreur: %s" , (str(err)))
err_tree = EltTree.fromstring(err)
msgs_log = []
for log in err_tree.iter(r"log"):
msgs_log.append("%s: %s" % (log.attrib["type"], log.text))
raise Exception(msgs_log)
# Returns log info
msgs_log = []
for log in tree.iter(r"log"):
msgs_log.append("%s: %s" % (log.attrib["type"], log.text))
# Close the changeset
self.destroy_changeset(changeset_id)
# Reset all
self.edges_added = {}
self.edges_added_ft = {}
self.vertices_added = {}
self.vertices_added_ft = {}
self.edges_removed = {}
self.vertices_removed = {}
self.edges_modified = {}
self.vertices_modified = {}
# Alert message if elements out of bbox
msg_outbbox = ""
if not first_vtx_kept:
msg_outbbox = msg_outbbox_vtx.format(vtx_outofbbox_lname)
if not first_edge_kept:
if msg_outbbox != "":
msg_outbbox += "<br>"
msg_outbbox += msg_outbbox_edge.format(edge_outofbbox_lname)
if msg_outbbox != "":
self.canvas.refresh()
m_box = mbox_w_params(tl_atn, txt_msg_outbbox, msg_outbbox)
m_box.exec_()
return msgs_log
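    # Rough shape of the XML document pushed to the API (sketch; the attributes of
    # each child element come from tools.xml_subelt_creator and the values shown
    # are hypothetical):
    #   <rfu changeset="123">
    #     <sommet action="create" .../>
    #     <limite action="delete" .../>
    #     <sommet action="update" .../>
    #   </rfu>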
def create_changeset(self, enr_api_dossier=None, commentaire=None):
"""Open a new changeset from Géofoncier API.
On success, returns the new changeset id.
"""
opencs = self.conn.open_changeset(self.zone, enr_api_dossier=enr_api_dossier, commentaire=commentaire)
if opencs.code != 200:
raise Exception(opencs.read())
tree = EltTree.fromstring(opencs.read())
err = tree.find(r"./log")
if err:
raise Exception(err.text)
# treeterator = list(tree.getiterator(tag=r"changeset"))
# Python 3.9 -> getiterator deprecated
treeterator = list(tree.iter(tag=r"changeset"))
# We should get only one changeset
if len(treeterator) != 1:
raise Exception("Le nombre de \'changeset\' est incohérent.\n"
"Merci de contacter l'administrateur Géofoncier.")
return treeterator[0].attrib[r"id"]
def destroy_changeset(self, id):
"""Close a changeset."""
closecs = self.conn.close_changeset(self.zone, id)
if closecs.code != 200:
raise Exception(closecs.read())
tree = EltTree.fromstring(closecs.read())
err = tree.find(r"./log")
if err:
raise Exception(err.text)
return True
def abort_action(self, msg=None):
for layer in self.layers:
if layer and not layer.isEditable():
layer.startEditing()
# Clear message bar
self.iface.messageBar().clearWidgets()
if msg:
return QMessageBox.warning(self, r"Attention", msg)
return
def extract_layers(self, tree):
"""Return a list of RFU layers."""
# Create vector layers..
l_vertex = QgsVectorLayer(r"Point?crs=epsg:4326&index=yes",
"Sommet RFU", r"memory")
l_edge = QgsVectorLayer(r"LineString?crs=epsg:4326&index=yes",
"Limite RFU", r"memory")
p_vertex = l_vertex.dataProvider()
p_edge = l_edge.dataProvider()
# Define default style renderer..
renderer_vertex = QgsRuleBasedRenderer(QgsMarkerSymbol())
vertex_root_rule = renderer_vertex.rootRule()
# Modified in v2.1 (som_nature replaced by som_typologie_nature) >>
vertex_rules = (
(
("Borne, borne à puce, pierre, piquet, clou ou broche"),
("$id >= 0 AND \"som_typologie_nature\" IN ('Borne',"
"'Borne à puce', 'Pierre', 'Piquet', 'Clou ou broche')"),
r"#EC0000", 2.2
), (
("Axe cours d'eau, axe fossé, haut de talus, pied de talus"),
("$id >= 0 AND \"som_typologie_nature\" IN ('Axe cours d\'\'eau',"
"'Axe fossé', 'Haut de talus', 'Pied de talus')"),
r"#EE8012", 2.2
), (
("Angle de bâtiment, axe de mur, angle de mur, "
"angle de clôture, pylône et toute autre valeur"),
("$id >= 0 AND \"som_typologie_nature\" NOT IN ('Borne',"
"'Borne à puce', 'Pierre', 'Piquet', 'Clou ou broche',"
"'Axe cours d\'\'eau', 'Axe fossé', 'Haut de talus',"
"'Pied de talus')"),
r"#9784EC", 2.2
), (
"Temporaire", r"$id < 0", "cyan", 2.4
),
(
"Point nouveau à traiter car proche d'un existant", r"point_rfu_proche is not null", "#bcff03", 3
))
# >>
for label, expression, color, size in vertex_rules:
rule = vertex_root_rule.children()[0].clone()
rule.setLabel(label)
rule.setFilterExpression(expression)
rule.symbol().setColor(QColor(color))
rule.symbol().setSize(size)
vertex_root_rule.appendChild(rule)
vertex_root_rule.removeChildAt(0)
l_vertex.setRenderer(renderer_vertex)
renderer_edge = QgsRuleBasedRenderer(QgsLineSymbol())
edge_root_rule = renderer_edge.rootRule()
# Modified in v2.1 (lim_typologie_nature added) <<
edge_rules = (
(
"Limite privée",
"$id >= 0 AND \"lim_typologie_nature\" = '" + lim_typo_nat_vals[0] + "'",
"#0A0AFF",
0.5
),
(
"Limite naturelle",
"$id >= 0 AND \"lim_typologie_nature\" = '" + lim_typo_nat_vals[1] + "'",
"#aa876d",
0.5
),
(
"Temporaire",
"$id < 0",
"cyan",
1
)
)
# >>
for label, expression, color, width in edge_rules:
rule = edge_root_rule.children()[0].clone()
rule.setLabel(label)
rule.setFilterExpression(expression)
rule.symbol().setColor(QColor(color))
rule.symbol().setWidth(width)
edge_root_rule.appendChild(rule)
edge_root_rule.removeChildAt(0)
l_edge.setRenderer(renderer_edge)
# Add fields..
p_vertex.addAttributes(vtx_atts)
p_edge.addAttributes(edge_atts)
# Add features from xml tree..
# ..to vertex layer..
fts_vertex = []
for e in tree.findall(r"sommet"):
ft_vertex = QgsFeature()
ft_vertex.setGeometry(QgsGeometry.fromWkt(e.attrib[r"geometrie"]))
_id_noeud = int(e.attrib[r"id_noeud"])
_version = int(e.attrib[r"version"])
som_ge_createur = str(e.find(r"./som_ge_createur").text)
som_nature = str(e.find(r"./som_nature").text)
som_prec_rattcht = int(e.find(r"./som_precision_rattachement").text)
som_coord_est = float(e.find(r"./som_coord_est").text)
som_coord_nord = float(e.find(r"./som_coord_nord").text)
som_repres_plane = str(e.find(r"./som_representation_plane").text)
som_tolerance = float(e.find(r"./som_tolerance").text)
# Field used to store the attestation_qualite value
# when modifying a vertex ("false" or "true")
attestation_qualite = "false"
som_delim_pub = str(e.find(r"./som_delimitation_publique").text)
som_typo_nature = str(e.find(r"./som_typologie_nature").text)
ft_vertex.setAttributes([
_id_noeud,
_version,
som_ge_createur,
som_delim_pub,
som_typo_nature,
som_nature,
som_prec_rattcht,
som_coord_est,
som_coord_nord,
som_repres_plane,
som_tolerance,
attestation_qualite,
NULL
])
fts_vertex.append(ft_vertex)
# ..to edge layer..
fts_edge = []
for e in tree.findall(r"limite"):
ft_edge = QgsFeature()
ft_edge.setGeometry(QgsGeometry.fromWkt(e.attrib[r"geometrie"]))
_id_arc = int(e.attrib[r"id_arc"])
_version = int(e.attrib[r"version"])
lim_ge_createur = str(e.find(r"./lim_ge_createur").text)
lim_typo_nature = str(e.find(r"./lim_typologie_nature").text)
lim_delim_pub = str(e.find(r"./lim_delimitation_publique").text)
ft_edge.setAttributes([
_id_arc,
_version,
lim_ge_createur,
lim_delim_pub,
lim_typo_nature
])
fts_edge.append(ft_edge)
# Add features to layers..
p_vertex.addFeatures(fts_vertex)
p_edge.addFeatures(fts_edge)
# Update fields..
l_vertex.updateFields()
l_edge.updateFields()
# Update layer's extent..
l_vertex.updateExtents()
l_edge.updateExtents()
        # Check
metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metric", [metric_name, props]))
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecords", [props]))
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsBytes", [props]))
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
        The metric defaults to maximum over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsIteratorAgeMilliseconds", [props]))
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
        '''The time taken per GetRecords operation, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: | |
'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861386650':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861386651':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861386652':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861386653':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861386654':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861386655':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861386656':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861386657':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861386658':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861386659':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861383392':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861383393':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861383390':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861383391':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861383396':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861383397':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861397270':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397271':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397272':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397273':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397274':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861383394':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861378958':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')},
'861378959':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861378956':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861378957':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861378954':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861383395':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861378952':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861378953':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861378950':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861378951':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861390666':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861379346':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86138542':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861379347':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86138017':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'86138540':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861452139':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861452138':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861379349':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861452135':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'86138011':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'861452137':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861452136':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861452131':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861452130':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861452133':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'86138010':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'86145387':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86145386':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86138013':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'86138012':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'86139544':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'86139545':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'86138549':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'86139547':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86139540':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'86139541':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452683':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861452682':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'861452681':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861452680':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861452687':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861399731':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861452685':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861452684':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861452689':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861399730':{'en': 'Haidong, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u4e1c\u5730\u533a')},
'86139250':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86139251':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86139252':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86139253':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'86139254':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139255':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86139256':{'en': 'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'86139257':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86139258':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86139259':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139708':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'86139709':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861390424':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u672c\u6eaa\u5e02')},
'861452229':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861452228':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861390425':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'861380722':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861380723':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861380720':{'en': 'Yichang, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')},
'861380721':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'861380726':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861380727':{'en': '<NAME>i', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861380724':{'en': 'Xianning, Hubei', 'zh': u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')},
'861380725':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861380098':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861380099':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861380728':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861380729':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861453628':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861453629':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861453622':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861453623':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861453620':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861453621':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861453626':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861453627':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861453624':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861453625':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861380898':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380899':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380890':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861380891':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861380892':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861380893':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861380894':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861380895':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380896':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380897':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861394864':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861380547':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861394866':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861380545':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861380542':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861380543':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861380540':{'en': 'Jinan, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861394863':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861452226':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861394868':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861394869':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861380548':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861380549':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861453565':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861396624':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'86138071':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86138076':{'en': 'Haikou, Hainan', 'zh': u('\u6d77\u5357\u7701\u6d77\u53e3\u5e02')},
'86138075':{'en': 'Haikou, Hainan', 'zh': u('\u6d77\u5357\u7701\u6d77\u53e3\u5e02')},
'861396648':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861396649':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861396642':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861396643':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861396640':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861396641':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861396646':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861396647':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861396644':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861396645':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'86138469':{'en': 'Shuangyashan, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u53cc\u9e2d\u5c71\u5e02')},
'861379778':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861379775':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379774':{'en': 'Xiangfan, Hubei', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379777':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861379776':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379771':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379770':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379773':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861379772':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'86138617':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86138612':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'86138611':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'86138610':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'86138619':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'86138618':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861383019':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861383018':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861380253':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861383011':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861383010':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861383013':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861383012':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861383015':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861383014':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861383017':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861383016':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861379591':{'en': 'De<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861379590':{'en': 'De<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861379593':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861379592':{'en': 'Yibin, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861379595':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861379594':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861379597':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861379596':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861379599':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861379598':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861380257':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861380254':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861382498':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861382499':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861382496':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861382497':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861382494':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861382495':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861382492':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861382493':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861382490':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861382491':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861452919':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861452918':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861452913':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452912':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452911':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452910':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452917':{'en': 'Shanwei, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'861452916':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452915':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861452914':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861450964':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861450965':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861450966':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861450967':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861450960':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861450961':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861450962':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861450963':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861450968':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861450969':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861452519':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861452518':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861390943':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861390942':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861390941':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u7518\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861390940':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861390947':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861390946':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861390945':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861390944':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861390949':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861390948':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861452511':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861452510':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861389149':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861389148':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861389145':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861389144':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861389147':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861389146':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861389141':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861389140':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861389143':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861389142':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861453437':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861453436':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'861453435':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'861453434':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861453433':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u94c1\u5cad\u5e02')},
'861453432':{'en': '<NAME>oning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861453431':{'en': '<NAME>oning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861453430':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861453439':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861453438':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861390411':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861390410':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u94c1\u5cad\u5e02')},
'861395133':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861390412':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861390415':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'861390414':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u672c\u6eaa\u5e02')},
'861395137':{'en': '<NAME>iangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861395136':{'en': 'Suqian, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861395139':{'en': 'Suqian, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861395138':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861395685':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861395684':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861395683':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861395682':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861395681':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861395680':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861450604':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')},
'861385729':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385728':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385725':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385724':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385727':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385726':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.002, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 2048, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.002, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 2048, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 2, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
    ['rs_type', 's', 'g', 'source type, (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# VFM: ellipsoidMirror
['op_VFM_hfn', 's', 'None', 'heightProfileFile'],
['op_VFM_dim', 's', 'x', 'orientation'],
['op_VFM_p', 'f', 50.0, 'firstFocusLength'],
['op_VFM_q', 'f', 0.4, 'focalLength'],
['op_VFM_ang', 'f', 0.003, 'grazingAngle'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.999995500003375, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', -0.002999995500002025, 'tangentialVectorY'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_HFM: drift
['op_VFM_HFM_L', 'f', 0.20000000000000284, 'length'],
# HFM: ellipsoidMirror
['op_HFM_hfn', 's', 'None', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_p', 'f', 50.0, 'firstFocusLength'],
['op_HFM_q', 'f', 0.2, 'focalLength'],
['op_HFM_ang', 'f', 0.003, 'grazingAngle'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_HFM_nvx', 'f', 0.999995500003375, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_HFM_tvx', 'f', -0.002999995500002025, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_Watchpoint: drift
['op_HFM_Watchpoint_L', 'f', 0.19999999999999574, 'length'],
# Watchpoint_Mask: drift
['op_Watchpoint_Mask_L', 'f', 0.20000000000000284, 'length'],
# Mask: mask
['op_Mask_delta', 'f', 1.0, 'refractiveIndex'],
['op_Mask_atten_len', 'f', 1.0, 'attenuationLength'],
['op_Mask_thick', 'f', 1.0, 'maskThickness'],
['op_Mask_grid_sh', 'f', 0, 'gridShape'],
['op_Mask_grid_dx', 'f', 5e-06, 'horizontalGridDimension'],
['op_Mask_grid_dy', 'f', 5e-06, 'verticalGridDimension'],
['op_Mask_pitch_x', 'f', 2e-05, 'horizontalGridPitch'],
['op_Mask_pitch_y', 'f', 2e-05, 'verticalGridPitch'],
['op_Mask_gridTiltAngle', 'f', 0.4363323129985824, 'gridTiltAngle'],
['op_Mask_hx', 'f', 7.319999999999999e-07, 'horizontalSamplingInterval'],
['op_Mask_hy', 'f', 7.319999999999999e-07, 'verticalSamplingInterval'],
['op_Mask_mask_x0', 'f', 0.0, 'horizontalMaskCoordinate'],
['op_Mask_mask_y0', 'f', 0.0, 'verticalMaskCoordinate'],
['op_Mask_mask_Nx', 'i', 1024, 'horizontalPixelsNumber'],
['op_Mask_mask_Ny', 'i', 1024, 'verticalPixelsNumber'],
['op_Mask_grid_nx', 'i', 21, 'horizontalGridsNumber'],
['op_Mask_grid_ny', 'i', 21, 'verticalGridsNumber'],
#---Propagation parameters
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_Watchpoint'],
['op_Watchpoint_Mask_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Watchpoint_Mask'],
['op_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) | |
import numpy as np
import copy
import math
import scipy
from susi import props
from susi import sampling
from .result import result_obj, strurel_result
from scipy.stats import norm
class strurel(object):
'''
###################################################################
Description:
The strurel object
- is defined by the properties in susi.props (stochastic attributes and limit states)
- we apply sampling methods (susi.sampling) on it
- all results from the sampling are collected in it and saved in the strurel_result class,
this yields a discrete set of results in the strurel_result class
- a continuous result is created by the given interpolation methods
###################################################################
Parameters:
attrs: object of class col_attr (see /props/attrs), no default
stochastic properties of the structure, related to the given
limit state equation (ls)
e.g.
props.col_attr([props.attr(name="x{0}".format(i),rtype="n",mx=1e-10,vx=1e10)
for i in range(3)])
ls: object of limitstate class (see /props/limitst), no default
limit state function of the stochastic reliability
formulation
e.g.
ls=props.limitstate(name="ls1"
,f_str='-(np.sum([x[0],x[1],x[2]]))/math.sqrt(3)+4.7534243088229'
,argslist=["x2","x0","x1"]
,prob=None)
corrX: matrix (type: numpy array), default=independent
correlation matrix of stochastic properties,
default is np.eye (independence),
e.g.
np.array([[1,0,0],[0,1,0.2],[0,0.2,1]])
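        Example construction (illustrative sketch combining the examples above;
        `attrs` and `ls` stand for the objects defined there):
        s=strurel(attrs=attrs,ls=ls,corrX=np.array([[1,0,0],[0,1,0.2],[0,0.2,1]]))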
###################################################################
Returns:
Creates strurel object for further analysis by e.g. SuS or SuSI
'''
def __init__(self,attrs,ls,corrX=None):
self.attrs=attrs
self.ls=ls
self.corr_z=0
if corrX is None: #default set independent variables
self.corrX=np.identity(len(self.ls.argslist))
else:
self.corrX=corrX
self.rvs=props.col_rv(rvs=self.attrs.get_rvs(),corrX=corrX)
        if ls is not None:
self.match=props.limitstate_match(attrs,ls.argslist)
self.results=list()
self.result=None
self.ls.prob=self
def get_result(self
,method="sus"
,Nlist=[500],plist=[0.1],bstar=0.0
,fixcsteps=10,palist=[0.2],reuse=1,vers="b",saveU=0
,choose_seedN=0,dropbi=0,seedplist=[1.0],i0=0.0
,boundpf=1e-15
,xk="x0",mono="i",max_error=1e-10,bi=0.0
,Npred=100,predict_by_prev=0,prevNpredict=1,prob_b=0.8
,pl=0.2,pu=0.4,firstvalsampling=0,firstvalsamplingN=20,Nbound=15000,raise_err=1
,fixNpred=0,testmc=0,maxsteps=5,q=0):
'''
###################################################################
#Description:
Use Subset Simulation or Subset Simulation Interpolation to compute
the reliability of the strurel_object. Note that there are many
option parameters for scientific analysis of the properties of
the algorithms. By default, they are set to values that provide
good efficiency in most cases.
#Important Remark:
SuSI also uses the SuS parameters
as it is based on and utilizes ordinary SuS
###################################################################
Parameters:
########
#general
method: str, "sus" or "susi", default="sus"
choose method Subset Simulation or Subset Simulation Interpolation
########
#SuS parameters - obligatory (note these are lists to allow adaptiveness with
respect to the subset level)
Nlist: list of integer values, default=[500]
defines the sample number in each subset, starting with
the first list element Nlist[0] for Monte Carlo, then taking
Nlist[i] samples in subset i,
if last list element is reached, last element is taken for all
higher level subsets
e.g.
[500,400] (500 samples for Monte Carlo, then 400 for
all higher level subsets)
plist: list of float values in (0,1), default=0.1
defines the intermediate subset probability
as in Nlist, the list allows to select different values
for different subsets
e.g.
[0.1,0.3,0.5]
bstar: float, default=0.0
defines the threshold for failure of the structure
########
#SuS parameters - optional (alteration for scientific examination mostly,
default settings provide good efficiency)
fixcsteps: integer, default=10
defines the steps made to create the single MCMC chains,
for creation of new samples in function acs (see /sampling/acs)
palist: list of float values in (0,1), default=[0.2]
defines the percentage of chains in MCMC sampling after which
the proposal spread is updated
reuse: int in {0,1}, default=1
decide wheter to reuse sample seeds (1)
or not reuse sample seeds (0)
vers: str in {"a","b"}, default="b"
version "a" results in equally updated proposal spreads
for all variables in acs (see /sampling/acs),
version "b" weights according to the importance of specific
variables
saveU: int in {0,1}, default=0
                decide whether samples in Uspace and corresponding limitstates
in Xspace are saved (1) or not (0)
choose_seedN: integer>=0, default=0
if set zero, the parameter remains unused,
if >0, then we select choose_seedN seeds for MCMC
so the number of seeds is explicitly stated then
e.g.
10
dropbi: int in {0,1,2}, default=0
decide whether we drop sample seeds that are related to
the limit state threshold, these are not in the stationary
distribution, if 0 then nothing is dropped,
if 1 then samples with limit state value equal to the
threshold are dropped
if 2 also MCMC chain elements close to samples with
limit state value equal to the threshold are dropped,
samples are dropped up to distance "rem_s_samps=5"
from such samples within chains
seedplist: float (0,1], default=[1.0]
list of percentage of seeds used for MCMC sampling
note that fixcsteps overwrites this if set !=0
i0: float, default=0.0
changes the way we update the proposal spread
e_iter=1.0/math.sqrt(i0+chain/Na)
#######
#SuS parameters for parameter state model like results:
specify a probability where SuS stops calculation
otherwise SuS would not terminate (R>0)
boundpf: float value in (0,1)
sets the minimum of considered failure probabilities,
if it is reached, the algorithm terminates
e.g.
1e-12
#######
#SuSI parameters obligatory (+SuS parameters obligatory)
xk: str in names of strurel variables
name of dynamic variable
e.g.
"x0"
mono: str in {"i","d"}
monotonicity of the conditional failure function
if "i", then we assume the failure probability is
increasing if xk is increased
if "d", then we assume the failure probability is
decreasing if xk is increased
max_error: float in (0,1)
the maximum error allowed by approximation/extrapolation
this refers to the sum of both errors by extrapolation,
extrapolation above the maximum xk evaluated and
extrapolation below the minimum xk value evaluated
these parts of the domain are evaluated under the safety principle
by default
e.g.
1e-10,bi=0.0
#prediction step
Npred: positive integer, default=100
number of samples used to evaluate in the prediction step
predict_by_prev: int in {0,1},default=0
if 0, the prediction step is normally performed by extrapolation
if 1, previous results are used to select the next grid point for xk
prevNpredict: positive integer, default=1
only activate if predict_by_prev=1, then we use the average of
the results by interpolating with the prevNpredict last results
for selecting the optimal value for the next grid point for xk
prob_b: float in (0,1), default=0.8
this is the probability that sets the stopping criterion for
the prediction step, if we have a probability higher than prob_b
(by prediction) to have found a grid point that yields an
intermediate failure probability in [pl,pu], then we stop and take it
pl: float in (0,pu), default=0.2
lower boundary for admissible intermediate probability
note that (pl+pu)*0.5 as an intermediate probability
should refer to the desired grid point selection
pu: float in (pl,1), default=0.4
upper boundary for admissible intermediate probability
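            Example (illustrative, assuming a strurel object `s` whose failure
            probability increases with the dynamic variable "x0"):
                s.get_result(method="susi",xk="x0",mono="i",Nlist=[500],plist=[0.1],bstar=0.0)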
#######
#SuSI parameters optional
firstvalsampling: int in {0,1}, default=0
decide whether to start with interval search or SuS to
initialize SuSI, if 0 start by SuS
firstvalsamplingN: positive integer, default=20
defines the number of samples used for interval sampling,
only active in use if firstvalsampling=1
Nbound: positive integer, default=15000
this is the maximum number of samples used for evaluation of
intermediate probabilities; because we adapt the sample
number so that effort is distributed equally among
all subsets, depending on their predicted intermediate
probability, we can get very high values here if
the intermediate probability is close to zero
raise_err: int in {0,1}, default=1
raise an error if an intermediate probability is zero or one;
if set to 1, an error is raised, otherwise p=1 is replaced by a value
close to 1 and p=0 by a value very close to zero
fixNpred: positive integer, default=0
If fixNpred=0, we use only available samples in the given subset
otherwise we create fixNpred new ones, meaning that we guarantee
fixNpred samples for prediction (however this is more expensive
than fixNpred=0)
testmc: int in {0,1}, default=0
if activated (1) then we only do one Monte Carlo simulation
to check for failures given a specific xk value
maxsteps: positive integer, default=5
maximum number of steps for the prediction step;
if no satisfying candidate for
a grid point is found after this number of
predictions, we stop anyway and take the
currently best value
q: float in (0,1), default=0
quantile of | |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''Multigrid to compute DFT integrals'''
import ctypes
import copy
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.gto import ATOM_OF, ANG_OF, NPRIM_OF, PTR_EXP, PTR_COEFF
from pyscf.dft.numint import libdft
from pyscf.pbc import tools
from pyscf.pbc import gto
from pyscf.pbc.gto import pseudo
from pyscf.pbc.dft import numint, gen_grid
from pyscf.pbc.df.df_jk import _format_dms, _format_kpts_band, _format_jks
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf.pbc.df import fft
from pyscf.pbc.df import ft_ao
from pyscf import __config__
#sys.stderr.write('WARN: multigrid is an experimental feature. It is still in '
# 'testing\nFeatures and APIs may be changed in the future.\n')
BLKSIZE = numint.BLKSIZE
EXTRA_PREC = getattr(__config__, 'pbc_gto_eval_gto_extra_precision', 1e-2)
TO_EVEN_GRIDS = getattr(__config__, 'pbc_dft_multigrid_to_even', False)
RMAX_FACTOR_ORTH = getattr(__config__, 'pbc_dft_multigrid_rmax_factor_orth', 1.1)
RMAX_FACTOR_NONORTH = getattr(__config__, 'pbc_dft_multigrid_rmax_factor_nonorth', 0.5)
RMAX_RATIO = getattr(__config__, 'pbc_dft_multigrid_rmax_ratio', 0.7)
R_RATIO_SUBLOOP = getattr(__config__, 'pbc_dft_multigrid_r_ratio_subloop', 0.6)
INIT_MESH_ORTH = getattr(__config__, 'pbc_dft_multigrid_init_mesh_orth', (12,12,12))
INIT_MESH_NONORTH = getattr(__config__, 'pbc_dft_multigrid_init_mesh_nonorth', (32,32,32))
KE_RATIO = getattr(__config__, 'pbc_dft_multigrid_ke_ratio', 1.3)
TASKS_TYPE = getattr(__config__, 'pbc_dft_multigrid_tasks_type', 'ke_cut') # 'rcut'
# RHOG_HIGH_ORDER=True will compute the high order derivatives of electron
# density in real space and FT to reciprocal space. Set RHOG_HIGH_ORDER=False
# to approximate the density derivatives in reciprocal space (without
# evaluating the high order derivatives in real space).
RHOG_HIGH_ORDER = getattr(__config__, 'pbc_dft_multigrid_rhog_high_order', False)
PTR_EXPDROP = 16
EXPDROP = getattr(__config__, 'pbc_dft_multigrid_expdrop', 1e-12)
IMAG_TOL = 1e-9
def eval_mat(cell, weights, shls_slice=None, comp=1, hermi=0,
xctype='LDA', kpts=None, mesh=None, offset=None, submesh=None):
assert(all(cell._bas[:,NPRIM_OF] == 1))
atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,
cell._atm, cell._bas, cell._env)
env[PTR_EXPDROP] = min(cell.precision*EXTRA_PREC, EXPDROP)
ao_loc = gto.moleintor.make_loc(bas, 'cart')
if shls_slice is None:
shls_slice = (0, cell.nbas, 0, cell.nbas)
i0, i1, j0, j1 = shls_slice
j0 += cell.nbas
j1 += cell.nbas
naoi = ao_loc[i1] - ao_loc[i0]
naoj = ao_loc[j1] - ao_loc[j0]
if cell.dimension > 0:
Ls = numpy.asarray(cell.get_lattice_Ls(), order='C')
else:
Ls = numpy.zeros((1,3))
nimgs = len(Ls)
if mesh is None:
mesh = cell.mesh
weights = numpy.asarray(weights, order='C')
assert(weights.dtype == numpy.double)
xctype = xctype.upper()
n_mat = None
if xctype == 'LDA':
if weights.ndim == 1:
weights = weights.reshape(-1, numpy.prod(mesh))
else:
n_mat = weights.shape[0]
elif xctype == 'GGA':
if hermi == 1:
raise RuntimeError('hermi=1 is not supported for GGA functional')
if weights.ndim == 2:
weights = weights.reshape(-1, 4, numpy.prod(mesh))
else:
n_mat = weights.shape[0]
else:
raise NotImplementedError
a = cell.lattice_vectors()
b = numpy.linalg.inv(a.T)
if offset is None:
offset = (0, 0, 0)
if submesh is None:
submesh = mesh
# log_prec is used to estimate the gto_rcut. Add EXTRA_PREC to count
# other possible factors and coefficients in the integral.
log_prec = numpy.log(cell.precision * EXTRA_PREC)
if abs(a-numpy.diag(a.diagonal())).max() < 1e-12:
lattice_type = '_orth'
else:
lattice_type = '_nonorth'
eval_fn = 'NUMINTeval_' + xctype.lower() + lattice_type
drv = libdft.NUMINT_fill2c
def make_mat(weights):
if comp == 1:
mat = numpy.zeros((nimgs,naoj,naoi))
else:
mat = numpy.zeros((nimgs,comp,naoj,naoi))
drv(getattr(libdft, eval_fn),
weights.ctypes.data_as(ctypes.c_void_p),
mat.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(comp), ctypes.c_int(hermi),
(ctypes.c_int*4)(i0, i1, j0, j1),
ao_loc.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(log_prec),
ctypes.c_int(cell.dimension),
ctypes.c_int(nimgs),
Ls.ctypes.data_as(ctypes.c_void_p),
a.ctypes.data_as(ctypes.c_void_p),
b.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*3)(*offset), (ctypes.c_int*3)(*submesh),
(ctypes.c_int*3)(*mesh),
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(atm)),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(bas)),
env.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(env)))
return mat
out = []
for wv in weights:
if cell.dimension == 0:
mat = numpy.rollaxis(make_mat(wv)[0], -1, -2)
elif kpts is None or gamma_point(kpts):
mat = numpy.rollaxis(make_mat(wv).sum(axis=0), -1, -2)
if getattr(kpts, 'ndim', None) == 2:
mat = mat.reshape((1,)+mat.shape)
else:
mat = make_mat(wv)
mat_shape = mat.shape
expkL = numpy.exp(1j*kpts.reshape(-1,3).dot(Ls.T))
mat = numpy.dot(expkL, mat.reshape(nimgs,-1))
mat = numpy.rollaxis(mat.reshape((-1,)+mat_shape[1:]), -1, -2)
out.append(mat)
if n_mat is None:
out = out[0]
return out
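# Illustrative usage (an assumption, not taken from the original sources): with a
# pbc Cell object `cell` and LDA exchange-correlation weights `w_xc` collocated on
# `cell.mesh`, one would expect a call along the lines of
#     vmat = eval_mat(cell, w_xc, xctype='LDA', kpts=kpts)
# to return the matrix elements of the weighted potential in the Cartesian AO
# basis, accumulated over lattice images (with k-point phases applied when kpts
# is given). The names `w_xc` and `kpts` here are placeholders for this sketch.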
def eval_rho(cell, dm, shls_slice=None, hermi=0, xctype='LDA', kpts=None,
mesh=None, offset=None, submesh=None, ignore_imag=False,
out=None):
'''Collocate the *real* density (opt. gradients) on the real-space grid.
'''
assert(all(cell._bas[:,NPRIM_OF] == 1))
atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,
cell._atm, cell._bas, cell._env)
env[PTR_EXPDROP] = min(cell.precision*EXTRA_PREC, EXPDROP)
ao_loc = gto.moleintor.make_loc(bas, 'cart')
if shls_slice is None:
shls_slice = (0, cell.nbas, 0, cell.nbas)
i0, i1, j0, j1 = shls_slice
if hermi:
assert(i0 == j0 and i1 == j1)
j0 += cell.nbas
j1 += cell.nbas
naoi = ao_loc[i1] - ao_loc[i0]
naoj = ao_loc[j1] - ao_loc[j0]
dm = numpy.asarray(dm, order='C')
assert(dm.shape[-2:] == (naoi, naoj))
if cell.dimension > 0:
Ls = numpy.asarray(cell.get_lattice_Ls(), order='C')
else:
Ls = numpy.zeros((1,3))
if cell.dimension == 0 or kpts is None or gamma_point(kpts):
nkpts, nimgs = 1, Ls.shape[0]
dm = dm.reshape(-1,1,naoi,naoj).transpose(0,1,3,2)
else:
expkL = numpy.exp(1j*kpts.reshape(-1,3).dot(Ls.T))
nkpts, nimgs = expkL.shape
dm = dm.reshape(-1,nkpts,naoi,naoj).transpose(0,1,3,2)
n_dm = dm.shape[0]
a = cell.lattice_vectors()
b = numpy.linalg.inv(a.T)
if mesh is None:
mesh = cell.mesh
if offset is None:
offset = (0, 0, 0)
if submesh is None:
submesh = mesh
log_prec = numpy.log(cell.precision * EXTRA_PREC)
if abs(a-numpy.diag(a.diagonal())).max() < 1e-12:
lattice_type = '_orth'
else:
lattice_type = '_nonorth'
xctype = xctype.upper()
if xctype == 'LDA':
comp = 1
elif xctype == 'GGA':
if hermi == 1:
raise RuntimeError('hermi=1 is not supported for GGA functional')
comp = 4
else:
raise NotImplementedError('meta-GGA')
if comp == 1:
shape = (numpy.prod(submesh),)
else:
shape = (comp, numpy.prod(submesh))
eval_fn = 'NUMINTrho_' + xctype.lower() + lattice_type
drv = libdft.NUMINT_rho_drv
def make_rho_(rho, dm):
drv(getattr(libdft, eval_fn),
rho.ctypes.data_as(ctypes.c_void_p),
dm.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(comp), ctypes.c_int(hermi),
(ctypes.c_int*4)(i0, i1, j0, j1),
ao_loc.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(log_prec),
ctypes.c_int(cell.dimension),
ctypes.c_int(nimgs),
Ls.ctypes.data_as(ctypes.c_void_p),
a.ctypes.data_as(ctypes.c_void_p),
b.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*3)(*offset), (ctypes.c_int*3)(*submesh),
(ctypes.c_int*3)(*mesh),
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(atm)),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(bas)),
env.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(env)))
return rho
rho = []
for i, dm_i in enumerate(dm):
if out is None:
rho_i = numpy.zeros(shape)
else:
rho_i = out[i]
assert(rho_i.size == numpy.prod(shape))
if cell.dimension == 0:
# make a copy because the dm may be overwritten in the
# NUMINT_rho_drv inplace
make_rho_(rho_i, numpy.array(dm_i, order='C', copy=True))
elif kpts is None or gamma_point(kpts):
make_rho_(rho_i, numpy.repeat(dm_i, nimgs, axis=0))
else:
dm_i = lib.dot(expkL.T, dm_i.reshape(nkpts,-1)).reshape(nimgs,naoj,naoi)
dmR = numpy.asarray(dm_i.real, order='C')
if ignore_imag:
has_imag = False
else:
dmI = numpy.asarray(dm_i.imag, order='C')
has_imag = (hermi == 0 and abs(dmI).max() > 1e-8)
if (has_imag and xctype == 'LDA' and
naoi == naoj and
# For hermitian density matrices, the anti-symmetry character of the imaginary
# part of the density matrices can be found by rearranging the repeated images.
abs(dmI + dmI[::-1].transpose(0,2,1)).max() < 1e-8):
has_imag = False
dm_i = None
if has_imag:
if out is None:
rho_i = make_rho_(rho_i, dmI)*1j
rho_i += make_rho_(numpy.zeros(shape), dmR)
else:
out[i] = make_rho_(numpy.zeros(shape), dmI)*1j
out[i] += make_rho_(numpy.zeros(shape), dmR)
else:
assert(rho_i.dtype == numpy.double)
make_rho_(rho_i, dmR)
dmR = dmI = None
rho.append(rho_i)
if n_dm == 1:
rho = rho[0]
return rho
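# Illustrative usage (an assumption, not taken from the original sources): for a
# density matrix `dm` in the Cartesian AO basis, something like
#     rho = eval_rho(cell, dm, xctype='GGA', kpts=kpts)
# collocates the real-space density on `cell.mesh`; for GGA, comp=4 above means
# the leading axis of the result holds the density and, commonly, its three
# gradient components. `dm` and `kpts` are placeholders for this sketch.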
def get_nuc(mydf, kpts=None):
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
mesh = mydf.mesh
charge = -cell.atom_charges()
Gv = cell.get_Gv(mesh)
SI = cell.get_SI(Gv)
rhoG = numpy.dot(charge, SI)
coulG = tools.get_coulG(cell, mesh=mesh, Gv=Gv)
vneG = rhoG * coulG
vne = _get_j_pass2(mydf, vneG, kpts_lst)[0]
if kpts is None or numpy.shape(kpts) == (3,):
vne = vne[0]
return numpy.asarray(vne)
def get_pp(mydf, kpts=None):
'''Get the periodic pseudotential nuc-el AO matrix, with G=0 removed.
'''
from pyscf import gto
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
mesh = mydf.mesh
SI = cell.get_SI()
Gv = cell.get_Gv(mesh)
vpplocG = pseudo.get_vlocG(cell, Gv)
vpplocG = -numpy.einsum('ij,ij->j', SI, vpplocG)
# from get_jvloc_G0 function
vpplocG[0] = numpy.sum(pseudo.get_alphas(cell))
ngrids = len(vpplocG)
vpp = _get_j_pass2(mydf, vpplocG, kpts_lst)[0]
# vppnonloc evaluated in reciprocal space
fakemol = gto.Mole()
fakemol._atm = numpy.zeros((1,gto.ATM_SLOTS), dtype=numpy.int32)
fakemol._bas = numpy.zeros((1,gto.BAS_SLOTS), dtype=numpy.int32)
ptr = gto.PTR_ENV_START
fakemol._env = numpy.zeros(ptr+10)
fakemol._bas[0,gto.NPRIM_OF ] = 1
fakemol._bas[0,gto.NCTR_OF ] = 1
fakemol._bas[0,gto.PTR_EXP ] = ptr+3
fakemol._bas[0,gto.PTR_COEFF] = ptr+4
# buf for SPG_lmi upto l=0..3 and nl=3
buf = numpy.empty((48,ngrids), dtype=numpy.complex128)
def vppnl_by_k(kpt):
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
aokG = ft_ao.ft_ao(cell, Gv, kpt=kpt) * (ngrids/cell.vol)
vppnl = 0
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
p1 = 0
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl > 0:
fakemol._bas[0,gto.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*numpy.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
p0, p1 = p1, p1+nl*(l*2+1)
# pYlm is real, SI[ia] is complex
pYlm = numpy.ndarray((nl,l*2+1,ngrids), dtype=numpy.complex128, buffer=buf[p0:p1])
for k in range(nl):
| |
# openfold/model/primitives.py
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Callable, List, Tuple, Sequence
import numpy as np
import torch
import torch.nn as nn
from scipy.stats import truncnorm
from openfold.utils.tensor_utils import (
permute_final_dims,
flatten_final_dims,
_chunk_slice,
)
def _prod(nums):
out = 1
for n in nums:
out = out * n
return out
def _calculate_fan(linear_weight_shape, fan="fan_in"):
fan_out, fan_in = linear_weight_shape
if fan == "fan_in":
f = fan_in
elif fan == "fan_out":
f = fan_out
elif fan == "fan_avg":
f = (fan_in + fan_out) / 2
else:
raise ValueError("Invalid fan option")
return f
def trunc_normal_init_(weights, scale=1.0, fan="fan_in"):
shape = weights.shape
f = _calculate_fan(shape, fan)
scale = scale / max(1, f)
a = -2
b = 2
std = math.sqrt(scale) / truncnorm.std(a=a, b=b, loc=0, scale=1)
size = _prod(shape)
samples = truncnorm.rvs(a=a, b=b, loc=0, scale=std, size=size)
samples = np.reshape(samples, shape)
with torch.no_grad():
weights.copy_(torch.tensor(samples, device=weights.device))
def lecun_normal_init_(weights):
trunc_normal_init_(weights, scale=1.0)
def he_normal_init_(weights):
trunc_normal_init_(weights, scale=2.0)
def glorot_uniform_init_(weights):
nn.init.xavier_uniform_(weights, gain=1)
def final_init_(weights):
with torch.no_grad():
weights.fill_(0.0)
def gating_init_(weights):
with torch.no_grad():
weights.fill_(0.0)
def normal_init_(weights):
torch.nn.init.kaiming_normal_(weights, nonlinearity="linear")
def ipa_point_weights_init_(weights):
with torch.no_grad():
softplus_inverse_1 = 0.541324854612918
weights.fill_(softplus_inverse_1)
class Linear(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just
like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found
in the code.
"""
def __init__(
self,
in_dim: int,
out_dim: int,
bias: bool = True,
init: str = "default",
init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,
):
"""
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
init:
The initializer to use. Choose from:
"default": LeCun fan-in truncated normal initialization
"relu": He initialization w/ truncated normal distribution
"glorot": Fan-average Glorot uniform initialization
"gating": Weights=0, Bias=1
"normal": Normal initialization with std=1/sqrt(fan_in)
"final": Weights=0, Bias=0
Overridden by init_fn if the latter is not None.
init_fn:
A custom initializer taking weight and bias as inputs.
Overrides init if not None.
"""
super(Linear, self).__init__(in_dim, out_dim, bias=bias)
if bias:
with torch.no_grad():
self.bias.fill_(0)
if init_fn is not None:
init_fn(self.weight, self.bias)
else:
if init == "default":
lecun_normal_init_(self.weight)
elif init == "relu":
he_normal_init_(self.weight)
elif init == "glorot":
glorot_uniform_init_(self.weight)
elif init == "gating":
gating_init_(self.weight)
if bias:
with torch.no_grad():
self.bias.fill_(1.0)
elif init == "normal":
normal_init_(self.weight)
elif init == "final":
final_init_(self.weight)
else:
raise ValueError("Invalid init string.")
class Attention(nn.Module):
"""
Standard multi-head attention using AlphaFold's default layer
initialization. Allows multiple bias vectors.
"""
def __init__(
self,
c_q: int,
c_k: int,
c_v: int,
c_hidden: int,
no_heads: int,
gating: bool = True,
):
"""
Args:
c_q:
Input dimension of query data
c_k:
Input dimension of key data
c_v:
Input dimension of value data
c_hidden:
Per-head hidden dimension
no_heads:
Number of attention heads
gating:
Whether the output should be gated using query data
"""
super(Attention, self).__init__()
self.c_q = c_q
self.c_k = c_k
self.c_v = c_v
self.c_hidden = c_hidden
self.no_heads = no_heads
self.gating = gating
# DISCREPANCY: c_hidden is not the per-head channel dimension, as
# stated in the supplement, but the overall channel dimension.
self.linear_q = Linear(
self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot"
)
self.linear_k = Linear(
self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot"
)
self.linear_v = Linear(
self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot"
)
self.linear_o = Linear(
self.c_hidden * self.no_heads, self.c_q, init="final"
)
if self.gating:
self.linear_g = Linear(
self.c_q, self.c_hidden * self.no_heads, init="gating"
)
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=-1)
def forward(
self,
q_x: torch.Tensor,
k_x: torch.Tensor,
v_x: torch.Tensor,
biases: Optional[List[torch.Tensor]] = None,
) -> torch.Tensor:
"""
Args:
q_x:
[*, Q, C_q] query data
k_x:
[*, K, C_k] key data
v_x:
[*, V, C_v] value data
Returns
[*, Q, C_q] attention update
"""
# [*, Q/K/V, H * C_hidden]
q = self.linear_q(q_x)
k = self.linear_k(k_x)
v = self.linear_v(v_x)
# [*, Q/K, H, C_hidden]
q = q.view(q.shape[:-1] + (self.no_heads, -1))
k = k.view(k.shape[:-1] + (self.no_heads, -1))
v = v.view(v.shape[:-1] + (self.no_heads, -1))
# [*, H, Q, C_hidden]
q = permute_final_dims(q, (1, 0, 2))
# [*, H, C_hidden, K]
k = permute_final_dims(k, (1, 2, 0))
# [*, H, Q, K]
a = torch.matmul(q, k)
del q, k
norm = 1 / math.sqrt(self.c_hidden) # [1]
a *= norm
if biases is not None:
for b in biases:
a += b
a = self.softmax(a)
# [*, H, V, C_hidden]
v = permute_final_dims(v, (1, 0, 2))
# [*, H, Q, C_hidden]
o = torch.matmul(a, v)
# [*, Q, H, C_hidden]
o = o.transpose(-2, -3)
if self.gating:
g = self.sigmoid(self.linear_g(q_x))
# [*, Q, H, C_hidden]
g = g.view(g.shape[:-1] + (self.no_heads, -1))
o = o * g
# [*, Q, H * C_hidden]
o = flatten_final_dims(o, 2)
# [*, Q, C_q]
o = self.linear_o(o)
return o
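# Illustrative usage (an assumption, not part of the original file): for
# self-attention over an activation x of shape [*, N, C], with an additive bias
# already broadcastable to the [*, H, N, N] logits:
#     mha = Attention(c_q=C, c_k=C, c_v=C, c_hidden=32, no_heads=8)
#     out = mha(q_x=x, k_x=x, v_x=x, biases=[bias])   # out: [*, N, C]
# C, N, the head count and the bias tensor are placeholders for this sketch.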
class GlobalAttention(nn.Module):
def __init__(self, c_in, c_hidden, no_heads, inf, eps):
super(GlobalAttention, self).__init__()
self.c_in = c_in
self.c_hidden = c_hidden
self.no_heads = no_heads
self.inf = inf
self.eps = eps
self.linear_q = Linear(
c_in, c_hidden * no_heads, bias=False, init="glorot"
)
self.linear_k = Linear(
c_in, c_hidden, bias=False, init="glorot",
)
self.linear_v = Linear(
c_in, c_hidden, bias=False, init="glorot",
)
self.linear_g = Linear(c_in, c_hidden * no_heads, init="gating")
self.linear_o = Linear(c_hidden * no_heads, c_in, init="final")
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=-1)
def forward(self, m: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
# [*, N_res, C_in]
q = torch.sum(m * mask.unsqueeze(-1), dim=-2) / (
torch.sum(mask, dim=-1)[..., None] + self.eps
)
# [*, N_res, H * C_hidden]
q = self.linear_q(q)
q = q * (self.c_hidden ** (-0.5))
# [*, N_res, H, C_hidden]
q = q.view(q.shape[:-1] + (self.no_heads, -1))
# [*, N_res, N_seq, C_hidden]
k = self.linear_k(m)
v = self.linear_v(m)
# [*, N_res, H, N_seq]
a = torch.matmul(
q,
k.transpose(-1, -2), # [*, N_res, C_hidden, N_seq]
)
bias = (self.inf * (mask - 1))[..., :, None, :]
a = a + bias
a = self.softmax(a)
# [*, N_res, H, C_hidden]
o = torch.matmul(
a,
v,
)
# [*, N_res, N_seq, C_hidden]
g = self.sigmoid(self.linear_g(m))
# [*, N_res, N_seq, H, C_hidden]
g = g.view(g.shape[:-1] + (self.no_heads, -1))
# [*, N_res, N_seq, H, C_hidden]
o = o.unsqueeze(-3) * g
# [*, N_res, N_seq, H * C_hidden]
o = o.reshape(o.shape[:-2] + (-1,))
# [*, N_res, N_seq, C_in]
m = self.linear_o(o)
return m
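# Illustrative usage (an assumption, not part of the original file): m is an
# MSA-like activation of shape [*, N_res, N_seq, C_in] and mask has shape
# [*, N_res, N_seq]; a call such as
#     ga = GlobalAttention(c_in=C, c_hidden=8, no_heads=8, inf=1e9, eps=1e-10)
#     m_update = ga(m, mask)   # same shape as m
# forms mask-weighted average queries over N_seq before attending, as coded above.
# C and the inf/eps values are placeholder choices for this sketch.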
@torch.jit.script
def _lma(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
biases: List[torch.Tensor],
q_chunk_size: int,
kv_chunk_size: int
):
no_q, no_kv = q.shape[-3], k.shape[-3]
# [*, Q, H, C_hidden]
o = q.new_zeros(q.shape)
for q_s in range(0, no_q, q_chunk_size):
q_chunk = q[..., q_s: q_s + q_chunk_size, :, :]
big_bias_chunks = [
b[..., q_s: q_s + q_chunk_size, :] for b in biases
]
maxes = []
weights = []
values = []
for kv_s in range(0, no_kv, kv_chunk_size):
k_chunk = k[..., kv_s: kv_s + kv_chunk_size, :, :]
v_chunk = v[..., kv_s: kv_s + kv_chunk_size, :, :]
small_bias_chunks = [
b[..., kv_s: kv_s + kv_chunk_size] for b in big_bias_chunks
]
a = torch.einsum(
"...qhd,...khd->...hqk", q_chunk, k_chunk
)
for b in small_bias_chunks:
a += b
a = a.transpose(-2, -3)
max_a = torch.max(a, dim=-1, keepdim=True)[0].detach()
exp_a = torch.exp(a - max_a)
exp_v = torch.einsum("...vhf,...qhv->...qhf", v_chunk, exp_a)
maxes.append(max_a.squeeze(-1))
weights.append(torch.sum(exp_a, dim=-1))
values.append(exp_v)
chunk_max = torch.stack(maxes, dim=-3)
chunk_weights = torch.stack(weights, dim=-3)
chunk_values = torch.stack(values, dim=-4)
global_max = torch.max(chunk_max, dim=-3, keepdim=True)[0]
max_diffs = torch.exp(chunk_max - global_max)
chunk_values *= max_diffs.unsqueeze(-1)
chunk_weights *= max_diffs
all_values = torch.sum(chunk_values, dim=-4)
all_weights = torch.sum(chunk_weights.unsqueeze(-1), dim=-4)
q_chunk_out = all_values / all_weights
o[..., q_s: q_s + q_chunk_size, :, :] = q_chunk_out
return o
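# Descriptive note (added; an interpretation of the code above): _lma computes the
# same result as dense softmax attention but never materializes the full
# [*, H, Q, K] logit matrix. Queries are processed q_chunk_size at a time and
# keys/values kv_chunk_size at a time, keeping per-chunk maxima and partial sums
# so the softmax normalization can be assembled stably at the end, in the
# Rabe and Staats low-memory style referenced below.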
class LowMemoryAttention(nn.Module):
"""
Standard multi-head attention using AlphaFold's default layer
initialization. Allows multiple bias vectors. Implements Rabe and Staats'
low-memory self-attention algorithm.
"""
def __init__(
self,
c_q: int,
c_k: int,
c_v: int,
c_hidden: | |
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {} of\n{}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
    Inputs a table and either a subset of its columns as ``table_copy`` or a
    list or tuple of column names as ``names``, and returns an index
    corresponding to this subset, or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is None and table_copy is None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f'{names} is not a subset of table columns')
for name in names:
for index in table[name].info.indices:
if set([col.info.name for col in index.columns]) == names:
return index
return None
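# Illustrative usage (an assumption, not part of the original module):
#     idx = get_index(table, names=('a', 'b'))
# returns the Index covering exactly the columns 'a' and 'b' of `table`, or
# None when no such index exists; passing table_copy instead of names derives
# the column set from the sub-table's colnames. 'a' and 'b' are placeholder
# column names for this sketch.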
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
    names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif | |
nnotwall = np.sum(~wall)
weight[:] = 0
if nnotwall > 0:
weight[~wall] = (1 / nnotwall)
else:
raise RuntimeError('No non-wall cells surrounding cell. '
'Please report error.')
weight = weight / np.sum(weight)
else:
raise RuntimeError('Water sum(weight) less than 0. '
'Please report error.')
# final sanity check
if np.any(np.isnan(weight)):
raise RuntimeError('NaN encountered in return from water weighting. '
'Please report error.')
return weight
# @njit('(float32[:,:], float32[:,:], int64[:,:], float32[:,:], float32[:,:],'
# 'float32[:], float32[:], float32[:],'
# 'float64, float64, float64)')
@njit
def _get_water_weight_array(depth, stage, mod_water_weight, cell_type, qx, qy,
ivec_flat, jvec_flat, distances_flat,
dry_depth, gamma, theta_water):
"""Worker for :obj:`_get_water_weight_array`.
This is a jitted function which handles the actual computation of looping
through locations of the model domain and determining the water
weighting.
See :meth:`get_water_weight_array` for more information.
.. note::
If you are trying to change water weighting behavior, consider
reimplementing this method, which calls a custom version of
:func:`_get_weight_at_cell_water`.
"""
L, W = depth.shape
pad_stage = shared_tools.custom_pad(stage)
pad_depth = shared_tools.custom_pad(depth)
mod_pad_water_weight = shared_tools.custom_pad(mod_water_weight)
pad_cell_type = shared_tools.custom_pad(cell_type)
water_weights = np.zeros((L, W, 9))
for i in range(L):
for j in range(W):
stage_nbrs = pad_stage[i - 1 + 1:i + 2 + 1, j - 1 + 1:j + 2 + 1]
depth_nbrs = pad_depth[i - 1 + 1:i + 2 + 1, j - 1 + 1:j + 2 + 1]
mod_water_weight_nbrs = mod_pad_water_weight[i - 1 + 1:i + 2 + 1, j - 1 + 1:j + 2 + 1]
ct_nbrs = pad_cell_type[i - 1 + 1:i + 2 + 1, j - 1 + 1:j + 2 + 1]
weight_sfc, weight_int = shared_tools.get_weight_sfc_int(
stage[i, j], stage_nbrs.ravel(),
qx[i, j], qy[i, j], ivec_flat, jvec_flat,
distances_flat)
water_weights[i, j] = _get_weight_at_cell_water(
(i, j), weight_sfc, weight_int,
depth_nbrs.ravel(), mod_water_weight_nbrs.ravel(), ct_nbrs.ravel(),
dry_depth, gamma, theta_water)
return water_weights
@njit('int64[:](int64[:], float64[:,:])')
def _choose_next_directions(inds: np.ndarray, water_weights: np.ndarray) -> np.ndarray:
"""Get new cell locations, based on water weights.
Algorithm is to:
1. loop through each parcel, which is described by a pair in the
`inds` array.
2. determine the water weights for that location (from `water_weights`)
3. choose a new cell based on the probabilities of the weights (using
the `random_pick` function)
Parameters
----------
inds : :obj:`ndarray`
Current unraveled indices of the parcels. ``(N,)`` `ndarray`
containing the unraveled indices.
water_weights : :obj:`ndarray`
Weights of every water cell. ``(LxW, 9)`` `ndarray`, uses unraveled
        indices along 0th dimension; 9 cells represent self and 8 neighboring
cells.
Returns
-------
next_direction : :obj:`ndarray`
The direction to move towards the new cell for water parcels, relative
to the current location. I.e., this is the D8 direction the parcel is
going to travel in the next stage,
:obj:`pyDeltaRCM.shared_tools._calculate_new_ind`.
"""
next_direction = np.zeros_like(inds)
for p in range(inds.shape[0]):
ind = inds[p]
if ind != 0:
weight = water_weights[ind, :]
next_direction[p] = shared_tools.random_pick(weight)
else:
next_direction[p] = 4
return next_direction
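# Illustrative example (added, with hypothetical values): for parcels at flat
# indices inds = [523, 0], the first parcel draws a D8 direction from
# water_weights[523, :] via random_pick, while the second (ind == 0) is assigned
# direction 4, i.e. no movement, which is consistent with _calculate_new_inds
# below keeping such parcels at flat index 0.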
@njit('int64[:](int64[:], int64[:], int64[:])')
def _calculate_new_inds(current_inds: np.ndarray, new_direction, ravel_walk):
"""Calculate the new location (current_inds) of parcels.
Use the information of the current parcel (`current_inds`) in conjunction
with the D8 direction the parcel needs to travel (`new_direction`) to
determine the new current_inds of each parcel.
In implementation, we use the flattened `ravel_walk` array, but the result
is identical to unraveling the index, adding `iwalk` and `jwalk` to the
location and then raveling the index back.
.. code::
ind_tuple = shared_tools.custom_unravel(ind, domain_shape)
new_ind = (ind_tuple[0] + jwalk[newd],
ind_tuple[1] + iwalk[newd])
new_inds[p] = shared_tools.custom_ravel(new_ind, domain_shape)
"""
# preallocate return array
new_inds = np.zeros_like(current_inds)
# loop through every parcel
for p in range(current_inds.shape[0]):
# extract current_ind and direction
ind = current_inds[p]
newd = new_direction[p]
# check if the parcel moves
if newd != 4:
# if moves, compute new ind for parcel
new_inds[p] = ind + ravel_walk[newd]
else:
# if not moves, set new ind to 0
# (should be only those already at 0)
new_inds[p] = 0
return new_inds
@njit
def _check_for_loops(free_surf_walk_inds: np.ndarray, new_inds, _step: int,
L0: int, CTR: int, stage_above_SL: np.ndarray):
"""Check for loops in water parcel pathways.
Look for looping random walks. I.e., this function checks for where a
parcel will return on its :obj:`new_inds` to somewhere it has already been
in :obj:`free_surf_walk_inds`. If the loop is found, the parcel is
relocated along the mean transport vector of the parcel, which is computed
as the vector from the cell `(0, CTR)` to the new location in `new_inds`.
This implementation of loop checking will relocate any parcel that has
looped, but only disqualifies a parcel `p` from contributing to the free
surface in :obj:`_accumulate_free_surf_walks` (i.e., `looped[p] == 1`) if
the stage at the looped location is above the sea level in the domain.
Parameters
----------
free_surf_walk_inds
Array recording the walk of parcels. Shape is `(:obj:`Np_water`,
...)`, where the second dimension will depend on the step number, but
records each step of the parcel. Each element in the array records the
*flat* index into the domain.
new_inds
Array recording the new index for each water parcel, if the step is
taken. Shape is `(Np_water, 1)`, with each element recording
the *flat* index into the domain shape.
_step
Step number of water parcels.
L0
Domain shape parameter, number of cells inlet length.
CTR
Domain shape parameter, index along inlet wall making the center of
the domain. I.e., `(0, CTR)` is the midpoint across the inlet, along
the inlet domain edge.
stage_above_SL
Water surface elevation minus the domain sea level.
Returns
-------
new_inds
An updated array of parcel indicies, where the index of a parcel has
been changed, if and only if, that parcel was looped.
looped
A binary integer array indicating whether a parcel was determined to
have been looped, and should be disqualified from the free surface
computation.
Examples
--------
The following shows an example of how water parcels that looped along
their paths would be relocated. Note than in this example, the parcels are
artificially forced to loop, just for the sake of demonstration.
.. plot:: water_tools/_check_for_loops.py
"""
nparcels = free_surf_walk_inds.shape[0]
domain_shape = stage_above_SL.shape
domain_min_x = domain_shape[0] - 2
domain_min_y = domain_shape[1] - 2
L0_ind_cut = ((L0) * domain_shape[1])-1
looped = np.zeros_like(new_inds)
stage_v_SL = np.abs(stage_above_SL) < 1e-1 # true if they are same
# if the _step number is larger than the inlet length
if (_step > L0):
# loop though every parcel walk
for p in np.arange(nparcels):
new_ind = new_inds[p] # the new index of the parcel
full_walk = free_surf_walk_inds[p, :] # the parcel's walk
nonz_walk = full_walk[full_walk > 0] # where non-zero
relv_walk = nonz_walk[nonz_walk > L0_ind_cut]
if (new_ind > 0):
# determine if has a repeat ind
has_repeat_ind = False
for _, iind in enumerate(relv_walk):
if iind == new_ind:
has_repeat_ind = True
break
if has_repeat_ind:
# handle when a loop is detected
px0, py0 = shared_tools.custom_unravel(
new_ind, domain_shape)
# compute a new location for the parcel along the
# mean-transport vector
Fx = px0 - 1
Fy = py0 - CTR
Fw = np.sqrt(Fx**2 + Fy**2)
# relocate the parcel along mean-transport vector
if Fw != 0:
px = px0 + int(np.round(Fx / Fw * 5.))
py = py0 + int(np.round(Fy / Fw * 5.))
# limit the new px and py to beyond the inlet, and
# away from domain edges
px = np.minimum(domain_min_x, np.maximum(px, L0))
py = np.minimum(domain_min_y, np.maximum(1, py))
# ravel the index for return
nind = shared_tools.custom_ravel((px, py), domain_shape)
new_inds[p] = nind
# only disqualify the parcel if it has not reached sea
# level by the time it loops
if not stage_v_SL[px0, py0]:
looped[p] = 1 # this parcel is looped
return new_inds, looped
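# Worked example (added, hypothetical numbers): with CTR = 50 and a looped parcel
# detected at (px0, py0) = (20, 80): Fx = 19, Fy = 30, Fw = sqrt(19**2 + 30**2) ~ 35.5,
# so the relocation step is (round(19/35.5*5), round(30/35.5*5)) = (3, 4) and the
# parcel moves to (23, 84), before clipping to [L0, domain_min_x] x [1, domain_min_y].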
@njit
def _update_dirQfield(qfield, dist, inds, astep, dirstep):
"""Update unit vector of water flux in x or y."""
for i, ii in enumerate(inds):
if astep[i]:
qfield[ii] += dirstep[i] / dist[i]
return qfield
@njit
def _update_absQfield(qfield, dist, inds, astep, Qp_water, dx):
"""Update norm of water flux vector."""
for i, ii in enumerate(inds):
if astep[i]:
qfield[ii] += Qp_water / dx / 2
return qfield
@njit
def _accumulate_free_surface_walks(free_surf_walk_inds: np.ndarray,
free_surf_flag: np.ndarray,
cell_type: np.ndarray, uw: np.ndarray,
ux: np.ndarray, uy: np.ndarray,
depth: np.ndarray,
| |
"""Specifies that no values are allowed for this parameter or quantity."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if NoValues.subclass:
return NoValues.subclass(*args_, **kwargs_)
else:
return NoValues(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='NoValues', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='NoValues')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NoValues'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='NoValues', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NoValues'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class NoValues
class ValuesReference(GeneratedsSuper):
"""Reference to externally specified list of all the valid values
and/or ranges of values for this quantity. (Informative: This
element was simplified from the metaDataProperty element in GML
3.0.) Human-readable name of the list of values provided by the
referenced document. Can be empty string when this list has no
name."""
subclass = None
superclass = None
def __init__(self, reference=None, valueOf_=None):
self.reference = _cast(None, reference)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ValuesReference.subclass:
return ValuesReference.subclass(*args_, **kwargs_)
else:
return ValuesReference(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='ValuesReference', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ValuesReference')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ValuesReference'):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
outfile.write(' reference=%s' % (self.gds_format_string(quote_attrib(self.reference).encode(ExternalEncoding), input_name='reference'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ValuesReference', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ValuesReference'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
showIndent(outfile, level)
outfile.write('reference = "%s",\n' % (self.reference,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.append('reference')
self.reference = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ValuesReference
class AllowedValues(GeneratedsSuper):
"""List of all the valid values and/or ranges of values for this
quantity. For numeric quantities, signed values should be
ordered from negative infinity to positive infinity."""
subclass = None
superclass = None
def __init__(self, Value=None, Range=None):
if Value is None:
self.Value = []
else:
self.Value = Value
if Range is None:
self.Range = []
else:
self.Range = Range
def factory(*args_, **kwargs_):
if AllowedValues.subclass:
return AllowedValues.subclass(*args_, **kwargs_)
else:
return AllowedValues(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def add_Value(self, value): self.Value.append(value)
def insert_Value(self, index, value): self.Value[index] = value
def get_Range(self): return self.Range
def set_Range(self, Range): self.Range = Range
def add_Range(self, value): self.Range.append(value)
def insert_Range(self, index, value): self.Range[index] = value
def export(self, outfile, level, namespace_='', name_='AllowedValues', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AllowedValues')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllowedValues'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AllowedValues', fromsubclass_=False):
for Value_ in self.Value:
Value_.export(outfile, level, namespace_, name_='Value')
for Range_ in self.Range:
Range_.export(outfile, level, namespace_, name_='Range')
def hasContent_(self):
if (
self.Value or
self.Range
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AllowedValues'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Value=[\n')
level += 1
for Value_ in self.Value:
showIndent(outfile, level)
outfile.write('model_.Value(\n')
Value_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Range=[\n')
level += 1
for Range_ in self.Range:
showIndent(outfile, level)
outfile.write('model_.Range(\n')
Range_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Value':
obj_ = ValueType.factory()
obj_.build(child_)
self.Value.append(obj_)
elif nodeName_ == 'Range':
obj_ = RangeType.factory()
obj_.build(child_)
self.Range.append(obj_)
# end class AllowedValues
class ValueType(GeneratedsSuper):
"""A single value, encoded as a string. This type can be used for one
value, for a spacing between allowed values, or for the default
value of a parameter."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ValueType.subclass:
return ValueType.subclass(*args_, **kwargs_)
else:
return ValueType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='ValueType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ValueType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ValueType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ValueType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ValueType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ValueType
class RangeType(GeneratedsSuper):
"""A range of values of a numeric parameter. This range can be
continuous or discrete, defined by a fixed spacing between
adjacent valid values. If the MinimumValue or MaximumValue is
not included, there is no value limit in that direction.
Inclusion of the specified minimum and maximum values in the
range shall be defined by the rangeClosure. Shall be included
unless the default value applies."""
subclass = None
superclass = None
def __init__(self, rangeClosure=None, MinimumValue=None, MaximumValue=None, Spacing=None):
self.rangeClosure = _cast(None, rangeClosure)
self.MinimumValue = MinimumValue
self.MaximumValue = MaximumValue
self.Spacing = Spacing
def factory(*args_, **kwargs_):
if RangeType.subclass:
return RangeType.subclass(*args_, **kwargs_)
else:
return RangeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_MinimumValue(self): return self.MinimumValue
def set_MinimumValue(self, MinimumValue): self.MinimumValue = MinimumValue
def get_MaximumValue(self): return self.MaximumValue
def set_MaximumValue(self, MaximumValue): self.MaximumValue = MaximumValue
def get_Spacing(self): return self.Spacing
def set_Spacing(self, Spacing): self.Spacing = Spacing
def get_rangeClosure(self): return self.rangeClosure
def set_rangeClosure(self, rangeClosure): self.rangeClosure = rangeClosure
def export(self, outfile, level, namespace_='', name_='RangeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='RangeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RangeType'):
if self.rangeClosure is not None and 'rangeClosure' not in already_processed:
already_processed.append('rangeClosure')
outfile.write(' rangeClosure=%s' % (self.gds_format_string(quote_attrib(self.rangeClosure).encode(ExternalEncoding), input_name='rangeClosure'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RangeType', fromsubclass_=False):
if self.MinimumValue:
self.MinimumValue.export(outfile, level, namespace_, name_='MinimumValue')
if self.MaximumValue:
self.MaximumValue.export(outfile, level, namespace_, name_='MaximumValue')
if self.Spacing:
self.Spacing.export(outfile, level, namespace_, name_='Spacing')
def hasContent_(self):
if (
self.MinimumValue is not None or
self.MaximumValue is not None or
self.Spacing | |
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class FilePartialUpdate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'files': 'list[BasicFile]',
'parent': 'str',
'mode': 'str',
'uid': 'int',
'gid': 'int',
'user': 'str',
'group': 'str',
'recursive': 'bool',
'affinity': 'str',
'mode_setuid': 'bool',
'mode_setgid': 'bool',
'mode_setvfx': 'bool',
'mode_user_read': 'bool',
'mode_user_write': 'bool',
'mode_user_execute': 'bool',
'mode_group_read': 'bool',
'mode_group_write': 'bool',
'mode_group_execute': 'bool',
'mode_others_read': 'bool',
'mode_others_write': 'bool',
'mode_others_execute': 'bool'
}
attribute_map = {
'name': 'name',
'files': 'files',
'parent': 'parent',
'mode': 'mode',
'uid': 'uid',
'gid': 'gid',
'user': 'user',
'group': 'group',
'recursive': 'recursive',
'affinity': 'affinity',
'mode_setuid': 'mode_setuid',
'mode_setgid': 'mode_setgid',
'mode_setvfx': 'mode_setvfx',
'mode_user_read': 'mode_user_read',
'mode_user_write': 'mode_user_write',
'mode_user_execute': 'mode_user_execute',
'mode_group_read': 'mode_group_read',
'mode_group_write': 'mode_group_write',
'mode_group_execute': 'mode_group_execute',
'mode_others_read': 'mode_others_read',
'mode_others_write': 'mode_others_write',
'mode_others_execute': 'mode_others_execute'
}
def __init__(self, name=None, files=None, parent=None, mode=None, uid=None, gid=None, user=None, group=None, recursive=None, affinity=None, mode_setuid=None, mode_setgid=None, mode_setvfx=None, mode_user_read=None, mode_user_write=None, mode_user_execute=None, mode_group_read=None, mode_group_write=None, mode_group_execute=None, mode_others_read=None, mode_others_write=None, mode_others_execute=None, local_vars_configuration=None): # noqa: E501
"""FilePartialUpdate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._files = None
self._parent = None
self._mode = None
self._uid = None
self._gid = None
self._user = None
self._group = None
self._recursive = None
self._affinity = None
self._mode_setuid = None
self._mode_setgid = None
self._mode_setvfx = None
self._mode_user_read = None
self._mode_user_write = None
self._mode_user_execute = None
self._mode_group_read = None
self._mode_group_write = None
self._mode_group_execute = None
self._mode_others_read = None
self._mode_others_write = None
self._mode_others_execute = None
self.discriminator = None
if name is not None:
self.name = name
self.files = files
if parent is not None:
self.parent = parent
if mode is not None:
self.mode = mode
if uid is not None:
self.uid = uid
if gid is not None:
self.gid = gid
if user is not None:
self.user = user
if group is not None:
self.group = group
if recursive is not None:
self.recursive = recursive
self.affinity = affinity
if mode_setuid is not None:
self.mode_setuid = mode_setuid
if mode_setgid is not None:
self.mode_setgid = mode_setgid
if mode_setvfx is not None:
self.mode_setvfx = mode_setvfx
if mode_user_read is not None:
self.mode_user_read = mode_user_read
if mode_user_write is not None:
self.mode_user_write = mode_user_write
if mode_user_execute is not None:
self.mode_user_execute = mode_user_execute
if mode_group_read is not None:
self.mode_group_read = mode_group_read
if mode_group_write is not None:
self.mode_group_write = mode_group_write
if mode_group_execute is not None:
self.mode_group_execute = mode_group_execute
if mode_others_read is not None:
self.mode_others_read = mode_others_read
if mode_others_write is not None:
self.mode_others_write = mode_others_write
if mode_others_execute is not None:
self.mode_others_execute = mode_others_execute
@property
def name(self):
"""Gets the name of this FilePartialUpdate. # noqa: E501
:return: The name of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FilePartialUpdate.
:param name: The name of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def files(self):
"""Gets the files of this FilePartialUpdate. # noqa: E501
:return: The files of this FilePartialUpdate. # noqa: E501
:rtype: list[BasicFile]
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this FilePartialUpdate.
:param files: The files of this FilePartialUpdate. # noqa: E501
:type: list[BasicFile]
"""
self._files = files
@property
def parent(self):
"""Gets the parent of this FilePartialUpdate. # noqa: E501
:return: The parent of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""Sets the parent of this FilePartialUpdate.
:param parent: The parent of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
parent is not None and len(parent) < 1):
raise ValueError("Invalid value for `parent`, length must be greater than or equal to `1`") # noqa: E501
self._parent = parent
@property
def mode(self):
"""Gets the mode of this FilePartialUpdate. # noqa: E501
:return: The mode of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this FilePartialUpdate.
:param mode: The mode of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
mode is not None and len(mode) < 1):
raise ValueError("Invalid value for `mode`, length must be greater than or equal to `1`") # noqa: E501
self._mode = mode
@property
def uid(self):
"""Gets the uid of this FilePartialUpdate. # noqa: E501
:return: The uid of this FilePartialUpdate. # noqa: E501
:rtype: int
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this FilePartialUpdate.
:param uid: The uid of this FilePartialUpdate. # noqa: E501
:type: int
"""
self._uid = uid
@property
def gid(self):
"""Gets the gid of this FilePartialUpdate. # noqa: E501
:return: The gid of this FilePartialUpdate. # noqa: E501
:rtype: int
"""
return self._gid
@gid.setter
def gid(self, gid):
"""Sets the gid of this FilePartialUpdate.
:param gid: The gid of this FilePartialUpdate. # noqa: E501
:type: int
"""
self._gid = gid
@property
def user(self):
"""Gets the user of this FilePartialUpdate. # noqa: E501
:return: The user of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this FilePartialUpdate.
:param user: The user of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
user is not None and len(user) < 1):
raise ValueError("Invalid value for `user`, length must be greater than or equal to `1`") # noqa: E501
self._user = user
@property
def group(self):
"""Gets the group of this FilePartialUpdate. # noqa: E501
:return: The group of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this FilePartialUpdate.
:param group: The group of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
group is not None and len(group) < 1):
raise ValueError("Invalid value for `group`, length must be greater than or equal to `1`") # noqa: E501
self._group = group
@property
def recursive(self):
"""Gets the recursive of this FilePartialUpdate. # noqa: E501
:return: The recursive of this FilePartialUpdate. # noqa: E501
:rtype: bool
"""
return self._recursive
@recursive.setter
def recursive(self, recursive):
"""Sets the recursive of this FilePartialUpdate.
:param recursive: The recursive of this FilePartialUpdate. # noqa: E501
:type: bool
"""
self._recursive = recursive
@property
def affinity(self):
"""Gets the affinity of this FilePartialUpdate. # noqa: E501
:return: The affinity of this FilePartialUpdate. # noqa: E501
:rtype: str
"""
return self._affinity
@affinity.setter
def affinity(self, affinity):
"""Sets the affinity of this FilePartialUpdate.
:param affinity: The affinity of this FilePartialUpdate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
affinity is not None and len(affinity) < 1):
raise ValueError("Invalid value for `affinity`, length must be greater than or equal to `1`") # noqa: E501
self._affinity = affinity
@property
def mode_setuid(self):
"""Gets the mode_setuid of this FilePartialUpdate. # noqa: E501
:return: The mode_setuid of this FilePartialUpdate. # noqa: E501
:rtype: bool
"""
return self._mode_setuid
@mode_setuid.setter
def mode_setuid(self, mode_setuid):
"""Sets the mode_setuid of this FilePartialUpdate.
:param mode_setuid: The mode_setuid of this FilePartialUpdate. # noqa: E501
:type: bool
"""
self._mode_setuid = mode_setuid
@property
def mode_setgid(self):
"""Gets the mode_setgid of this FilePartialUpdate. # noqa: E501
:return: The mode_setgid of this FilePartialUpdate. # noqa: E501
:rtype: bool
"""
return self._mode_setgid
@mode_setgid.setter
def mode_setgid(self, mode_setgid):
"""Sets the mode_setgid of this FilePartialUpdate.
:param mode_setgid: The mode_setgid of this FilePartialUpdate. # noqa: E501
:type: bool
"""
self._mode_setgid = mode_setgid
@property
def mode_setvfx(self):
"""Gets the mode_setvfx of this FilePartialUpdate. # noqa: E501
:return: The mode_setvfx of this FilePartialUpdate. # noqa: E501
:rtype: bool
"""
return self._mode_setvfx
@mode_setvfx.setter
def mode_setvfx(self, mode_setvfx):
"""Sets the mode_setvfx of this | |
xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x0c\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_oqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x0d\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgepd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x0e\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgtpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x0f\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmptruepd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x10\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpeq_ospd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmplt_oqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x12\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmple_oqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x13\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpunord_spd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x14\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_uspd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x15\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnlt_uqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x16\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnle_uqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x17\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpord_spd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x18\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpeq_uspd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x19\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnge_uqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1a\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpngt_uqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1b\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpfalse_ospd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1c\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_ospd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1d\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpge_oqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1e\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgt_oqpd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x81\xc2\x00\x1f\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 128)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmptrue_uspd xmm8, xmm15, xmmword ptr [r8]')
Buffer = b'\xc4\x01\x05\xc2\x00\x1f\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + AVX_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 256)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + AVX_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 256)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 256)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmptrue_uspd ymm8, ymm15, ymmword ptr [r8]')
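    # A possible refactoring sketch, not part of the original test module: the
    # vcmppd cases above repeat the same decode-and-check steps for every
    # immediate, so a helper keeps each case to a single call. It reuses only
    # names already used above (DISASM, Disasm, create_string_buffer, addressof,
    # assert_equal).
    def check_complete_instr(self, buffer_bytes, expected_instr):
        # Decode buffer_bytes in 64-bit mode and check the rendered mnemonic.
        myDisasm = DISASM()
        myDisasm.Archi = 64
        target = create_string_buffer(buffer_bytes, len(buffer_bytes))
        myDisasm.EIP = addressof(target)
        Disasm(addressof(myDisasm))
        assert_equal(myDisasm.CompleteInstr, expected_instr)
    # Example, equivalent to one of the cases above:
    #   self.check_complete_instr(b'\xc4\x01\x81\xc2\x00\x0d' + b'\x11' * 9,
    #                             'vcmpgepd xmm8, xmm15, xmmword ptr [r8]')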
def test_cmpps(self):
Buffer = b'\x0F\xC2\x00\x00\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG0)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
        assert_equal(myDisasm.Argument3.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from copy import deepcopy
from numbers import Number
from typing import Any, Dict, Iterator, Iterable, Mapping, Optional, Sequence, Tuple, Union
from typing import TYPE_CHECKING
import numpy as np
try:
from numpy.typing import ArrayLike, DTypeLike
except ImportError:
ArrayLike = Any
DTypeLike = Any
from dimod.decorators import forwarding_method
from dimod.quadratic.cyqm import cyQM_float32, cyQM_float64
from dimod.typing import Variable, Bias, VartypeLike
from dimod.variables import Variables
from dimod.vartypes import Vartype
from dimod.views.quadratic import QuadraticViewsMixin
if TYPE_CHECKING:
# avoid circular imports
from dimod import BinaryQuadraticModel
__all__ = ['QuadraticModel', 'QM', 'Integer']
Vartypes = Union[Mapping[Variable, Vartype], Iterable[Tuple[Variable, VartypeLike]]]
class QuadraticModel(QuadraticViewsMixin):
_DATA_CLASSES = {
np.dtype(np.float32): cyQM_float32,
np.dtype(np.float64): cyQM_float64,
}
DEFAULT_DTYPE = np.float64
"""The default dtype used to construct the class."""
def __init__(self,
linear: Optional[Mapping[Variable, Bias]] = None,
quadratic: Optional[Mapping[Tuple[Variable, Variable], Bias]] = None,
offset: Bias = 0,
vartypes: Optional[Vartypes] = None,
*,
dtype: Optional[DTypeLike] = None):
dtype = np.dtype(self.DEFAULT_DTYPE) if dtype is None else np.dtype(dtype)
self.data = self._DATA_CLASSES[np.dtype(dtype)]()
if vartypes is not None:
if isinstance(vartypes, Mapping):
vartypes = vartypes.items()
for v, vartype in vartypes:
self.add_variable(vartype, v)
self.set_linear(v, 0)
# todo: in the future we can support more types for construction, but
# let's keep it simple for now
if linear is not None:
for v, bias in linear.items():
self.add_linear(v, bias)
if quadratic is not None:
for (u, v), bias in quadratic.items():
self.add_quadratic(u, v, bias)
self.offset += offset
def __deepcopy__(self, memo: Dict[int, Any]) -> 'QuadraticModel':
new = type(self).__new__(type(self))
new.data = deepcopy(self.data, memo)
memo[id(self)] = new
return new
def __repr__(self):
vartypes = {v: self.vartype(v).name for v in self.variables}
return (f"{type(self).__name__}({self.linear}, {self.quadratic}, "
f"{self.offset}, {vartypes}, dtype={self.dtype.name!r})")
def __add__(self, other: Union['QuadraticModel', Bias]) -> 'QuadraticModel':
        # in python 3.8+ we could do this with functools.singledispatchmethod
if isinstance(other, QuadraticModel):
new = self.copy()
new.update(other)
return new
if isinstance(other, Number):
new = self.copy()
new.offset += other
return new
return NotImplemented
def __iadd__(self, other: Union['QuadraticModel', Bias]) -> 'QuadraticModel':
        # in python 3.8+ we could do this with functools.singledispatchmethod
if isinstance(other, QuadraticModel):
self.update(other)
return self
if isinstance(other, Number):
self.offset += other
return self
return NotImplemented
def __radd__(self, other: Bias) -> 'QuadraticModel':
# should only miss on number
if isinstance(other, Number):
new = self.copy()
new.offset += other
return new
return NotImplemented
def __mul__(self, other: Union['QuadraticModel', Bias]) -> 'QuadraticModel':
if isinstance(other, QuadraticModel):
if not (self.is_linear() and other.is_linear()):
raise TypeError(
"cannot multiply QMs with interactions")
# todo: performance
new = type(self)(dtype=self.dtype)
for v in self.variables:
new.add_variable(self.vartype(v), v)
for v in other.variables:
new.add_variable(other.vartype(v), v)
self_offset = self.offset
other_offset = other.offset
for u, ubias in self.linear.items():
for v, vbias in other.linear.items():
if u == v:
u_vartype = self.vartype(u)
if u_vartype is Vartype.BINARY:
new.add_linear(u, ubias*vbias)
elif u_vartype is Vartype.SPIN:
new.offset += ubias * vbias
elif u_vartype is Vartype.INTEGER:
new.add_quadratic(u, v, ubias*vbias)
else:
raise RuntimeError("unexpected vartype")
else:
new.add_quadratic(u, v, ubias * vbias)
new.add_linear(u, ubias * other_offset)
            for v, bias in other.linear.items():
                new.add_linear(v, bias*self_offset)
            # the product of the two constant offsets also belongs in the result
            new.offset += self_offset * other_offset
            return new
if isinstance(other, Number):
new = self.copy()
new.scale(other)
return new
return NotImplemented
def __imul__(self, other: Bias) -> 'QuadraticModel':
# in-place multiplication is only defined for numbers
if isinstance(other, Number):
raise NotImplementedError
return NotImplemented
def __rmul__(self, other: Bias) -> 'QuadraticModel':
# should only miss on number
if isinstance(other, Number):
            return self * other  # commutative
return NotImplemented
def __neg__(self: 'QuadraticModel') -> 'QuadraticModel':
new = self.copy()
new.scale(-1)
return new
def __sub__(self, other: Union['QuadraticModel', Bias]) -> 'QuadraticModel':
if isinstance(other, QuadraticModel):
new = self.copy()
new.scale(-1)
new.update(other)
new.scale(-1)
return new
if isinstance(other, Number):
new = self.copy()
new.offset -= other
return new
return NotImplemented
def __isub__(self, other: Union['QuadraticModel', Bias]) -> 'QuadraticModel':
if isinstance(other, QuadraticModel):
self.scale(-1)
self.update(other)
self.scale(-1)
return self
if isinstance(other, Number):
self.offset -= other
return self
return NotImplemented
def __rsub__(self, other: Bias) -> 'QuadraticModel':
# should only miss on a number
if isinstance(other, Number):
new = self.copy()
new.scale(-1)
new += other
return new
return NotImplemented
@property
def dtype(self) -> np.dtype:
"""Data-type of the model's biases."""
return self.data.dtype
@property
def num_interactions(self) -> int:
"""Number of interactions in the model.
The complexity is linear in the number of variables.
"""
return self.data.num_interactions()
@property
def num_variables(self) -> int:
"""Number of variables in the model."""
return self.data.num_variables()
@property
def offset(self) -> np.number:
"""Constant energy offset associated with the model."""
return self.data.offset
@offset.setter
def offset(self, offset):
self.data.offset = offset
@property
def shape(self) -> Tuple[int, int]:
"""A 2-tuple of :attr:`num_variables` and :attr:`num_interactions`."""
return self.num_variables, self.num_interactions
@property
def variables(self) -> Variables:
"""The variables of the quadratic model"""
return self.data.variables
@forwarding_method
def add_linear(self, v: Variable, bias: Bias):
"""Add a quadratic term."""
return self.data.add_linear
@forwarding_method
def add_quadratic(self, u: Variable, v: Variable, bias: Bias):
        """Add a quadratic term; `bias` is added to the bias of the interaction `(u, v)`."""
        return self.data.add_quadratic
@forwarding_method
def add_variable(self, vartype: VartypeLike,
v: Optional[Variable] = None, bias: Bias = 0) -> Variable:
"""Add a quadratic term."""
return self.data.add_variable
def copy(self):
"""Return a copy."""
return deepcopy(self)
@forwarding_method
def degree(self, v: Variable) -> int:
"""Return the degree of variable `v`.
The degree is the number of interactions that contain `v`.
"""
return self.data.degree
@classmethod
def from_bqm(cls, bqm: 'BinaryQuadraticModel') -> 'QuadraticModel':
obj = cls(dtype=bqm.dtype)
# this can be improved a great deal with c++, but for now let's use
# the python fallback for everything
for v in bqm.variables:
obj.set_linear(obj.add_variable(bqm.vartype, v), bqm.get_linear(v))
for u, v, bias in bqm.iter_quadratic():
obj.set_quadratic(u, v, bias)
obj.offset = bqm.offset
return obj
@forwarding_method
def get_linear(self, v: Variable) -> Bias:
"""Get the linear bias of `v`."""
return self.data.get_linear
@forwarding_method
def get_quadratic(self, u: Variable, v: Variable,
default: Optional[Bias] = None) -> Bias:
return self.data.get_quadratic
def is_equal(self, other):
"""Return True if the given model has the same variables, vartypes and biases."""
if isinstance(other, Number):
return not self.num_variables and self.offset == other
# todo: performance
try:
return (self.shape == other.shape # redundant, fast to check
and self.offset == other.offset
and self.linear == other.linear
and all(self.vartype(v) == other.vartype(v) for v in self.variables)
and self.adj == other.adj)
except AttributeError:
return False
def is_linear(self) -> bool:
"""Return True if the model has no quadratic interactions."""
return self.data.is_linear()
@forwarding_method
def iter_neighborhood(self, v: Variable) -> Iterator[Tuple[Variable, Bias]]:
"""Iterate over the neighbors and quadratic biases of a variable."""
return self.data.iter_neighborhood
@forwarding_method
def iter_quadratic(self) -> Iterator[Tuple[Variable, Variable, Bias]]:
return self.data.iter_quadratic
@forwarding_method
def reduce_linear(self, function: Callable,
initializer: Optional[Bias] = None) -> Any:
"""Apply function of two arguments cumulatively to the linear biases.
"""
return self.data.reduce_linear
@forwarding_method
def reduce_neighborhood(self, v: Variable, function: Callable,
initializer: Optional[Bias] = None) -> Any:
"""Apply function of two arguments cumulatively to the quadratic biases
associated with a single variable.
"""
return self.data.reduce_neighborhood
@forwarding_method
def reduce_quadratic(self, function: Callable,
initializer: Optional[Bias] = None) -> Any:
"""Apply function of two arguments cumulatively to the quadratic
biases.
"""
return self.data.reduce_quadratic
def remove_interaction(self, u: Variable, v: Variable):
# This is needed for the views, but I am not sure how often users are
# removing variables/interactions. For now let's leave it here so
# we satisfy the ABC and see if it comes up. If not, in the future we
# can consider removing __delitem__ from the various views.
raise NotImplementedError("not yet implemented - please open a feature request")
def remove_variable(self, v: Optional[Variable] = None) -> Variable:
# see note in remove_interaction
raise NotImplementedError("not yet implemented - please open a feature request")
@forwarding_method
def scale(self, scalar: Bias):
return self.data.scale
@forwarding_method
def set_linear(self, v: Variable, bias: Bias):
"""Set the linear bias of `v`.
Raises:
TypeError: If `v` is not hashable.
"""
return self.data.set_linear
@forwarding_method
def set_quadratic(self, u: Variable, v: Variable, bias: Bias):
"""Set the quadratic bias of `(u, v)`.
Raises:
TypeError: If `u` or `v` is not hashable.
"""
return self.data.set_quadratic
def update(self, other: 'QuadraticModel'):
        # this can be improved a great deal with c++, but for now let's use
        # the python fallback for everything
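# Illustrative usage sketch -- not part of the module above. It assumes the
# installed dimod package exposes QuadraticModel as defined here, and shows the
# vartype-dependent product rule in __mul__: for a BINARY variable x, x*x == x,
# so multiplying two linear models over x yields a linear term, not a quadratic one.
if __name__ == "__main__":
    from dimod import QuadraticModel
    qm1 = QuadraticModel({'x': 1.0}, {}, 0.0, {'x': 'BINARY'})
    qm2 = QuadraticModel({'x': 2.0}, {}, 0.0, {'x': 'BINARY'})
    product = qm1 * qm2
    assert product.get_linear('x') == 2.0  # x*x collapsed back to x
    assert product.is_linear()             # no quadratic interaction was created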
# PatrickKoss/BettingPrediction -- BettingRestAPI/csgo_api/views.py
import json
from datetime import datetime, timedelta
from threading import Thread
import numpy as np
from django.conf import settings
from django.db import connection
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from BettingRestAPI.Helper.CSGOHelper import check_authorization
from BettingRestAPI.Serializer.CSGOSerializer import MatchSerializer, TeamSerializer, TeamsPredictionSerializer
from BettingRestAPI.Serializer.Encoder import ComplexEncoder
from BettingRestAPI.utils.Message import Message
from csgo_api.models import Match, Team
class GetUpcomingMatches(APIView):
def get(self, request):
"""return all upcoming matches. Matches need to start later than now"""
check, json_message, response_status = check_authorization(request)
if check:
message = Message("success", f"CSGO Upcoming Matches")
upcoming_matches = Match.objects.filter(
date__range=(datetime.now(), datetime.now() + timedelta(days=15))).order_by("date")
upcoming_matches = MatchSerializer(upcoming_matches, context={'request': request}, many=True).data
for match in upcoming_matches:
match.update({
"nnPickedTeam": match["Team_1"]["name"] if match["team_1_confidence"] >= match["team_2_confidence"]
else match["Team_2"]["name"]})
match.update({
"svmPickedTeam": match["Team_1"]["name"] if float(match["prediction_svm"]) == 0 else
match["Team_2"][
"name"]})
json_rep = json.dumps({'message': message.repr_json(), 'upcoming_matches': upcoming_matches},
cls=ComplexEncoder)
json_rep = json.loads(json_rep)
return Response(json_rep, status=response_status)
else:
return Response(json_message, status=response_status)
class GetMatchResult(APIView):
def get(self, request):
"""return the result of the matches"""
check, json_message, response_status = check_authorization(request)
if check:
with connection.cursor() as cursor:
cursor.execute(f"select m.Team_1_id, m.Team_2_id, DATE(m.date) as date , m.team_1_win, m.team_2_win, "
f"m.odds_team_1, m.odds_team_2, m.team_1_confidence, m.team_2_confidence, m.prediction_svm, "
f"m.name as Team1, t.name as Team2, m.mode, "
f"CASE m.team_1_confidence >= m.team_2_confidence WHEN TRUE THEN m.name ELSE t.name END AS nnPickedTeam, "
f"CASE m.prediction_svm = 0 WHEN TRUE THEN m.name ELSE t.name END AS svmPickedTeam, "
f"CASE m.team_1_win = 1 WHEN TRUE THEN m.name ELSE t.name END AS winningTeam "
f"FROM (SELECT name, id FROM csgo_api_team) as t INNER JOIN ("
f"select m.Team_1_id, m.Team_2_id, m.date, m.team_1_win, m.team_2_win, "
f"m.odds_team_1, m.odds_team_2, m.team_1_confidence, m.team_2_confidence, m.prediction_svm, t.name, m.mode "
f"FROM (SELECT name, id FROM csgo_api_team) as t INNER JOIN ("
f"select m.Team_1_id, m.Team_2_id, m.date, mr.team_1_win, mr.team_2_win, "
f"m.odds_team_1, m.odds_team_2, m.team_1_confidence, m.team_2_confidence, m.prediction_svm, m.mode "
f"From csgo_api_matchResult as mr "
f"INNER JOIN csgo_api_match as m "
f"ON mr.Team_1_id = m.Team_1_id AND mr.Team_2_id = m.Team_2_id AND m.date = mr.date"
f") as m "
f"ON t.id = m.Team_1_id"
f") as m "
f"ON t.id = m.Team_2_id")
columns = [col[0] for col in cursor.description]
res = [dict(zip(columns, row)) for row in cursor.fetchall()]
return Response({'message': {"message": "fine", "messageType": "success"}, 'matchResult': res},
status=response_status)
else:
return Response(json_message, status=response_status)
class GetMatchResultStats(APIView):
def get(self, request):
"""return json of stats that the model produces"""
check, json_message, response_status = check_authorization(request)
if check:
result_list = []
# multi threading is about 4 times faster than single threaded
threads = [None] * 9
results = [None] * 54
threads_all = [None] * 9
results_all = [None] * 18
index_threads = 0
index_results = 0
index_threads_all = 0
index_results_all = 0
for odd in np.arange(1, 1.9, 0.1):
odd = round(odd, 2)
threads[index_threads] = Thread(target=self.get_query_dict_groups, args=(odd, results, index_results))
threads[index_threads].start()
index_threads += 1
index_results += 6
threads_all[index_threads_all] = Thread(target=self.get_query_dict_all,
args=(odd, results_all, index_results_all))
threads_all[index_threads_all].start()
index_threads_all += 1
index_results_all += 2
for i in range(len(threads)):
threads[i].join()
for i in range(len(threads_all)):
threads_all[i].join()
results = results + results_all
if None not in results:
results = sorted(results, key=lambda k: k["odds"])
return Response({'message': {"message": "fine", "messageType": "success"}, 'stats': results},
status=response_status)
else:
return Response(json_message, status=response_status)
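    # A possible simplification sketch, not part of the original view: the manual
    # Thread/slot-index bookkeeping above can be expressed with a thread pool.
    # `query_fn` is a hypothetical single-argument callable that returns a list
    # of stat dicts for one odds threshold (the real helpers below instead write
    # into preallocated result slots).
    def _collect_stats(self, query_fn, odds_thresholds):
        # Local import keeps the sketch self-contained without touching the
        # module-level imports above.
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=len(odds_thresholds)) as pool:
            per_threshold = list(pool.map(query_fn, odds_thresholds))
        # Flatten the per-threshold lists into a single result list.
        return [entry for chunk in per_threshold for entry in chunk]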
def get_query_dict_groups(self, odd, results, index):
with connection.cursor() as cursor:
cursor.execute("select COUNT(id) as sampleSize, round(SUM(NN_Money)/COUNT(id),2) as roi_nn, "
"round(SUM(SVM_Money)/COUNT(id),2) as roi_svm, mode, "
"round(SUM(odds)/COUNT(id),2) as average_odds, "
"round(SUM(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END)/COUNT(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END)+1,2) as nn_winning_odds, "
"round(SUM(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END)/COUNT(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END)+1,2) as svm_winning_odds, "
"round(CAST(COUNT(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END) as double)/CAST(COUNT(id) as double),2) as nn_accuracy, "
"round(CAST(COUNT(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END) as double)/CAST(COUNT(id) as double),2) as svm_accuracy "
"FROM ("
f"select mr.id as id, m.mode as mode, "
f"CASE "
f"WHEN mr.team_1_win = 1 AND m.team_1_confidence >= m.team_2_confidence THEN m.odds_team_1 - 1 "
f"WHEN mr.team_2_win = 1 AND m.team_1_confidence < m.team_2_confidence THEN m.odds_team_2 - 1 "
f"ELSE -1 END AS NN_Money,"
f"CASE "
f"WHEN mr.team_1_win = 1 AND m.prediction_svm = 0 THEN m.odds_team_1 - 1 "
f"WHEN mr.team_2_win = 1 AND m.prediction_svm = 1 THEN m.odds_team_2 - 1 "
f"ELSE -1 END AS SVM_Money, "
f"CASE "
f"WHEN mr.team_1_win = 1 THEN m.odds_team_1 "
f"WHEN mr.team_2_win = 1 THEN m.odds_team_2 "
f"ELSE 1 END AS odds "
f"From csgo_api_matchResult as mr "
f"INNER JOIN csgo_api_match as m "
f"ON mr.Team_1_id = m.Team_1_id AND mr.Team_2_id = m.Team_2_id AND m.date = mr.date "
f"WHERE m.odds_team_1 >= %s and m.odds_team_2 >= %s) "
f"GROUP BY mode", [odd, odd])
columns = [col[0] for col in cursor.description]
res = [dict(zip(columns, row)) for row in cursor.fetchall()]
for r in res:
results[index] = {'accuracy': r["svm_accuracy"], 'roi': r["roi_svm"], 'sampleSize': r["sampleSize"],
'averageOdds': r["average_odds"], 'svm': "SVM",
'mode': r["mode"], 'odds': odd, 'average_winning_odds': r["svm_winning_odds"]}
results[index + 1] = {'accuracy': r["nn_accuracy"], 'roi': r["roi_nn"], 'sampleSize': r["sampleSize"],
'averageOdds': r["average_odds"], 'svm': "NN",
'mode': r["mode"], 'odds': odd, 'average_winning_odds': r["nn_winning_odds"]}
index += 2
def get_query_dict_all(self, odd, results, index):
with connection.cursor() as cursor:
cursor.execute("select COUNT(id) as sampleSize, round(SUM(NN_Money)/COUNT(id),2) as roi_nn, "
"round(SUM(SVM_Money)/COUNT(id),2) as roi_svm, mode, "
"round(SUM(odds)/COUNT(id),2) as average_odds, "
"round(SUM(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END)/COUNT(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END)+1,2) as nn_winning_odds, "
"round(SUM(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END)/COUNT(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END)+1,2) as svm_winning_odds, "
"round(CAST(COUNT(CASE NN_Money > 0 WHEN TRUE THEN NN_Money END) as double)/CAST(COUNT(id) as double),2) as nn_accuracy, "
"round(CAST(COUNT(CASE SVM_Money > 0 WHEN TRUE THEN SVM_Money END) as double)/CAST(COUNT(id) as double),2) as svm_accuracy "
"FROM ("
f"select mr.id as id, m.mode as mode, "
f"CASE "
f"WHEN mr.team_1_win = 1 AND m.team_1_confidence >= m.team_2_confidence THEN m.odds_team_1 - 1 "
f"WHEN mr.team_2_win = 1 AND m.team_1_confidence < m.team_2_confidence THEN m.odds_team_2 - 1 "
f"ELSE -1 END AS NN_Money,"
f"CASE "
f"WHEN mr.team_1_win = 1 AND m.prediction_svm = 0 THEN m.odds_team_1 - 1 "
f"WHEN mr.team_2_win = 1 AND m.prediction_svm = 1 THEN m.odds_team_2 - 1 "
f"ELSE -1 END AS SVM_Money, "
f"CASE "
f"WHEN mr.team_1_win = 1 THEN m.odds_team_1 "
f"WHEN mr.team_2_win = 1 THEN m.odds_team_2 "
f"ELSE 1 END AS odds "
f"From csgo_api_matchResult as mr "
f"INNER JOIN csgo_api_match as m "
f"ON mr.Team_1_id = m.Team_1_id AND mr.Team_2_id = m.Team_2_id AND m.date = mr.date "
f"WHERE m.odds_team_1 >= %s and m.odds_team_2 >= %s) "
f"", [odd, odd])
columns = [col[0] for col in cursor.description]
res = [dict(zip(columns, row)) for row in cursor.fetchall()]
for r in res:
results[index] = {'accuracy': r["svm_accuracy"], 'roi': r["roi_svm"], 'sampleSize': r["sampleSize"],
'averageOdds': r["average_odds"], 'svm': "SVM",
'mode': "all games", 'odds': odd, 'average_winning_odds': r["svm_winning_odds"]}
results[index + 1] = {'accuracy': r["nn_accuracy"], 'roi': r["roi_nn"], 'sampleSize': r["sampleSize"],
'averageOdds': r["average_odds"], 'svm': "NN",
'mode': "all games", 'odds': odd, 'average_winning_odds': r["nn_winning_odds"]}
index += 2
class GetTeam(APIView):
def get(self, request, id):
"""return the team given by an id"""
check, json_message, response_status = check_authorization(request)
if check:
message = Message("success", f"Here is the Team")
team = Team.objects.filter(id=id)
if not team.exists():
message = Message("error", f"No team found")
json_rep = json.dumps({'message': message.repr_json()},
cls=ComplexEncoder)
return Response(json_rep, status=response_status)
team = team.first()
team = TeamSerializer(team, context={'request': request}).data
json_rep = json.dumps({'message': message.repr_json(), 'team': team},
cls=ComplexEncoder)
json_rep = json.loads(json_rep)
return Response(json_rep, status=response_status)
else:
return Response(json_message, status=response_status)
class GetTeams(APIView):
def get(self, request):
"""return all teams"""
check, json_message, response_status = check_authorization(request)
if check:
message = Message("success", f"Here are the teams")
teams = Team.objects.all().order_by("end_date")
team_set = list({team.name: team for team in teams}.values())
team_set.sort(key=lambda x: x.name)
teams = TeamsPredictionSerializer(team_set, context={'request': request}, many=True).data
json_rep = json.dumps({'message': message.repr_json(), 'teams': teams},
cls=ComplexEncoder)
json_rep = json.loads(json_rep)
return Response(json_rep, status=response_status)
else:
return Response(json_message, status=response_status)
class CreatePrediction(APIView):
def post(self, request):
"""make a prediction on two given teams"""
check, json_message, response_status = check_authorization(request)
if check:
data = json.loads(request.body or "{}")
# check if the teams are correct
check_teams, response = self.check_teams(data)
if not check_teams:
return response
# check if the players are correct
check_player_team_1, response = self.check_players_in_team(data, "team_1")
if not check_player_team_1:
return response
            check_player_team_2, response = self.check_players_in_team(data, "team_2")
if not check_player_team_2:
return response
# check if a single player is correct
check_players, response = self.check_players(data)
if not check_players:
return response
# check if the values are correct
self.dummy_logger, verify=False)
self.assertEqual(self.processed_prometheus_data_example, actual_output)
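    # The tests below pin down the prometheus-source fallback behaviour of
    # _get_prometheus_data: the last prometheus source used is tried first; if it
    # answers, last_prometheus_source_used is left unchanged and non-connection
    # errors propagate as-is; on connection-type errors (ReadTimeout,
    # ConnectionError) the monitor walks node_config.node_prometheus_urls,
    # records the first source that responds in last_prometheus_source_used, and
    # raises NodeIsDownException only when every source is unreachable.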
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_does_not_change_last_prom_sourced_used_if_online(
self, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.return_value = \
self.processed_prometheus_data_example
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.test_monitor._get_prometheus_data()
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_non_conn_err_if_last_source_used_on_and_errs(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = exception_instance
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(exception_class,
self.test_monitor._get_prometheus_data)
mock_get_prometheus_metrics_data.assert_called_once_with(
old_last_prometheus_source_used, self.prometheus_metrics,
self.dummy_logger, verify=False)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_no_change_last_source_used_if_online_and_it_errors(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
# Here we are assuming that the error is not connection related
mock_get_prometheus_metrics_data.side_effect = exception_instance
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
try:
self.test_monitor._get_data()
except exception_class:
pass
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_gets_data_from_online_source_if_last_source_used_off(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
# In this case we are setting the final source to be online
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
self.processed_prometheus_data_example]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
actual_output = self.test_monitor._get_prometheus_data()
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(4, len(actual_calls))
# In this case there are two calls to
# self.test_monitor.node_config._node_prometheus_urls[0] because
# initially this url was also the last prometheus source used.
expected_calls = [call(old_last_prometheus_source_used,
self.prometheus_metrics, self.dummy_logger,
verify=False)]
for i in range(0, len(self.node_prometheus_urls)):
expected_calls.append(call(
self.test_monitor.node_config.node_prometheus_urls[i],
self.prometheus_metrics, self.dummy_logger, verify=False))
self.assertEqual(expected_calls, actual_calls)
self.assertEqual(self.processed_prometheus_data_example, actual_output)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_changes_last_source_if_last_source_off_other_node_on(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
# In this case we are setting the final source to be online
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
self.processed_prometheus_data_example]
self.test_monitor._get_prometheus_data()
self.assertEqual(self.test_monitor.node_config.node_prometheus_urls[-1],
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_non_connection_err_if_online_source_errors(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
# Here we will assume that the last prometheus source used was deemed as
# offline as we have already tested the online case in a previous test.
# We will also assume that the second source is online but it errors.
mock_get_prometheus_metrics_data.side_effect = [
ReqConnectionError('test'), ReqConnectionError('test'),
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(exception_class,
self.test_monitor._get_prometheus_data)
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(3, len(actual_calls))
self.assertEqual([
call(old_last_prometheus_source_used, self.prometheus_metrics,
self.dummy_logger, verify=False),
call(self.test_monitor.node_config._node_prometheus_urls[0],
self.prometheus_metrics, self.dummy_logger, verify=False),
call(self.test_monitor.node_config._node_prometheus_urls[1],
self.prometheus_metrics, self.dummy_logger, verify=False)],
actual_calls)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_changes_last_prom_source_used_if_online_source_errs(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
        # Here we assume that the last prometheus source used was deemed
        # offline, as the online case was already tested above. We also assume
        # that the second source is online but errors.
mock_get_prometheus_metrics_data.side_effect = [
ReqConnectionError('test'), ReqConnectionError('test'),
exception_instance]
try:
self.test_monitor._get_prometheus_data()
except exception_class:
pass
self.assertEqual(self.test_monitor.node_config.node_prometheus_urls[1],
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_NodeIsDownException_if_all_prom_sources_down(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
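        # Every prometheus source returns a connection-type error, so the
        # monitor should conclude that the node is down.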
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(NodeIsDownException,
self.test_monitor._get_prometheus_data)
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(4, len(actual_calls))
# In this case there are two calls to
# self.test_monitor.node_config._node_prometheus_urls[0] because
# initially this url was also the last prometheus source used.
expected_calls = [call(old_last_prometheus_source_used,
self.prometheus_metrics, self.dummy_logger,
verify=False)]
for i in range(0, len(self.node_prometheus_urls)):
expected_calls.append(call(
self.test_monitor.node_config.node_prometheus_urls[i],
self.prometheus_metrics, self.dummy_logger, verify=False))
self.assertEqual(expected_calls, actual_calls)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_does_not_change_last_prom_source_used_if_all_down(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
try:
self.test_monitor._get_prometheus_data()
except NodeIsDownException:
pass
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
('self.received_retrieval_info_prometheus_disabled', [], False,),
('self.received_retrieval_info_all_source_types_enabled',
['self.retrieved_prometheus_data_example'], True,),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_data_return_if_no_errors_raised(
self, expected_return, retrieved_prometheus_data,
monitor_prometheus, mock_get_prometheus_metrics_data) -> None:
get_prometheus_metrics_data_return = list(map(
eval, retrieved_prometheus_data))
mock_get_prometheus_metrics_data.side_effect = \
get_prometheus_metrics_data_return
self.test_monitor._node_config._monitor_prometheus = monitor_prometheus
actual_ret = self.test_monitor._get_data()
expected_ret = eval(expected_return)
self.assertEqual(expected_ret, actual_ret)
@parameterized.expand([
("IncompleteRead('test')",
"DataReadingException(self.test_monitor.monitor_name, "
"self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("ChunkedEncodingError('test')",
"DataReadingException(self.test_monitor.monitor_name, "
"self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("ProtocolError('test')",
"DataReadingException(self.test_monitor.monitor_name, "
"self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("InvalidURL('test')",
"InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("InvalidSchema('test')",
"InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("MissingSchema('test')",
"InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
("MetricNotFoundException('test_metric', 'test_endpoint')",
"MetricNotFoundException('test_metric', 'test_endpoint')",
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
('NodeIsDownException(self.node_name)',
'NodeIsDownException(self.node_name)',
'self.received_retrieval_info_all_source_types_enabled_err', True,
'prometheus'),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_data_return_if_recognised_error_raised(
self, raised_err, returned_err, expected_return, monitor_prometheus,
errored_source_type, mock_get_prometheus_metrics_data) -> None:
        # This test will be expanded when more source types are added, to cover
        # the case where monitor_prometheus is False.
mock_get_prometheus_metrics_data.side_effect = \
eval(raised_err) if errored_source_type == "prometheus" else None
self.test_monitor._node_config._monitor_prometheus = monitor_prometheus
actual_ret = self.test_monitor._get_data()
expected_ret = eval(expected_return)
expected_ret[errored_source_type][
'data_retrieval_exception'] = eval(returned_err)
self.assertEqual(expected_ret, actual_ret)
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_data_raises_unrecognised_error_if_raised(
self, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = self.test_exception
self.assertRaises(PANICException, self.test_monitor._get_data)
@parameterized.expand([
("self.test_monitor.last_prometheus_source_used",),
])
@freeze_time("2012-01-01")
def test_process_error_returns_expected_data(self,
last_source_used) -> None:
# We will add more parameters to this test as the source types increase
expected_output = {
'error': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'node_name': self.test_monitor.node_config.node_name,
'last_source_used': eval(last_source_used),
'node_id': self.test_monitor.node_config.node_id,
'node_parent_id': self.test_monitor.node_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'message': self.test_exception.message,
'code': self.test_exception.code,
}
}
actual_output = self.test_monitor._process_error(self.test_exception,
eval(last_source_used))
self.assertEqual(actual_output, expected_output)
@parameterized.expand([
("self.processed_prometheus_data_example",
"self.retrieved_prometheus_data_example"),
("self.processed_prometheus_data_example_optionals_none",
"self.retrieved_prometheus_data_example_optionals_none"),
])
@freeze_time("2012-01-01")
def test_process_retrieved_prometheus_data_returns_expected_data(
self, expected_data_output, retrieved_data) -> None:
expected_output = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'node_name': self.test_monitor.node_config.node_name,
'last_source_used':
self.test_monitor.last_prometheus_source_used,
'node_id': self.test_monitor.node_config.node_id,
'node_parent_id': self.test_monitor.node_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': eval(expected_data_output),
}
}
actual_output = self.test_monitor._process_retrieved_prometheus_data(
eval(retrieved_data))
self.assertEqual(expected_output, actual_output)
def test_process_retrieved_data_returns_the_correct_dict(self) -> None:
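        # _process_retrieved_data should simply apply the supplied processing
        # function to the given data and return its result.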
def test_fn(x: Dict): return x
actual_ret = self.test_monitor._process_retrieved_data(
test_fn, self.test_data_dict)
expected_ret = test_fn(self.test_data_dict)
self.assertEqual(expected_ret, actual_ret)
def test_send_data_sends_data_correctly(self) -> None:
# This test creates a queue which receives messages with the same
# routing key as the ones sent by send_data, and checks that the
# data is received
self.test_monitor._initialise_rabbitmq()
        # Delete the queue first to avoid leftover messages from earlier runs.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
self.test_monitor._send_data(self.processed_prometheus_data_example)
        # By re-declaring the queue we can get the number of messages in it.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(self.test_queue_name)
self.assertEqual(self.processed_prometheus_data_example,
json.loads(body))
@freeze_time("2012-01-01")
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_sends_data_and_hb_if_data_retrieve_and_processing_success(
self, mock_get_data) -> None:
# Here we are assuming that all sources are enabled.
expected_output_data = {
'prometheus': {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'node_name': self.test_monitor.node_config.node_name,
'last_source_used':
self.test_monitor.last_prometheus_source_used,
'node_id': self.test_monitor.node_config.node_id,
'node_parent_id':
self.test_monitor.node_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': self.processed_prometheus_data_example,
}
}
}
expected_output_hb = {
'component_name': self.test_monitor.monitor_name,
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp()
}
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled
self.test_monitor._initialise_rabbitmq()
        # Delete the queue first to avoid leftover messages from earlier runs.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
        # By re-declaring the queue we can get the number of messages in it.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 2 messages in the queue, the heartbeat and the
# processed data
self.assertEqual(2, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_data, json.loads(body))
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_hb, json.loads(body))
@parameterized.expand([
(False, ['prometheus'],)
])
@freeze_time("2012-01-01")
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_sends_empty_dict_for_disabled_source(
self, monitor_prometheus, disabled_sources, mock_get_data) -> None:
# Once more sources are added this test will make more sense.
self.test_monitor.node_config._monitor_prometheus = monitor_prometheus
expected_output_data = {
'prometheus': {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'node_name': self.test_monitor.node_config.node_name,
'last_source_used':
self.test_monitor.last_prometheus_source_used,
'node_id': self.test_monitor.node_config.node_id,
'node_parent_id':
self.test_monitor.node_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': self.processed_prometheus_data_example,
}
}
}
for disabled_source in disabled_sources:
expected_output_data[disabled_source] = {}
expected_output_hb = {
'component_name': self.test_monitor.monitor_name,
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp()
}
        # We can get all the data since that won't affect how _monitor() works
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled
self.test_monitor._initialise_rabbitmq()
        # Delete the queue first to avoid leftover messages from earlier runs.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
        # By re-declaring the queue we can get the number of messages in it.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 2 messages in the queue, the heartbeat and the
# processed data
self.assertEqual(2, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
        # Check that the message received is actually the HB
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_hb, json.loads(body))
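# --- ANTLR-generated serialized ATN data for the T-SQL lexer; the
# buf.write(...) calls below build the string returned by serializedATN(),
# which is consumed by the tsqlLexer class further down. ---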
buf.write("\2\u0f99\u029e\3\2\2\2\u0f9a\u0f9b\5\u03f1\u01f9\2\u0f9b")
buf.write("\u0f9c\5\u03d7\u01ec\2\u0f9c\u0f9d\5\u03ef\u01f8\2\u0f9d")
buf.write("\u0f9e\5\u03df\u01f0\2\u0f9e\u02a0\3\2\2\2\u0f9f\u0fa0")
buf.write("\5\u03f1\u01f9\2\u0fa0\u0fa1\5\u03df\u01f0\2\u0fa1\u0fa2")
buf.write("\5\u03fb\u01fe\2\u0fa2\u0fa3\5\u03fd\u01ff\2\u0fa3\u0fa4")
buf.write("\5\u03df\u01f0\2\u0fa4\u0fa5\5\u03dd\u01ef\2\u0fa5\u0fa6")
buf.write("\7a\2\2\u0fa6\u0fa7\5\u03fd\u01ff\2\u0fa7\u0fa8\5\u03f9")
buf.write("\u01fd\2\u0fa8\u0fa9\5\u03e7\u01f4\2\u0fa9\u0faa\5\u03e3")
buf.write("\u01f2\2\u0faa\u0fab\5\u03e3\u01f2\2\u0fab\u0fac\5\u03df")
buf.write("\u01f0\2\u0fac\u0fad\5\u03f9\u01fd\2\u0fad\u0fae\5\u03fb")
buf.write("\u01fe\2\u0fae\u02a2\3\2\2\2\u0faf\u0fb0\5\u03f1\u01f9")
buf.write("\2\u0fb0\u0fb1\5\u03df\u01f0\2\u0fb1\u0fb2\5\u0403\u0202")
buf.write("\2\u0fb2\u0fb3\7a\2\2\u0fb3\u0fb4\5\u03d9\u01ed\2\u0fb4")
buf.write("\u0fb5\5\u03f9\u01fd\2\u0fb5\u0fb6\5\u03f3\u01fa\2\u0fb6")
buf.write("\u0fb7\5\u03eb\u01f6\2\u0fb7\u0fb8\5\u03df\u01f0\2\u0fb8")
buf.write("\u0fb9\5\u03f9\u01fd\2\u0fb9\u02a4\3\2\2\2\u0fba\u0fbb")
buf.write("\5\u03f1\u01f9\2\u0fbb\u0fbc\5\u03f3\u01fa\2\u0fbc\u0fbd")
buf.write("\5\u03db\u01ee\2\u0fbd\u0fbe\5\u03f3\u01fa\2\u0fbe\u0fbf")
buf.write("\5\u03ff\u0200\2\u0fbf\u0fc0\5\u03f1\u01f9\2\u0fc0\u0fc1")
buf.write("\5\u03fd\u01ff\2\u0fc1\u02a6\3\2\2\2\u0fc2\u0fc3\5\u03f1")
buf.write("\u01f9\2\u0fc3\u0fc4\5\u03f3\u01fa\2\u0fc4\u0fc5\5\u03dd")
buf.write("\u01ef\2\u0fc5\u0fc6\5\u03df\u01f0\2\u0fc6\u0fc7\5\u03fb")
buf.write("\u01fe\2\u0fc7\u02a8\3\2\2\2\u0fc8\u0fc9\5\u03f1\u01f9")
buf.write("\2\u0fc9\u0fca\5\u03f3\u01fa\2\u0fca\u0fcb\5\u03df\u01f0")
buf.write("\2\u0fcb\u0fcc\5\u0405\u0203\2\u0fcc\u0fcd\5\u03f5\u01fb")
buf.write("\2\u0fcd\u0fce\5\u03d7\u01ec\2\u0fce\u0fcf\5\u03f1\u01f9")
buf.write("\2\u0fcf\u0fd0\5\u03dd\u01ef\2\u0fd0\u02aa\3\2\2\2\u0fd1")
buf.write("\u0fd2\5\u03f1\u01f9\2\u0fd2\u0fd3\5\u03f3\u01fa\2\u0fd3")
buf.write("\u0fd4\5\u03f1\u01f9\2\u0fd4\u0fd5\7a\2\2\u0fd5\u0fd6")
buf.write("\5\u03fd\u01ff\2\u0fd6\u0fd7\5\u03f9\u01fd\2\u0fd7\u0fd8")
buf.write("\5\u03d7\u01ec\2\u0fd8\u0fd9\5\u03f1\u01f9\2\u0fd9\u0fda")
buf.write("\5\u03fb\u01fe\2\u0fda\u0fdb\5\u03d7\u01ec\2\u0fdb\u0fdc")
buf.write("\5\u03db\u01ee\2\u0fdc\u0fdd\5\u03fd\u01ff\2\u0fdd\u0fde")
buf.write("\5\u03df\u01f0\2\u0fde\u0fdf\5\u03dd\u01ef\2\u0fdf\u0fe0")
buf.write("\7a\2\2\u0fe0\u0fe1\5\u03d7\u01ec\2\u0fe1\u0fe2\5\u03db")
buf.write("\u01ee\2\u0fe2\u0fe3\5\u03db\u01ee\2\u0fe3\u0fe4\5\u03df")
buf.write("\u01f0\2\u0fe4\u0fe5\5\u03fb\u01fe\2\u0fe5\u0fe6\5\u03fb")
buf.write("\u01fe\2\u0fe6\u02ac\3\2\2\2\u0fe7\u0fe8\5\u03f1\u01f9")
buf.write("\2\u0fe8\u0fe9\5\u03f3\u01fa\2\u0fe9\u0fea\5\u03f9\u01fd")
buf.write("\2\u0fea\u0feb\5\u03df\u01f0\2\u0feb\u0fec\5\u03db\u01ee")
buf.write("\2\u0fec\u0fed\5\u03f3\u01fa\2\u0fed\u0fee\5\u03ef\u01f8")
buf.write("\2\u0fee\u0fef\5\u03f5\u01fb\2\u0fef\u0ff0\5\u03ff\u0200")
buf.write("\2\u0ff0\u0ff1\5\u03fd\u01ff\2\u0ff1\u0ff2\5\u03df\u01f0")
buf.write("\2\u0ff2\u02ae\3\2\2\2\u0ff3\u0ff4\5\u03f1\u01f9\2\u0ff4")
buf.write("\u0ff5\5\u03f3\u01fa\2\u0ff5\u0ff6\7a\2\2\u0ff6\u0ff7")
buf.write("\5\u0403\u0202\2\u0ff7\u0ff8\5\u03d7\u01ec\2\u0ff8\u0ff9")
buf.write("\5\u03e7\u01f4\2\u0ff9\u0ffa\5\u03fd\u01ff\2\u0ffa\u02b0")
buf.write("\3\2\2\2\u0ffb\u0ffc\5\u03f1\u01f9\2\u0ffc\u0ffd\5\u03fd")
buf.write("\u01ff\2\u0ffd\u0ffe\5\u03e7\u01f4\2\u0ffe\u0fff\5\u03ed")
buf.write("\u01f7\2\u0fff\u1000\5\u03df\u01f0\2\u1000\u02b2\3\2\2")
buf.write("\2\u1001\u1002\5\u03f1\u01f9\2\u1002\u1003\5\u03ff\u0200")
buf.write("\2\u1003\u1004\5\u03ef\u01f8\2\u1004\u1005\5\u03d9\u01ed")
buf.write("\2\u1005\u1006\5\u03df\u01f0\2\u1006\u1007\5\u03f9\u01fd")
buf.write("\2\u1007\u02b4\3\2\2\2\u1008\u1009\5\u03f1\u01f9\2\u1009")
buf.write("\u100a\5\u03ff\u0200\2\u100a\u100b\5\u03ef\u01f8\2\u100b")
buf.write("\u100c\5\u03df\u01f0\2\u100c\u100d\5\u03f9\u01fd\2\u100d")
buf.write("\u100e\5\u03e7\u01f4\2\u100e\u100f\5\u03db\u01ee\2\u100f")
buf.write("\u1010\7a\2\2\u1010\u1011\5\u03f9\u01fd\2\u1011\u1012")
buf.write("\5\u03f3\u01fa\2\u1012\u1013\5\u03ff\u0200\2\u1013\u1014")
buf.write("\5\u03f1\u01f9\2\u1014\u1015\5\u03dd\u01ef\2\u1015\u1016")
buf.write("\5\u03d7\u01ec\2\u1016\u1017\5\u03d9\u01ed\2\u1017\u1018")
buf.write("\5\u03f3\u01fa\2\u1018\u1019\5\u03f9\u01fd\2\u1019\u101a")
buf.write("\5\u03fd\u01ff\2\u101a\u02b6\3\2\2\2\u101b\u101c\5\u03f3")
buf.write("\u01fa\2\u101c\u101d\5\u03e1\u01f1\2\u101d\u101e\5\u03e1")
buf.write("\u01f1\2\u101e\u101f\5\u03ed\u01f7\2\u101f\u1020\5\u03e7")
buf.write("\u01f4\2\u1020\u1021\5\u03f1\u01f9\2\u1021\u1022\5\u03df")
buf.write("\u01f0\2\u1022\u02b8\3\2\2\2\u1023\u1024\5\u03f3\u01fa")
buf.write("\2\u1024\u1025\5\u03e1\u01f1\2\u1025\u1026\5\u03e1\u01f1")
buf.write("\2\u1026\u1027\5\u03fb\u01fe\2\u1027\u1028\5\u03df\u01f0")
buf.write("\2\u1028\u1029\5\u03fd\u01ff\2\u1029\u02ba\3\2\2\2\u102a")
buf.write("\u102b\5\u03f3\u01fa\2\u102b\u102c\5\u03f1\u01f9\2\u102c")
buf.write("\u102d\5\u03ed\u01f7\2\u102d\u102e\5\u03e7\u01f4\2\u102e")
buf.write("\u102f\5\u03f1\u01f9\2\u102f\u1030\5\u03df\u01f0\2\u1030")
buf.write("\u02bc\3\2\2\2\u1031\u1032\5\u03f3\u01fa\2\u1032\u1033")
buf.write("\5\u03f1\u01f9\2\u1033\u1034\5\u03ed\u01f7\2\u1034\u1035")
buf.write("\5\u0407\u0204\2\u1035\u02be\3\2\2\2\u1036\u1037\5\u03f3")
buf.write("\u01fa\2\u1037\u1038\5\u03f5\u01fb\2\u1038\u1039\5\u03fd")
buf.write("\u01ff\2\u1039\u103a\5\u03e7\u01f4\2\u103a\u103b\5\u03ef")
buf.write("\u01f8\2\u103b\u103c\5\u03e7\u01f4\2\u103c\u103d\5\u03fb")
buf.write("\u01fe\2\u103d\u103e\5\u03fd\u01ff\2\u103e\u103f\5\u03e7")
buf.write("\u01f4\2\u103f\u1040\5\u03db\u01ee\2\u1040\u02c0\3\2\2")
buf.write("\2\u1041\u1042\5\u03f3\u01fa\2\u1042\u1043\5\u03f5\u01fb")
buf.write("\2\u1043\u1044\5\u03fd\u01ff\2\u1044\u1045\5\u03e7\u01f4")
buf.write("\2\u1045\u1046\5\u03ef\u01f8\2\u1046\u1047\5\u03e7\u01f4")
buf.write("\2\u1047\u1048\5\u0409\u0205\2\u1048\u1049\5\u03df\u01f0")
buf.write("\2\u1049\u02c2\3\2\2\2\u104a\u104b\5\u03f3\u01fa\2\u104b")
buf.write("\u104c\5\u03ff\u0200\2\u104c\u104d\5\u03fd\u01ff\2\u104d")
buf.write("\u02c4\3\2\2\2\u104e\u104f\5\u03f3\u01fa\2\u104f\u1050")
buf.write("\5\u03ff\u0200\2\u1050\u1051\5\u03fd\u01ff\2\u1051\u1052")
buf.write("\5\u03f5\u01fb\2\u1052\u1053\5\u03ff\u0200\2\u1053\u1054")
buf.write("\5\u03fd\u01ff\2\u1054\u02c6\3\2\2\2\u1055\u1056\5\u03f3")
buf.write("\u01fa\2\u1056\u1057\5\u0403\u0202\2\u1057\u1058\5\u03f1")
buf.write("\u01f9\2\u1058\u1059\5\u03df\u01f0\2\u1059\u105a\5\u03f9")
buf.write("\u01fd\2\u105a\u02c8\3\2\2\2\u105b\u105c\5\u03f5\u01fb")
buf.write("\2\u105c\u105d\5\u03d7\u01ec\2\u105d\u105e\5\u03e3\u01f2")
buf.write("\2\u105e\u105f\5\u03df\u01f0\2\u105f\u1060\7a\2\2\u1060")
buf.write("\u1061\5\u0401\u0201\2\u1061\u1062\5\u03df\u01f0\2\u1062")
buf.write("\u1063\5\u03f9\u01fd\2\u1063\u1064\5\u03e7\u01f4\2\u1064")
buf.write("\u1065\5\u03e1\u01f1\2\u1065\u1066\5\u0407\u0204\2\u1066")
buf.write("\u02ca\3\2\2\2\u1067\u1068\5\u03f5\u01fb\2\u1068\u1069")
buf.write("\5\u03d7\u01ec\2\u1069\u106a\5\u03f9\u01fd\2\u106a\u106b")
buf.write("\5\u03d7\u01ec\2\u106b\u106c\5\u03ef\u01f8\2\u106c\u106d")
buf.write("\5\u03df\u01f0\2\u106d\u106e\5\u03fd\u01ff\2\u106e\u106f")
buf.write("\5\u03df\u01f0\2\u106f\u1070\5\u03f9\u01fd\2\u1070\u1071")
buf.write("\5\u03e7\u01f4\2\u1071\u1072\5\u0409\u0205\2\u1072\u1073")
buf.write("\5\u03d7\u01ec\2\u1073\u1074\5\u03fd\u01ff\2\u1074\u1075")
buf.write("\5\u03e7\u01f4\2\u1075\u1076\5\u03f3\u01fa\2\u1076\u1077")
buf.write("\5\u03f1\u01f9\2\u1077\u02cc\3\2\2\2\u1078\u1079\5\u03f5")
buf.write("\u01fb\2\u1079\u107a\5\u03d7\u01ec\2\u107a\u107b\5\u03f9")
buf.write("\u01fd\2\u107b\u107c\5\u03fb\u01fe\2\u107c\u107d\5\u03df")
buf.write("\u01f0\2\u107d\u02ce\3\2\2\2\u107e\u107f\5\u03f5\u01fb")
buf.write("\2\u107f\u1080\5\u03d7\u01ec\2\u1080\u1081\5\u03f9\u01fd")
buf.write("\2\u1081\u1082\5\u03fd\u01ff\2\u1082\u1083\5\u03e7\u01f4")
buf.write("\2\u1083\u1084\5\u03fd\u01ff\2\u1084\u1085\5\u03e7\u01f4")
buf.write("\2\u1085\u1086\5\u03f3\u01fa\2\u1086\u1087\5\u03f1\u01f9")
buf.write("\2\u1087\u02d0\3\2\2\2\u1088\u1089\5\u03f5\u01fb\2\u1089")
buf.write("\u108a\5\u03d7\u01ec\2\u108a\u108b\5\u03fd\u01ff\2\u108b")
buf.write("\u108c\5\u03e5\u01f3\2\u108c\u02d2\3\2\2\2\u108d\u108e")
buf.write("\5\u03f5\u01fb\2\u108e\u108f\5\u03df\u01f0\2\u108f\u1090")
buf.write("\5\u03f9\u01fd\2\u1090\u1091\5\u03db\u01ee\2\u1091\u1092")
buf.write("\5\u03df\u01f0\2\u1092\u1093\5\u03f1\u01f9\2\u1093\u1094")
buf.write("\5\u03fd\u01ff\2\u1094\u1095\5\u03e7\u01f4\2\u1095\u1096")
buf.write("\5\u03ed\u01f7\2\u1096\u1097\5\u03df\u01f0\2\u1097\u1098")
buf.write("\7a\2\2\u1098\u1099\5\u03db\u01ee\2\u1099\u109a\5\u03f3")
buf.write("\u01fa\2\u109a\u109b\5\u03f1\u01f9\2\u109b\u109c\5\u03fd")
buf.write("\u01ff\2\u109c\u02d4\3\2\2\2\u109d\u109e\5\u03f5\u01fb")
buf.write("\2\u109e\u109f\5\u03f3\u01fa\2\u109f\u10a0\5\u0403\u0202")
buf.write("\2\u10a0\u10a1\5\u03df\u01f0\2\u10a1\u10a2\5\u03f9\u01fd")
buf.write("\2\u10a2\u02d6\3\2\2\2\u10a3\u10a4\5\u03f5\u01fb\2\u10a4")
buf.write("\u10a5\5\u03f9\u01fd\2\u10a5\u10a6\5\u03df\u01f0\2\u10a6")
buf.write("\u10a7\5\u03db\u01ee\2\u10a7\u10a8\5\u03df\u01f0\2\u10a8")
buf.write("\u10a9\5\u03dd\u01ef\2\u10a9\u10aa\5\u03e7\u01f4\2\u10aa")
buf.write("\u10ab\5\u03f1\u01f9\2\u10ab\u10ac\5\u03e3\u01f2\2\u10ac")
buf.write("\u02d8\3\2\2\2\u10ad\u10ae\5\u03f5\u01fb\2\u10ae\u10af")
buf.write("\5\u03f9\u01fd\2\u10af\u10b0\5\u03e7\u01f4\2\u10b0\u10b1")
buf.write("\5\u03f3\u01fa\2\u10b1\u10b2\5\u03f9\u01fd\2\u10b2\u02da")
buf.write("\3\2\2\2\u10b3\u10b4\5\u03f5\u01fb\2\u10b4\u10b5\5\u03f9")
buf.write("\u01fd\2\u10b5\u10b6\5\u03e7\u01f4\2\u10b6\u10b7\5\u0401")
buf.write("\u0201\2\u10b7\u10b8\5\u03e7\u01f4\2\u10b8\u10b9\5\u03ed")
buf.write("\u01f7\2\u10b9\u10ba\5\u03df\u01f0\2\u10ba\u10bb\5\u03e3")
buf.write("\u01f2\2\u10bb\u10bc\5\u03df\u01f0\2\u10bc\u10bd\5\u03fb")
buf.write("\u01fe\2\u10bd\u02dc\3\2\2\2\u10be\u10bf\5\u03f7\u01fc")
buf.write("\2\u10bf\u10c0\5\u03ff\u0200\2\u10c0\u10c1\5\u03df\u01f0")
buf.write("\2\u10c1\u10c2\5\u03f9\u01fd\2\u10c2\u10c3\5\u0407\u0204")
buf.write("\2\u10c3\u02de\3\2\2\2\u10c4\u10c5\5\u03f7\u01fc\2\u10c5")
buf.write("\u10c6\5\u03ff\u0200\2\u10c6\u10c7\5\u03f3\u01fa\2\u10c7")
buf.write("\u10c8\5\u03fd\u01ff\2\u10c8\u10c9\5\u03df\u01f0\2\u10c9")
buf.write("\u10ca\5\u03dd\u01ef\2\u10ca\u10cb\7a\2\2\u10cb\u10cc")
buf.write("\5\u03e7\u01f4\2\u10cc\u10cd\5\u03dd\u01ef\2\u10cd\u10ce")
buf.write("\5\u03df\u01f0\2\u10ce\u10cf\5\u03f1\u01f9\2\u10cf\u10d0")
buf.write("\5\u03fd\u01ff\2\u10d0\u10d1\5\u03e7\u01f4\2\u10d1\u10d2")
buf.write("\5\u03e1\u01f1\2\u10d2\u10d3\5\u03e7\u01f4\2\u10d3\u10d4")
buf.write("\5\u03df\u01f0\2\u10d4\u10d5\5\u03f9\u01fd\2\u10d5\u02e0")
buf.write("\3\2\2\2\u10d6\u10d7\5\u03f9\u01fd\2\u10d7\u10d8\5\u03d7")
buf.write("\u01ec\2\u10d8\u10d9\5\u03f1\u01f9\2\u10d9\u10da\5\u03e3")
buf.write("\u01f2\2\u10da\u10db\5\u03df\u01f0\2\u10db\u02e2\3\2\2")
buf.write("\2\u10dc\u10dd\5\u03f9\u01fd\2\u10dd\u10de\5\u03d7\u01ec")
buf.write("\2\u10de\u10df\5\u03f1\u01f9\2\u10df\u10e0\5\u03eb\u01f6")
buf.write("\2\u10e0\u02e4\3\2\2\2\u10e1\u10e2\5\u03f9\u01fd\2\u10e2")
buf.write("\u10e3\5\u03df\u01f0\2\u10e3\u10e4\5\u03d7\u01ec\2\u10e4")
buf.write("\u10e5\5\u03dd\u01ef\2\u10e5\u10e6\5\u03f3\u01fa\2\u10e6")
buf.write("\u10e7\5\u03f1\u01f9\2\u10e7\u10e8\5\u03ed\u01f7\2\u10e8")
buf.write("\u10e9\5\u0407\u0204\2\u10e9\u02e6\3\2\2\2\u10ea\u10eb")
buf.write("\5\u03f9\u01fd\2\u10eb\u10ec\5\u03df\u01f0\2\u10ec\u10ed")
buf.write("\5\u03d7\u01ec\2\u10ed\u10ee\5\u03dd\u01ef\2\u10ee\u10ef")
buf.write("\7a\2\2\u10ef\u10f0\5\u03db\u01ee\2\u10f0\u10f1\5\u03f3")
buf.write("\u01fa\2\u10f1\u10f2\5\u03ef\u01f8\2\u10f2\u10f3\5\u03ef")
buf.write("\u01f8\2\u10f3\u10f4\5\u03e7\u01f4\2\u10f4\u10f5\5\u03fd")
buf.write("\u01ff\2\u10f5\u10f6\5\u03fd\u01ff\2\u10f6\u10f7\5\u03df")
buf.write("\u01f0\2\u10f7\u10f8\5\u03dd\u01ef\2\u10f8\u10f9\7a\2")
buf.write("\2\u10f9\u10fa\5\u03fb\u01fe\2\u10fa\u10fb\5\u03f1\u01f9")
buf.write("\2\u10fb\u10fc\5\u03d7\u01ec\2\u10fc\u10fd\5\u03f5\u01fb")
buf.write("\2\u10fd\u10fe\5\u03fb\u01fe\2\u10fe\u10ff\5\u03e5\u01f3")
buf.write("\2\u10ff\u1100\5\u03f3\u01fa\2\u1100\u1101\5\u03fd\u01ff")
buf.write("\2\u1101\u02e8\3\2\2\2\u1102\u1103\5\u03f9\u01fd\2\u1103")
buf.write("\u1104\5\u03df\u01f0\2\u1104\u1105\5\u03d7\u01ec\2\u1105")
buf.write("\u1106\5\u03dd\u01ef\2\u1106\u1107\7a\2\2\u1107\u1108")
buf.write("\5\u03f3\u01fa\2\u1108\u1109\5\u03f1\u01f9\2\u1109\u110a")
buf.write("\5\u03ed\u01f7\2\u110a\u110b\5\u0407\u0204\2\u110b\u02ea")
buf.write("\3\2\2\2\u110c\u110d\5\u03f9\u01fd\2\u110d\u110e\5\u03df")
buf.write("\u01f0\2\u110e\u110f\5\u03d7\u01ec\2\u110f\u1110\5\u03dd")
buf.write("\u01ef\2\u1110\u1111\7a\2\2\u1111\u1112\5\u0403\u0202")
buf.write("\2\u1112\u1113\5\u03f9\u01fd\2\u1113\u1114\5\u03e7\u01f4")
buf.write("\2\u1114\u1115\5\u03fd\u01ff\2\u1115\u1116\5\u03df\u01f0")
buf.write("\2\u1116\u02ec\3\2\2\2\u1117\u1118\5\u03f9\u01fd\2\u1118")
buf.write("\u1119\5\u03df\u01f0\2\u1119\u111a\5\u03db\u01ee\2\u111a")
buf.write("\u111b\5\u03f3\u01fa\2\u111b\u111c\5\u03ef\u01f8\2\u111c")
buf.write("\u111d\5\u03f5\u01fb\2\u111d\u111e\5\u03e7\u01f4\2\u111e")
buf.write("\u111f\5\u03ed\u01f7\2\u111f\u1120\5\u03df\u01f0\2\u1120")
buf.write("\u02ee\3\2\2\2\u1121\u1122\5\u03f9\u01fd\2\u1122\u1123")
buf.write("\5\u03df\u01f0\2\u1123\u1124\5\u03db\u01ee\2\u1124\u1125")
buf.write("\5\u03f3\u01fa\2\u1125\u1126\5\u0401\u0201\2\u1126\u1127")
buf.write("\5\u03df\u01f0\2\u1127\u1128\5\u03f9\u01fd\2\u1128\u1129")
buf.write("\5\u0407\u0204\2\u1129\u02f0\3\2\2\2\u112a\u112b\5\u03f9")
buf.write("\u01fd\2\u112b\u112c\5\u03df\u01f0\2\u112c\u112d\5\u03db")
buf.write("\u01ee\2\u112d\u112e\5\u03ff\u0200\2\u112e\u112f\5\u03f9")
buf.write("\u01fd\2\u112f\u1130\5\u03fb\u01fe\2\u1130\u1131\5\u03e7")
buf.write("\u01f4\2\u1131\u1132\5\u0401\u0201\2\u1132\u1133\5\u03df")
buf.write("\u01f0\2\u1133\u1134\7a\2\2\u1134\u1135\5\u03fd\u01ff")
buf.write("\2\u1135\u1136\5\u03f9\u01fd\2\u1136\u1137\5\u03e7\u01f4")
buf.write("\2\u1137\u1138\5\u03e3\u01f2\2\u1138\u1139\5\u03e3\u01f2")
buf.write("\2\u1139\u113a\5\u03df\u01f0\2\u113a\u113b\5\u03f9\u01fd")
buf.write("\2\u113b\u113c\5\u03fb\u01fe\2\u113c\u02f2\3\2\2\2\u113d")
buf.write("\u113e\5\u03f9\u01fd\2\u113e\u113f\5\u03df\u01f0\2\u113f")
buf.write("\u1140\5\u03ed\u01f7\2\u1140\u1141\5\u03d7\u01ec\2\u1141")
buf.write("\u1142\5\u03fd\u01ff\2\u1142\u1143\5\u03e7\u01f4\2\u1143")
buf.write("\u1144\5\u0401\u0201\2\u1144\u1145\5\u03df\u01f0\2\u1145")
buf.write("\u02f4\3\2\2\2\u1146\u1147\5\u03f9\u01fd\2\u1147\u1148")
buf.write("\5\u03df\u01f0\2\u1148\u1149\5\u03ef\u01f8\2\u1149\u114a")
buf.write("\5\u03f3\u01fa\2\u114a\u114b\5\u03fd\u01ff\2\u114b\u114c")
buf.write("\5\u03df\u01f0\2\u114c\u02f6\3\2\2\2\u114d\u114e\5\u03f9")
buf.write("\u01fd\2\u114e\u114f\5\u03df\u01f0\2\u114f\u1150\5\u03f5")
buf.write("\u01fb\2\u1150\u1151\5\u03df\u01f0\2\u1151\u1152\5\u03d7")
buf.write("\u01ec\2\u1152\u1153\5\u03fd\u01ff\2\u1153\u1154\5\u03d7")
buf.write("\u01ec\2\u1154\u1155\5\u03d9\u01ed\2\u1155\u1156\5\u03ed")
buf.write("\u01f7\2\u1156\u1157\5\u03df\u01f0\2\u1157\u02f8\3\2\2")
buf.write("\2\u1158\u1159\5\u03f9\u01fd\2\u1159\u115a\5\u03df\u01f0")
buf.write("\2\u115a\u115b\5\u03fb\u01fe\2\u115b\u115c\5\u03fd\u01ff")
buf.write("\2\u115c\u115d\5\u03f9\u01fd\2\u115d\u115e\5\u03e7\u01f4")
buf.write("\2\u115e\u115f\5\u03db\u01ee\2\u115f\u1160\5\u03fd\u01ff")
buf.write("\2\u1160\u1161\5\u03df\u01f0\2\u1161\u1162\5\u03dd\u01ef")
buf.write("\2\u1162\u1163\7a\2\2\u1163\u1164\5\u03ff\u0200\2\u1164")
buf.write("\u1165\5\u03fb\u01fe\2\u1165\u1166\5\u03df\u01f0\2\u1166")
buf.write("\u1167\5\u03f9\u01fd\2\u1167\u02fa\3\2\2\2\u1168\u1169")
buf.write("\5\u03f9\u01fd\2\u1169\u116a\5\u03f3\u01fa\2\u116a\u116b")
buf.write("\5\u03d9\u01ed\2\u116b\u116c\5\u03ff\u0200\2\u116c\u116d")
buf.write("\5\u03fb\u01fe\2\u116d\u116e\5\u03fd\u01ff\2\u116e\u02fc")
buf.write("\3\2\2\2\u116f\u1170\5\u03f9\u01fd\2\u1170\u1171\5\u03f3")
buf.write("\u01fa\2\u1171\u1172\5\u03ed\u01f7\2\u1172\u1173\5\u03ed")
buf.write("\u01f7\2\u1173\u1174\5\u03ff\u0200\2\u1174\u1175\5\u03f5")
buf.write("\u01fb\2\u1175\u02fe\3\2\2\2\u1176\u1177\5\u03f9\u01fd")
buf.write("\2\u1177\u1178\5\u03f3\u01fa\2\u1178\u1179\5\u03f3\u01fa")
buf.write("\2\u1179\u117a\5\u03fd\u01ff\2\u117a\u0300\3\2\2\2\u117b")
buf.write("\u117c\5\u03f9\u01fd\2\u117c\u117d\5\u03f3\u01fa\2\u117d")
buf.write("\u117e\5\u03ff\u0200\2\u117e\u117f\5\u03f1\u01f9\2\u117f")
buf.write("\u1180\5\u03dd\u01ef\2\u1180\u0302\3\2\2\2\u1181\u1182")
buf.write("\5\u03f9\u01fd\2\u1182\u1183\5\u03f3\u01fa\2\u1183\u1184")
buf.write("\5\u0403\u0202\2\u1184\u0304\3\2\2\2\u1185\u1186\5\u03f9")
buf.write("\u01fd\2\u1186\u1187\5\u03f3\u01fa\2\u1187\u1188\5\u0403")
buf.write("\u0202\2\u1188\u1189\5\u03e3\u01f2\2\u1189\u118a\5\u03ff")
buf.write("\u0200\2\u118a\u118b\5\u03e7\u01f4\2\u118b\u118c\5\u03dd")
buf.write("\u01ef\2\u118c\u0306\3\2\2\2\u118d\u118e\5\u03f9\u01fd")
buf.write("\2\u118e\u118f\5\u03f3\u01fa\2\u118f\u1190\5\u0403\u0202")
buf.write("\2\u1190\u1191\5\u03fb\u01fe\2\u1191\u0308\3\2\2\2\u1192")
buf.write("\u1193\5\u03f9\u01fd\2\u1193\u1194\5\u03f3\u01fa\2\u1194")
buf.write("\u1195\5\u0403\u0202\2\u1195\u1196\7a\2\2\u1196\u1197")
buf.write("\5\u03f1\u01f9\2\u1197\u1198\5\u03ff\u0200\2\u1198\u1199")
buf.write("\5\u03ef\u01f8\2\u1199\u119a\5\u03d9\u01ed\2\u119a\u119b")
buf.write("\5\u03df\u01f0\2\u119b\u119c\5\u03f9\u01fd\2\u119c\u030a")
buf.write("\3\2\2\2\u119d\u119e\5\u03fb\u01fe\2\u119e\u119f\5\u03d7")
buf.write("\u01ec\2\u119f\u11a0\5\u03ef\u01f8\2\u11a0\u11a1\5\u03f5")
buf.write("\u01fb\2\u11a1\u11a2\5\u03ed\u01f7\2\u11a2\u11a3\5\u03df")
buf.write("\u01f0\2\u11a3\u030c\3\2\2\2\u11a4\u11a5\5\u03fb\u01fe")
buf.write("\2\u11a5\u11a6\5\u03db\u01ee\2\u11a6\u11a7\5\u03e5\u01f3")
buf.write("\2\u11a7\u11a8\5\u03df\u01f0\2\u11a8\u11a9\5\u03ef\u01f8")
buf.write("\2\u11a9\u11aa\5\u03d7\u01ec\2\u11aa\u11ab\5\u03d9\u01ed")
buf.write("\2\u11ab\u11ac\5\u03e7\u01f4\2\u11ac\u11ad\5\u03f1\u01f9")
buf.write("\2\u11ad\u11ae\5\u03dd\u01ef\2\u11ae\u11af\5\u03e7\u01f4")
buf.write("\2\u11af\u11b0\5\u03f1\u01f9\2\u11b0\u11b1\5\u03e3\u01f2")
buf.write("\2\u11b1\u030e\3\2\2\2\u11b2\u11b3\5\u03fb\u01fe\2\u11b3")
buf.write("\u11b4\5\u03db\u01ee\2\u11b4\u11b5\5\u03f9\u01fd\2\u11b5")
buf.write("\u11b6\5\u03f3\u01fa\2\u11b6\u11b7\5\u03ed\u01f7\2\u11b7")
buf.write("\u11b8\5\u03ed\u01f7\2\u11b8\u0310\3\2\2\2\u11b9\u11ba")
buf.write("\5\u03fb\u01fe\2\u11ba\u11bb\5\u03db\u01ee\2\u11bb\u11bc")
buf.write("\5\u03f9\u01fd\2\u11bc\u11bd\5\u03f3\u01fa\2\u11bd\u11be")
buf.write("\5\u03ed\u01f7\2\u11be\u11bf\5\u03ed\u01f7\2\u11bf\u11c0")
buf.write("\7a\2\2\u11c0\u11c1\5\u03ed\u01f7\2\u11c1\u11c2\5\u03f3")
buf.write("\u01fa\2\u11c2\u11c3\5\u03db\u01ee\2\u11c3\u11c4\5\u03eb")
buf.write("\u01f6\2\u11c4\u11c5\5\u03fb\u01fe\2\u11c5\u0312\3\2\2")
buf.write("\2\u11c6\u11c7\5\u03fb\u01fe\2\u11c7\u11c8\5\u03df\u01f0")
buf.write("\2\u11c8\u11c9\5\u03db\u01ee\2\u11c9\u11ca\5\u03f3\u01fa")
buf.write("\2\u11ca\u11cb\5\u03f1\u01f9\2\u11cb\u11cc\5\u03dd\u01ef")
buf.write("\2\u11cc\u11cd\5\u03fb\u01fe\2\u11cd\u0314\3\2\2\2\u11ce")
buf.write("\u11cf\5\u03fb\u01fe\2\u11cf\u11d0\5\u03df\u01f0\2\u11d0")
buf.write("\u11d1\5\u03ed\u01f7\2\u11d1\u11d2\5\u03e1\u01f1\2\u11d2")
buf.write("\u0316\3\2\2\2\u11d3\u11d4\5\u03fb\u01fe\2\u11d4\u11d5")
buf.write("\5\u03df\u01f0\2\u11d5\u11d6\5\u03f9\u01fd\2\u11d6\u11d7")
buf.write("\5\u03e7\u01f4\2\u11d7\u11d8\5\u03d7\u01ec\2\u11d8\u11d9")
buf.write("\5\u03ed\u01f7\2\u11d9\u11da\5\u03e7\u01f4\2\u11da\u11db")
buf.write("\5\u0409\u0205\2\u11db\u11dc\5\u03d7\u01ec\2\u11dc\u11dd")
buf.write("\5\u03d9\u01ed\2\u11dd\u11de\5\u03ed\u01f7\2\u11de\u11df")
buf.write("\5\u03df\u01f0\2\u11df\u0318\3\2\2\2\u11e0\u11e1\5\u03fb")
buf.write("\u01fe\2\u11e1\u11e2\5\u03df\u01f0\2\u11e2\u11e3\5\u03fd")
buf.write("\u01ff\2\u11e3\u11e4\5\u03fb\u01fe\2\u11e4\u031a\3\2\2")
buf.write("\2\u11e5\u11e6\5\u03fb\u01fe\2\u11e6\u11e7\5\u03e5\u01f3")
buf.write("\2\u11e7\u11e8\5\u03f3\u01fa\2\u11e8\u11e9\5\u0403\u0202")
buf.write("\2\u11e9\u11ea\5\u03f5\u01fb\2\u11ea\u11eb\5\u03ed\u01f7")
buf.write("\2\u11eb\u11ec\5\u03d7\u01ec\2\u11ec\u11ed\5\u03f1\u01f9")
buf.write("\2\u11ed\u031c\3\2\2\2\u11ee\u11ef\5\u03fb\u01fe\2\u11ef")
buf.write("\u11f0\5\u03e7\u01f4\2\u11f0\u11f1\5\u03ef\u01f8\2\u11f1")
buf.write("\u11f2\5\u03f5\u01fb\2\u11f2\u11f3\5\u03ed\u01f7\2\u11f3")
buf.write("\u11f4\5\u03df\u01f0\2\u11f4\u031e\3\2\2\2\u11f5\u11f6")
buf.write("\5\u03fb\u01fe\2\u11f6\u11f7\5\u03e7\u01f4\2\u11f7\u11f8")
buf.write("\5\u03f1\u01f9\2\u11f8\u0320\3\2\2\2\u11f9\u11fa\5\u03fb")
buf.write("\u01fe\2\u11fa\u11fb\5\u03e7\u01f4\2\u11fb\u11fc\5\u03f1")
buf.write("\u01f9\2\u11fc\u11fd\5\u03e3\u01f2\2\u11fd\u11fe\5\u03ed")
buf.write("\u01f7\2\u11fe\u11ff\5\u03df\u01f0\2\u11ff\u1200\7a\2")
buf.write("\2\u1200\u1201\5\u03ff\u0200\2\u1201\u1202\5\u03fb\u01fe")
buf.write("\2\u1202\u1203\5\u03df\u01f0\2\u1203\u1204\5\u03f9\u01fd")
buf.write("\2\u1204\u0322\3\2\2\2\u1205\u1206\5\u03fb\u01fe\2\u1206")
buf.write("\u1207\5\u03e7\u01f4\2\u1207\u1208\5\u0409\u0205\2\u1208")
buf.write("\u1209\5\u03df\u01f0\2\u1209\u0324\3\2\2\2\u120a\u120b")
buf.write("\5\u03fb\u01fe\2\u120b\u120c\5\u03ef\u01f8\2\u120c\u120d")
buf.write("\5\u03d7\u01ec\2\u120d\u120e\5\u03ed\u01f7\2\u120e\u120f")
buf.write("\5\u03ed\u01f7\2\u120f\u1210\5\u03e7\u01f4\2\u1210\u1211")
buf.write("\5\u03f1\u01f9\2\u1211\u1212\5\u03fd\u01ff\2\u1212\u0326")
buf.write("\3\2\2\2\u1213\u1214\5\u03fb\u01fe\2\u1214\u1215\5\u03f1")
buf.write("\u01f9\2\u1215\u1216\5\u03d7\u01ec\2\u1216\u1217\5\u03f5")
buf.write("\u01fb\2\u1217\u1218\5\u03fb\u01fe\2\u1218\u1219\5\u03e5")
buf.write("\u01f3\2\u1219\u121a\5\u03f3\u01fa\2\u121a\u121b\5\u03fd")
buf.write("\u01ff\2\u121b\u0328\3\2\2\2\u121c\u121d\5\u03fb\u01fe")
buf.write("\2\u121d\u121e\5\u03f5\u01fb\2\u121e\u121f\5\u03d7\u01ec")
buf.write("\2\u121f\u1220\5\u03fd\u01ff\2\u1220\u1221\5\u03e7\u01f4")
buf.write("\2\u1221\u1222\5\u03d7\u01ec\2\u1222\u1223\5\u03ed\u01f7")
buf.write("\2\u1223\u1224\7a\2\2\u1224\u1225\5\u0403\u0202\2\u1225")
buf.write("\u1226\5\u03e7\u01f4\2\u1226\u1227\5\u03f1\u01f9\2\u1227")
buf.write("\u1228\5\u03dd\u01ef\2\u1228\u1229\5\u03f3\u01fa\2\u1229")
buf.write("\u122a\5\u0403\u0202\2\u122a\u122b\7a\2\2\u122b\u122c")
buf.write("\5\u03ef\u01f8\2\u122c\u122d\5\u03d7\u01ec\2\u122d\u122e")
buf.write("\5\u0405\u0203\2\u122e\u122f\7a\2\2\u122f\u1230\5\u03db")
buf.write("\u01ee\2\u1230\u1231\5\u03df\u01f0\2\u1231\u1232\5\u03ed")
buf.write("\u01f7\2\u1232\u1233\5\u03ed\u01f7\2\u1233\u1234\5\u03fb")
buf.write("\u01fe\2\u1234\u032a\3\2\2\2\u1235\u1236\5\u03fb\u01fe")
buf.write("\2\u1236\u1237\5\u03fd\u01ff\2\u1237\u1238\5\u03d7\u01ec")
buf.write("\2\u1238\u1239\5\u03fd\u01ff\2\u1239\u123a\5\u03e7\u01f4")
buf.write("\2\u123a\u123b\5\u03db\u01ee\2\u123b\u032c\3\2\2\2\u123c")
buf.write("\u123d\5\u03fb\u01fe\2\u123d\u123e\5\u03fd\u01ff\2\u123e")
buf.write("\u123f\5\u03d7\u01ec\2\u123f\u1240\5\u03fd\u01ff\2\u1240")
buf.write("\u1241\5\u03fb\u01fe\2\u1241\u1242\7a\2\2\u1242\u1243")
buf.write("\5\u03fb\u01fe\2\u1243\u1244\5\u03fd\u01ff\2\u1244\u1245")
buf.write("\5\u03f9\u01fd\2\u1245\u1246\5\u03df\u01f0\2\u1246\u1247")
buf.write("\5\u03d7\u01ec\2\u1247\u1248\5\u03ef\u01f8\2\u1248\u032e")
buf.write("\3\2\2\2\u1249\u124a\5\u03fb\u01fe\2\u124a\u124b\5\u03fd")
buf.write("\u01ff\2\u124b\u124c\5\u03dd\u01ef\2\u124c\u124d\5\u03df")
buf.write("\u01f0\2\u124d\u124e\5\u0401\u0201\2\u124e\u0330\3\2\2")
buf.write("\2\u124f\u1250\5\u03fb\u01fe\2\u1250\u1251\5\u03fd\u01ff")
buf.write("\2\u1251\u1252\5\u03dd\u01ef\2\u1252\u1253\5\u03df\u01f0")
buf.write("\2\u1253\u1254\5\u0401\u0201\2\u1254\u1255\5\u03f5\u01fb")
buf.write("\2\u1255\u0332\3\2\2\2\u1256\u1257\5\u03fb\u01fe\2\u1257")
buf.write("\u1258\5\u03fd\u01ff\2\u1258\u1259\5\u03f9\u01fd\2\u1259")
buf.write("\u125a\5\u03e7\u01f4\2\u125a\u125b\5\u03f1\u01f9\2\u125b")
buf.write("\u125c\5\u03e3\u01f2\2\u125c\u125d\7a\2\2\u125d\u125e")
buf.write("\5\u03d7\u01ec\2\u125e\u125f\5\u03e3\u01f2\2\u125f\u1260")
buf.write("\5\u03e3\u01f2\2\u1260\u0334\3\2\2\2\u1261\u1262\5\u03fb")
buf.write("\u01fe\2\u1262\u1263\5\u03fd\u01ff\2\u1263\u1264\5\u03f9")
buf.write("\u01fd\2\u1264\u1265\5\u03e7\u01f4\2\u1265\u1266\5\u03f1")
buf.write("\u01f9\2\u1266\u1267\5\u03e3\u01f2\2\u1267\u1268\7a\2")
buf.write("\2\u1268\u1269\5\u03fb\u01fe\2\u1269\u126a\5\u03f5\u01fb")
buf.write("\2\u126a\u126b\5\u03ed\u01f7\2\u126b\u126c\5\u03e7\u01f4")
buf.write("\2\u126c\u126d\5\u03fd\u01ff\2\u126d\u0336\3\2\2\2\u126e")
buf.write("\u126f\5\u03fb\u01fe\2\u126f\u1270\5\u03ff\u0200\2\u1270")
buf.write("\u1271\5\u03ef\u01f8\2\u1271\u0338\3\2\2\2\u1272\u1273")
buf.write("\5\u03fb\u01fe\2\u1273\u1274\5\u03f7\u01fc\2\u1274\u1275")
buf.write("\5\u03f9\u01fd\2\u1275\u1276\5\u03fd\u01ff\2\u1276\u033a")
buf.write("\3\2\2\2\u1277\u1278\5\u03fb\u01fe\2\u1278\u1279\5\u03f7")
buf.write("\u01fc\2\u1279\u127a\5\u03ff\u0200\2\u127a\u127b\5\u03d7")
buf.write("\u01ec\2\u127b\u127c\5\u03f9\u01fd\2\u127c\u127d\5\u03df")
buf.write("\u01f0\2\u127d\u033c\3\2\2\2\u127e\u127f\5\u03fd\u01ff")
buf.write("\2\u127f\u1280\5\u03d7\u01ec\2\u1280\u1281\5\u03eb\u01f6")
buf.write("\2\u1281\u1282\5\u03df\u01f0\2\u1282\u033e\3\2\2\2\u1283")
buf.write("\u1284\5\u03fd\u01ff\2\u1284\u1285\5\u03d7\u01ec\2\u1285")
buf.write("\u1286\5\u03f1\u01f9\2\u1286\u0340\3\2\2\2\u1287\u1288")
buf.write("\5\u03fd\u01ff\2\u1288\u1289\5\u03d7\u01ec\2\u1289\u128a")
buf.write("\5\u03f9\u01fd\2\u128a\u128b\5\u03e3\u01f2\2\u128b\u128c")
buf.write("\5\u03df\u01f0\2\u128c\u128d\5\u03fd\u01ff\2\u128d\u128e")
buf.write("\7a\2\2\u128e\u128f\5\u03f9\u01fd\2\u128f\u1290\5\u03df")
buf.write("\u01f0\2\u1290\u1291\5\u03db\u01ee\2\u1291\u1292\5\u03f3")
buf.write("\u01fa\2\u1292\u1293\5\u0401\u0201\2\u1293\u1294\5\u03df")
buf.write("\u01f0\2\u1294\u1295\5\u03f9\u01fd\2\u1295\u1296\5\u0407")
buf.write("\u0204\2\u1296\u1297\7a\2\2\u1297\u1298\5\u03fd\u01ff")
buf.write("\2\u1298\u1299\5\u03e7\u01f4\2\u1299\u129a\5\u03ef\u01f8")
buf.write("\2\u129a\u129b\5\u03df\u01f0\2\u129b\u0342\3\2\2\2\u129c")
buf.write("\u129d\5\u03fd\u01ff\2\u129d\u129e\5\u03d9\u01ed\2\u129e")
buf.write("\u0344\3\2\2\2\u129f\u12a0\5\u03fd\u01ff\2\u12a0\u12a1")
buf.write("\5\u03df\u01f0\2\u12a1\u12a2\5\u0405\u0203\2\u12a2\u12a3")
buf.write("\5\u03fd\u01ff\2\u12a3\u12a4\5\u03e7\u01f4\2\u12a4\u12a5")
buf.write("\5\u03ef\u01f8\2\u12a5\u12a6\5\u03d7\u01ec\2\u12a6\u12a7")
buf.write("\5\u03e3\u01f2\2\u12a7\u12a8\5\u03df\u01f0\2\u12a8\u12a9")
buf.write("\7a\2\2\u12a9\u12aa\5\u03f3\u01fa\2\u12aa\u12ab\5\u03f1")
buf.write("\u01f9\2\u12ab\u0346\3\2\2\2\u12ac\u12ad\5\u03fd\u01ff")
buf.write("\2\u12ad\u12ae\5\u03e5\u01f3\2\u12ae\u12af\5\u03f9\u01fd")
buf.write("\2\u12af\u12b0\5\u03f3\u01fa\2\u12b0\u12b1\5\u0403\u0202")
buf.write("\2\u12b1\u0348\3\2\2\2\u12b2\u12b3\5\u03fd\u01ff\2\u12b3")
buf.write("\u12b4\5\u03e7\u01f4\2\u12b4\u12b5\5\u03df\u01f0\2\u12b5")
buf.write("\u12b6\5\u03fb\u01fe\2\u12b6\u034a\3\2\2\2\u12b7\u12b8")
buf.write("\5\u03fd\u01ff\2\u12b8\u12b9\5\u03e7\u01f4\2\u12b9\u12ba")
buf.write("\5\u03ef\u01f8\2\u12ba\u12bb\5\u03df\u01f0\2\u12bb\u034c")
buf.write("\3\2\2\2\u12bc\u12bd\5\u03fd\u01ff\2\u12bd\u12be\5\u03e7")
buf.write("\u01f4\2\u12be\u12bf\5\u03f1\u01f9\2\u12bf\u12c0\5\u0407")
buf.write("\u0204\2\u12c0\u12c1\5\u03e7\u01f4\2\u12c1\u12c2\5\u03f1")
buf.write("\u01f9\2\u12c2\u12c3\5\u03fd\u01ff\2\u12c3\u034e\3\2\2")
buf.write("\2\u12c4\u12c5\5\u03fd\u01ff\2\u12c5\u12c6\5\u03f3\u01fa")
buf.write("\2\u12c6\u12c7\5\u03f9\u01fd\2\u12c7\u12c8\5\u03f1\u01f9")
buf.write("\2\u12c8\u12c9\7a\2\2\u12c9\u12ca\5\u03f5\u01fb\2\u12ca")
buf.write("\u12cb\5\u03d7\u01ec\2\u12cb\u12cc\5\u03e3\u01f2\2\u12cc")
buf.write("\u12cd\5\u03df\u01f0\2\u12cd\u12ce\7a\2\2\u12ce\u12cf")
buf.write("\5\u03dd\u01ef\2\u12cf\u12d0\5\u03df\u01f0\2\u12d0\u12d1")
buf.write("\5\u03fd\u01ff\2\u12d1\u12d2\5\u03df\u01f0\2\u12d2\u12d3")
buf.write("\5\u03db\u01ee\2\u12d3\u12d4\5\u03fd\u01ff\2\u12d4\u12d5")
buf.write("\5\u03e7\u01f4\2\u12d5\u12d6\5\u03f3\u01fa\2\u12d6\u12d7")
buf.write("\5\u03f1\u01f9\2\u12d7\u0350\3\2\2\2\u12d8\u12d9\5\u03fd")
buf.write("\u01ff\2\u12d9\u12da\5\u03f9\u01fd\2\u12da\u12db\5\u03d7")
buf.write("\u01ec\2\u12db\u12dc\5\u03f1\u01f9\2\u12dc\u12dd\5\u03fb")
buf.write("\u01fe\2\u12dd\u12de\5\u03e1\u01f1\2\u12de\u12df\5\u03f3")
buf.write("\u01fa\2\u12df\u12e0\5\u03f9\u01fd\2\u12e0\u12e1\5\u03ef")
buf.write("\u01f8\2\u12e1\u12e2\7a\2\2\u12e2\u12e3\5\u03f1\u01f9")
buf.write("\2\u12e3\u12e4\5\u03f3\u01fa\2\u12e4\u12e5\5\u03e7\u01f4")
buf.write("\2\u12e5\u12e6\5\u03fb\u01fe\2\u12e6\u12e7\5\u03df\u01f0")
buf.write("\2\u12e7\u12e8\7a\2\2\u12e8\u12e9\5\u0403\u0202\2\u12e9")
buf.write("\u12ea\5\u03f3\u01fa\2\u12ea\u12eb\5\u03f9\u01fd\2\u12eb")
buf.write("\u12ec\5\u03dd\u01ef\2\u12ec\u12ed\5\u03fb\u01fe\2\u12ed")
buf.write("\u0352\3\2\2\2\u12ee\u12ef\5\u03fd\u01ff\2\u12ef\u12f0")
buf.write("\5\u03f9\u01fd\2\u12f0\u12f1\5\u03ff\u0200\2\u12f1\u12f2")
buf.write("\5\u03fb\u01fe\2\u12f2\u12f3\5\u03fd\u01ff\2\u12f3\u12f4")
buf.write("\5\u0403\u0202\2\u12f4\u12f5\5\u03f3\u01fa\2\u12f5\u12f6")
buf.write("\5\u03f9\u01fd\2\u12f6\u12f7\5\u03fd\u01ff\2\u12f7\u12f8")
buf.write("\5\u03e5\u01f3\2\u12f8\u12f9\5\u0407\u0204\2\u12f9\u0354")
buf.write("\3\2\2\2\u12fa\u12fb\5\u03fd\u01ff\2\u12fb\u12fc\5\u03f9")
buf.write("\u01fd\2\u12fc\u12fd\5\u0407\u0204\2\u12fd\u0356\3\2\2")
buf.write("\2\u12fe\u12ff\5\u03fd\u01ff\2\u12ff\u1300\5\u0403\u0202")
buf.write("\2\u1300\u1301\5\u03f3\u01fa\2\u1301\u1302\7a\2\2\u1302")
buf.write("\u1303\5\u03dd\u01ef\2\u1303\u1304\5\u03e7\u01f4\2\u1304")
buf.write("\u1305\5\u03e3\u01f2\2\u1305\u1306\5\u03e7\u01f4\2\u1306")
buf.write("\u1307\5\u03fd\u01ff\2\u1307\u1308\7a\2\2\u1308\u1309")
buf.write("\5\u0407\u0204\2\u1309\u130a\5\u03df\u01f0\2\u130a\u130b")
buf.write("\5\u03d7\u01ec\2\u130b\u130c\5\u03f9\u01fd\2\u130c\u130d")
buf.write("\7a\2\2\u130d\u130e\5\u03db\u01ee\2\u130e\u130f\5\u03ff")
buf.write("\u0200\2\u130f\u1310\5\u03fd\u01ff\2\u1310\u1311\5\u03f3")
buf.write("\u01fa\2\u1311\u1312\5\u03e1\u01f1\2\u1312\u1313\5\u03e1")
buf.write("\u01f1\2\u1313\u0358\3\2\2\2\u1314\u1315\5\u03fd\u01ff")
buf.write("\2\u1315\u1316\5\u0407\u0204\2\u1316\u1317\5\u03f5\u01fb")
buf.write("\2\u1317\u1318\5\u03df\u01f0\2\u1318\u035a\3\2\2\2\u1319")
buf.write("\u131a\5\u03fd\u01ff\2\u131a\u131b\5\u0407\u0204\2\u131b")
buf.write("\u131c\5\u03f5\u01fb\2\u131c\u131d\5\u03df\u01f0\2\u131d")
buf.write("\u131e\7a\2\2\u131e\u131f\5\u0403\u0202\2\u131f\u1320")
buf.write("\5\u03d7\u01ec\2\u1320\u1321\5\u03f9\u01fd\2\u1321\u1322")
buf.write("\5\u03f1\u01f9\2\u1322\u1323\5\u03e7\u01f4\2\u1323\u1324")
buf.write("\5\u03f1\u01f9\2\u1324\u1325\5\u03e3\u01f2\2\u1325\u035c")
buf.write("\3\2\2\2\u1326\u1327\5\u03ff\u0200\2\u1327\u1328\5\u03f1")
buf.write("\u01f9\2\u1328\u1329\5\u03d9\u01ed\2\u1329\u132a\5\u03f3")
buf.write("\u01fa\2\u132a\u132b\5\u03ff\u0200\2\u132b\u132c\5\u03f1")
buf.write("\u01f9\2\u132c\u132d\5\u03dd\u01ef\2\u132d\u132e\5\u03df")
buf.write("\u01f0\2\u132e\u132f\5\u03dd\u01ef\2\u132f\u035e\3\2\2")
buf.write("\2\u1330\u1331\5\u03ff\u0200\2\u1331\u1332\5\u03f1\u01f9")
buf.write("\2\u1332\u1333\5\u03db\u01ee\2\u1333\u1334\5\u03f3\u01fa")
buf.write("\2\u1334\u1335\5\u03ef\u01f8\2\u1335\u1336\5\u03ef\u01f8")
buf.write("\2\u1336\u1337\5\u03e7\u01f4\2\u1337\u1338\5\u03fd\u01ff")
buf.write("\2\u1338\u1339\5\u03fd\u01ff\2\u1339\u133a\5\u03df\u01f0")
buf.write("\2\u133a\u133b\5\u03dd\u01ef\2\u133b\u0360\3\2\2\2\u133c")
buf.write("\u133d\5\u03ff\u0200\2\u133d\u133e\5\u03f1\u01f9\2\u133e")
buf.write("\u133f\5\u03eb\u01f6\2\u133f\u1340\5\u03f1\u01f9\2\u1340")
buf.write("\u1341\5\u03f3\u01fa\2\u1341\u1342\5\u0403\u0202\2\u1342")
buf.write("\u1343\5\u03f1\u01f9\2\u1343\u0362\3\2\2\2\u1344\u1345")
buf.write("\5\u03ff\u0200\2\u1345\u1346\5\u03f1\u01f9\2\u1346\u1347")
buf.write("\5\u03ed\u01f7\2\u1347\u1348\5\u03e7\u01f4\2\u1348\u1349")
buf.write("\5\u03ef\u01f8\2\u1349\u134a\5\u03e7\u01f4\2\u134a\u134b")
buf.write("\5\u03fd\u01ff\2\u134b\u134c\5\u03df\u01f0\2\u134c\u134d")
buf.write("\5\u03dd\u01ef\2\u134d\u0364\3\2\2\2\u134e\u134f\5\u03ff")
buf.write("\u0200\2\u134f\u1350\5\u03fb\u01fe\2\u1350\u1351\5\u03e7")
buf.write("\u01f4\2\u1351\u1352\5\u03f1\u01f9\2\u1352\u1353\5\u03e3")
buf.write("\u01f2\2\u1353\u0366\3\2\2\2\u1354\u1355\5\u0401\u0201")
buf.write("\2\u1355\u1356\5\u03d7\u01ec\2\u1356\u1357\5\u03f9\u01fd")
buf.write("\2\u1357\u0368\3\2\2\2\u1358\u1359\5\u0401\u0201\2\u1359")
buf.write("\u135a\5\u03d7\u01ec\2\u135a\u135b\5\u03f9\u01fd\2\u135b")
buf.write("\u135c\5\u03f5\u01fb\2\u135c\u036a\3\2\2\2\u135d\u135e")
buf.write("\5\u0401\u0201\2\u135e\u135f\5\u03e7\u01f4\2\u135f\u1360")
buf.write("\5\u03df\u01f0\2\u1360\u1361\5\u0403\u0202\2\u1361\u1362")
buf.write("\5\u03fb\u01fe\2\u1362\u036c\3\2\2\2\u1363\u1364\5\u0401")
buf.write("\u0201\2\u1364\u1365\5\u03e7\u01f4\2\u1365\u1366\5\u03df")
buf.write("\u01f0\2\u1366\u1367\5\u0403\u0202\2\u1367\u1368\7a\2")
buf.write("\2\u1368\u1369\5\u03ef\u01f8\2\u1369\u136a\5\u03df\u01f0")
buf.write("\2\u136a\u136b\5\u03fd\u01ff\2\u136b\u136c\5\u03d7\u01ec")
buf.write("\2\u136c\u136d\5\u03dd\u01ef\2\u136d\u136e\5\u03d7\u01ec")
buf.write("\2\u136e\u136f\5\u03fd\u01ff\2\u136f\u1370\5\u03d7\u01ec")
buf.write("\2\u1370\u036e\3\2\2\2\u1371\u1372\5\u0403\u0202\2\u1372")
buf.write("\u1373\5\u03f3\u01fa\2\u1373\u1374\5\u03f9\u01fd\2\u1374")
buf.write("\u1375\5\u03eb\u01f6\2\u1375\u0370\3\2\2\2\u1376\u1377")
buf.write("\5\u0405\u0203\2\u1377\u1378\5\u03ef\u01f8\2\u1378\u1379")
buf.write("\5\u03ed\u01f7\2\u1379\u0372\3\2\2\2\u137a\u137b\5\u0405")
buf.write("\u0203\2\u137b\u137c\5\u03ef\u01f8\2\u137c\u137d\5\u03ed")
buf.write("\u01f7\2\u137d\u137e\5\u03f1\u01f9\2\u137e\u137f\5\u03d7")
buf.write("\u01ec\2\u137f\u1380\5\u03ef\u01f8\2\u1380\u1381\5\u03df")
buf.write("\u01f0\2\u1381\u1382\5\u03fb\u01fe\2\u1382\u1383\5\u03f5")
buf.write("\u01fb\2\u1383\u1384\5\u03d7\u01ec\2\u1384\u1385\5\u03db")
buf.write("\u01ee\2\u1385\u1386\5\u03df\u01f0\2\u1386\u1387\5\u03fb")
buf.write("\u01fe\2\u1387\u0374\3\2\2\2\u1388\u1389\5\u0409\u0205")
buf.write("\2\u1389\u138a\5\u03f3\u01fa\2\u138a\u138b\5\u03f1\u01f9")
buf.write("\2\u138b\u138c\5\u03df\u01f0\2\u138c\u0376\3\2\2\2\u138d")
buf.write("\u138e\7&\2\2\u138e\u138f\5\u03d7\u01ec\2\u138f\u1390")
buf.write("\5\u03db\u01ee\2\u1390\u1391\5\u03fd\u01ff\2\u1391\u1392")
buf.write("\5\u03e7\u01f4\2\u1392\u1393\5\u03f3\u01fa\2\u1393\u1394")
buf.write("\5\u03f1\u01f9\2\u1394\u0378\3\2\2\2\u1395\u1397\t\2\2")
buf.write("\2\u1396\u1395\3\2\2\2\u1397\u1398\3\2\2\2\u1398\u1396")
buf.write("\3\2\2\2\u1398\u1399\3\2\2\2\u1399\u139a\3\2\2\2\u139a")
buf.write("\u139b\b\u01bd\2\2\u139b\u037a\3\2\2\2\u139c\u139d\7\61")
buf.write("\2\2\u139d\u139e\7,\2\2\u139e\u13a2\3\2\2\2\u139f\u13a1")
buf.write("\13\2\2\2\u13a0\u139f\3\2\2\2\u13a1\u13a4\3\2\2\2\u13a2")
buf.write("\u13a3\3\2\2\2\u13a2\u13a0\3\2\2\2\u13a3\u13a5\3\2\2\2")
buf.write("\u13a4\u13a2\3\2\2\2\u13a5\u13a6\7,\2\2\u13a6\u13a7\7")
buf.write("\61\2\2\u13a7\u13a8\3\2\2\2\u13a8\u13a9\b\u01be\3\2\u13a9")
buf.write("\u037c\3\2\2\2\u13aa\u13ab\7/\2\2\u13ab\u13ac\7/\2\2\u13ac")
buf.write("\u13b0\3\2\2\2\u13ad\u13af\n\3\2\2\u13ae\u13ad\3\2\2\2")
buf.write("\u13af\u13b2\3\2\2\2\u13b0\u13ae\3\2\2\2\u13b0\u13b1\3")
buf.write("\2\2\2\u13b1\u13b3\3\2\2\2\u13b2\u13b0\3\2\2\2\u13b3\u13b4")
buf.write("\b\u01bf\3\2\u13b4\u037e\3\2\2\2\u13b5\u13b7\7$\2\2\u13b6")
buf.write("\u13b8\n\4\2\2\u13b7\u13b6\3\2\2\2\u13b8\u13b9\3\2\2\2")
buf.write("\u13b9\u13b7\3\2\2\2\u13b9\u13ba\3\2\2\2\u13ba\u13bb\3")
buf.write("\2\2\2\u13bb\u13bc\7$\2\2\u13bc\u0380\3\2\2\2\u13bd\u13bf")
buf.write("\7]\2\2\u13be\u13c0\n\5\2\2\u13bf\u13be\3\2\2\2\u13c0")
buf.write("\u13c1\3\2\2\2\u13c1\u13bf\3\2\2\2\u13c1\u13c2\3\2\2\2")
buf.write("\u13c2\u13c3\3\2\2\2\u13c3\u13c4\7_\2\2\u13c4\u0382\3")
buf.write("\2\2\2\u13c5\u13c8\7B\2\2\u13c6\u13c9\t\6\2\2\u13c7\u13c9")
buf.write("\5\u040b\u0206\2\u13c8\u13c6\3\2\2\2\u13c8\u13c7\3\2\2")
buf.write("\2\u13c9\u13ca\3\2\2\2\u13ca\u13c8\3\2\2\2\u13ca\u13cb")
buf.write("\3\2\2\2\u13cb\u0384\3\2\2\2\u13cc\u13ce\5\u03d5\u01eb")
buf.write("\2\u13cd\u13cc\3\2\2\2\u13ce\u13cf\3\2\2\2\u13cf\u13cd")
buf.write("\3\2\2\2\u13cf\u13d0\3\2\2\2\u13d0\u0386\3\2\2\2\u13d1")
buf.write("\u13d4\t\7\2\2\u13d2\u13d4\5\u040b\u0206\2\u13d3\u13d1")
buf.write("\3\2\2\2\u13d3\u13d2\3\2\2\2\u13d4\u13d9\3\2\2\2\u13d5")
buf.write("\u13d8\t\6\2\2\u13d6\u13d8\5\u040b\u0206\2\u13d7\u13d5")
buf.write("\3\2\2\2\u13d7\u13d6\3\2\2\2\u13d8\u13db\3\2\2\2\u13d9")
buf.write("\u13d7\3\2\2\2\u13d9\u13da\3\2\2\2\u13da\u0388\3\2\2\2")
buf.write("\u13db\u13d9\3\2\2\2\u13dc\u13de\5\u03f1\u01f9\2\u13dd")
buf.write("\u13dc\3\2\2\2\u13dd\u13de\3\2\2\2\u13de\u13df\3\2\2\2")
buf.write("\u13df\u13e5\7)\2\2\u13e0\u13e4\n\b\2\2\u13e1\u13e2\7")
buf.write(")\2\2\u13e2\u13e4\7)\2\2\u13e3\u13e0\3\2\2\2\u13e3\u13e1")
buf.write("\3\2\2\2\u13e4\u13e7\3\2\2\2\u13e5\u13e3\3\2\2\2\u13e5")
buf.write("\u13e6\3\2\2\2\u13e6\u13e8\3\2\2\2\u13e7\u13e5\3\2\2\2")
buf.write("\u13e8\u13e9\7)\2\2\u13e9\u038a\3\2\2\2\u13ea\u13eb\7")
buf.write("\62\2\2\u13eb\u13ef\5\u0405\u0203\2\u13ec\u13ee\5\u03d3")
buf.write("\u01ea\2\u13ed\u13ec\3\2\2\2\u13ee\u13f1\3\2\2\2\u13ef")
buf.write("\u13ed\3\2\2\2\u13ef\u13f0\3\2\2\2\u13f0\u038c\3\2\2\2")
buf.write("\u13f1\u13ef\3\2\2\2\u13f2\u13f3\5\u03d1\u01e9\2\u13f3")
buf.write("\u038e\3\2\2\2\u13f4\u13f7\5\u03d1\u01e9\2\u13f5\u13f7")
buf.write("\5\u03d5\u01eb\2\u13f6\u13f4\3\2\2\2\u13f6\u13f5\3\2\2")
buf.write("\2\u13f7\u1401\3\2\2\2\u13f8\u13fa\5\u03df\u01f0\2\u13f9")
buf.write("\u13fb\t\t\2\2\u13fa\u13f9\3\2\2\2\u13fa\u13fb\3\2\2\2")
buf.write("\u13fb\u13fd\3\2\2\2\u13fc\u13fe\5\u03d5\u01eb\2\u13fd")
buf.write("\u13fc\3\2\2\2\u13fe\u13ff\3\2\2\2\u13ff\u13fd\3\2\2\2")
buf.write("\u13ff\u1400\3\2\2\2\u1400\u1402\3\2\2\2\u1401\u13f8\3")
buf.write("\2\2\2\u1401\u1402\3\2\2\2\u1402\u0390\3\2\2\2\u1403\u1404")
buf.write("\7?\2\2\u1404\u0392\3\2\2\2\u1405\u1406\7@\2\2\u1406\u0394")
buf.write("\3\2\2\2\u1407\u1408\7>\2\2\u1408\u0396\3\2\2\2\u1409")
buf.write("\u140a\7#\2\2\u140a\u0398\3\2\2\2\u140b\u140c\7-\2\2\u140c")
buf.write("\u140d\7?\2\2\u140d\u039a\3\2\2\2\u140e\u140f\7/\2\2\u140f")
buf.write("\u1410\7?\2\2\u1410\u039c\3\2\2\2\u1411\u1412\7,\2\2\u1412")
buf.write("\u1413\7?\2\2\u1413\u039e\3\2\2\2\u1414\u1415\7\61\2\2")
buf.write("\u1415\u1416\7?\2\2\u1416\u03a0\3\2\2\2\u1417\u1418\7")
buf.write("\'\2\2\u1418\u1419\7?\2\2\u1419\u03a2\3\2\2\2\u141a\u141b")
buf.write("\7(\2\2\u141b\u141c\7?\2\2\u141c\u03a4\3\2\2\2\u141d\u141e")
buf.write("\7`\2\2\u141e\u141f\7?\2\2\u141f\u03a6\3\2\2\2\u1420\u1421")
buf.write("\7~\2\2\u1421\u1422\7?\2\2\u1422\u03a8\3\2\2\2\u1423\u1424")
buf.write("\7\60\2\2\u1424\u03aa\3\2\2\2\u1425\u1426\7a\2\2\u1426")
buf.write("\u03ac\3\2\2\2\u1427\u1428\7B\2\2\u1428\u03ae\3\2\2\2")
buf.write("\u1429\u142a\7%\2\2\u142a\u03b0\3\2\2\2\u142b\u142c\7")
buf.write("&\2\2\u142c\u03b2\3\2\2\2\u142d\u142e\7*\2\2\u142e\u03b4")
buf.write("\3\2\2\2\u142f\u1430\7+\2\2\u1430\u03b6\3\2\2\2\u1431")
buf.write("\u1432\7.\2\2\u1432\u03b8\3\2\2\2\u1433\u1434\7=\2\2\u1434")
buf.write("\u03ba\3\2\2\2\u1435\u1436\7<\2\2\u1436\u03bc\3\2\2\2")
buf.write("\u1437\u1438\7,\2\2\u1438\u03be\3\2\2\2\u1439\u143a\7")
buf.write("\61\2\2\u143a\u03c0\3\2\2\2\u143b\u143c\7\'\2\2\u143c")
buf.write("\u03c2\3\2\2\2\u143d\u143e\7-\2\2\u143e\u03c4\3\2\2\2")
buf.write("\u143f\u1440\7/\2\2\u1440\u03c6\3\2\2\2\u1441\u1442\7")
buf.write("\u0080\2\2\u1442\u03c8\3\2\2\2\u1443\u1444\7~\2\2\u1444")
buf.write("\u03ca\3\2\2\2\u1445\u1446\7(\2\2\u1446\u03cc\3\2\2\2")
buf.write("\u1447\u1448\7`\2\2\u1448\u03ce\3\2\2\2\u1449\u144a\t")
buf.write("\n\2\2\u144a\u03d0\3\2\2\2\u144b\u144d\5\u03d5\u01eb\2")
buf.write("\u144c\u144b\3\2\2\2\u144d\u144e\3\2\2\2\u144e\u144c\3")
buf.write("\2\2\2\u144e\u144f\3\2\2\2\u144f\u1450\3\2\2\2\u1450\u1452")
buf.write("\7\60\2\2\u1451\u1453\5\u03d5\u01eb\2\u1452\u1451\3\2")
buf.write("\2\2\u1453\u1454\3\2\2\2\u1454\u1452\3\2\2\2\u1454\u1455")
buf.write("\3\2\2\2\u1455\u1464\3\2\2\2\u1456\u1458\5\u03d5\u01eb")
buf.write("\2\u1457\u1456\3\2\2\2\u1458\u1459\3\2\2\2\u1459\u1457")
buf.write("\3\2\2\2\u1459\u145a\3\2\2\2\u145a\u145b\3\2\2\2\u145b")
buf.write("\u145c\7\60\2\2\u145c\u1464\3\2\2\2\u145d\u145f\7\60\2")
buf.write("\2\u145e\u1460\5\u03d5\u01eb\2\u145f\u145e\3\2\2\2\u1460")
buf.write("\u1461\3\2\2\2\u1461\u145f\3\2\2\2\u1461\u1462\3\2\2\2")
buf.write("\u1462\u1464\3\2\2\2\u1463\u144c\3\2\2\2\u1463\u1457\3")
buf.write("\2\2\2\u1463\u145d\3\2\2\2\u1464\u03d2\3\2\2\2\u1465\u1466")
buf.write("\t\13\2\2\u1466\u03d4\3\2\2\2\u1467\u1468\t\f\2\2\u1468")
buf.write("\u03d6\3\2\2\2\u1469\u146a\t\r\2\2\u146a\u03d8\3\2\2\2")
buf.write("\u146b\u146c\t\16\2\2\u146c\u03da\3\2\2\2\u146d\u146e")
buf.write("\t\17\2\2\u146e\u03dc\3\2\2\2\u146f\u1470\t\20\2\2\u1470")
buf.write("\u03de\3\2\2\2\u1471\u1472\t\21\2\2\u1472\u03e0\3\2\2")
buf.write("\2\u1473\u1474\t\22\2\2\u1474\u03e2\3\2\2\2\u1475\u1476")
buf.write("\t\23\2\2\u1476\u03e4\3\2\2\2\u1477\u1478\t\24\2\2\u1478")
buf.write("\u03e6\3\2\2\2\u1479\u147a\t\25\2\2\u147a\u03e8\3\2\2")
buf.write("\2\u147b\u147c\t\26\2\2\u147c\u03ea\3\2\2\2\u147d\u147e")
buf.write("\t\27\2\2\u147e\u03ec\3\2\2\2\u147f\u1480\t\30\2\2\u1480")
buf.write("\u03ee\3\2\2\2\u1481\u1482\t\31\2\2\u1482\u03f0\3\2\2")
buf.write("\2\u1483\u1484\t\32\2\2\u1484\u03f2\3\2\2\2\u1485\u1486")
buf.write("\t\33\2\2\u1486\u03f4\3\2\2\2\u1487\u1488\t\34\2\2\u1488")
buf.write("\u03f6\3\2\2\2\u1489\u148a\t\35\2\2\u148a\u03f8\3\2\2")
buf.write("\2\u148b\u148c\t\36\2\2\u148c\u03fa\3\2\2\2\u148d\u148e")
buf.write("\t\37\2\2\u148e\u03fc\3\2\2\2\u148f\u1490\t \2\2\u1490")
buf.write("\u03fe\3\2\2\2\u1491\u1492\t!\2\2\u1492\u0400\3\2\2\2")
buf.write("\u1493\u1494\t\"\2\2\u1494\u0402\3\2\2\2\u1495\u1496\t")
buf.write("#\2\2\u1496\u0404\3\2\2\2\u1497\u1498\t$\2\2\u1498\u0406")
buf.write("\3\2\2\2\u1499\u149a\t%\2\2\u149a\u0408\3\2\2\2\u149b")
buf.write("\u149c\t&\2\2\u149c\u040a\3\2\2\2\u149d\u149e\t\'\2\2")
buf.write("\u149e\u040c\3\2\2\2\34\2\u05f6\u1398\u13a2\u13b0\u13b9")
buf.write("\u13c1\u13c8\u13ca\u13cf\u13d3\u13d7\u13d9\u13dd\u13e3")
buf.write("\u13e5\u13ef\u13f6\u13fa\u13ff\u1401\u144e\u1454\u1459")
buf.write("\u1461\u1463\4\b\2\2\2\3\2")
return buf.getvalue()
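# ANTLR-generated lexer: the class below deserializes the ATN produced by
# serializedATN() above and exposes the T-SQL token type constants.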
class tsqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
HADR_options = 1
ADD = 2
ALL = 3
ALTER = 4
AND = 5
ANY = 6
AS = 7
ASC = 8
AUTHORIZATION = 9
BACKUP = 10
BEGIN = 11
BETWEEN = 12
BREAK = 13
BROWSE = 14
BULK = 15
BY = 16
CALLED = 17
CASCADE = 18
CASE = 19
CHANGETABLE = 20
CHANGES = 21
CHECK = 22
CHECKPOINT = 23
CLOSE = 24
CLUSTERED = 25
COALESCE = 26
COLLATE = 27
COLUMN = 28
COMMIT = 29
COMPUTE = 30
CONSTRAINT = 31
CONTAINMENT = 32
CONTAINS = 33
CONTAINSTABLE = 34
CONTINUE = 35
CONVERT = 36
CREATE = 37
CROSS = 38
CURRENT = 39
CURRENT_DATE = 40
CURRENT_TIME = 41
CURRENT_TIMESTAMP = 42
CURRENT_USER = 43
CURSOR = 44
DATABASE = 45
DBCC = 46
DEALLOCATE = 47
DECLARE = 48
DEFAULT = 49
DELETE = 50
DENY = 51
DESC = 52
DISK = 53
DISTINCT = 54
DISTRIBUTED = 55
DOUBLE = 56
DROP = 57
DUMP = 58
ELSE = 59
END = 60
ERRLVL = 61
ESCAPE = 62
EXCEPT = 63
EXECUTE = 64
EXISTS = 65
EXIT = 66
EXP = 67
EXTERNAL = 68
FETCH = 69
FILE = 70
FILENAME = 71
FILLFACTOR = 72
FOR = 73
FORCESEEK = 74
FOREIGN = 75
FREETEXT = 76
FREETEXTTABLE = 77
FROM = 78
FULL = 79
FUNCTION = 80
GOTO = 81
GRANT = 82
GROUP = 83
HAVING = 84
IDENTITY = 85
IDENTITYCOL = 86
IDENTITY_INSERT = 87
IIF = 88
IF = 89
IN = 90
INDEX = 91
INCLUDE = 92
INNER = 93
INSERT = 94
INTERSECT = 95
INTO = 96
IS = 97
JOIN = 98
KEY = 99
KILL = 100
LEFT = 101
LIKE = 102
LINENO = 103
LOAD = 104
LOG = 105
LOG10 = 106
MERGE = 107
NATIONAL = 108
NEXT = 109
NOCHECK = 110
NONCLUSTERED = 111
NONE = 112
NOT = 113
NULL = 114
NULLIF = 115
NUMERIC = 116
OF = 117
OFF = 118
OFFSETS = 119
ON = 120
OPEN = 121
OPENDATASOURCE = 122
OPENQUERY = 123
OPENROWSET = 124
OPENXML = 125
OPTION = 126
OR = 127
ORDER = 128
OUTER = 129
OVER = 130
PARTIAL = 131
PERCENT = 132
PIVOT = 133
PLAN = 134
PRECISION = 135
PRIMARY = 136
PRINT = 137
PROC = 138
PROCEDURE = 139
PUBLIC = 140
RAISERROR = 141
READ = 142
READTEXT = 143
RECONFIGURE = 144
REFERENCES = 145
REPLICATION = 146
RESTORE = 147
RESTRICT = 148
RETURN = 149
RETURNS = 150
REVERT = 151
REVOKE = 152
RIGHT = 153
ROLLBACK = 154
ROWCOUNT = 155
ROWGUIDCOL = 156
RULE = 157
SAVE = 158
SCHEMA = 159
SECURITYAUDIT = 160
SELECT = 161
SEMANTICKEYPHRASETABLE = 162
SEMANTICSIMILARITYDETAILSTABLE = 163
SEMANTICSIMILARITYTABLE = 164
SESSION_USER = 165
SET = 166
SETUSER = 167
SHUTDOWN = 168
SIGN = 169
SOME = 170
STATISTICS = 171
SYNONYM = 172
SYSTEM = 173
SYSTEM_USER = 174
TABLE = 175
TABLESAMPLE = 176
TEXTSIZE = 177
THEN = 178
TO = 179
TOP = 180
TRAN = 181
TRANSACTION = 182
TRIGGER = 183
TRUNCATE = 184
TRY_CAST = 185
TRY_CONVERT = 186
TRY_PARSE = 187
TSEQUAL = 188
UNION = 189
UNIQUE = 190
UNPIVOT = 191
UPDATE = 192
UPDATETEXT = 193
USE = 194
USER = 195
VALUES = 196
VALUE = 197
VARYING = 198
VIEW = 199
WAITFOR = 200
WHEN = 201
WHERE = 202
WHILE = 203
WITH = 204
WITHIN = 205
WRITETEXT = 206
ABSOLUTE = 207
AFTER = 208
ALLOWED = 209
ALLOW_SNAPSHOT_ISOLATION = 210
ANSI_NULLS = 211
ANSI_NULL_DEFAULT = 212
ANSI_PADDING = 213
ANSI_WARNINGS = 214
APPLY = 215
ARITHABORT = 216
AT = 217
AUTO = 218
AUTO_CLEANUP = 219
AUTO_CLOSE | |
% name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]:
lines.extend([
'if (!lua_isnumber(L, 2)) return luaL_error(L, "passed value cannot be converted to a number");',
'lua_Number n = lua_tonumber(L, 2);',
'm->set_%s(n);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32,
FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]:
lines.extend([
'lua_Integer v = luaL_checkinteger(L, 2);',
'm->set_%s(v);' % name.lower(),
'return 0;',
])
elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s(i);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_BOOL:
lines.extend([
'bool b = !!lua_toboolean(L, 2);',
'm->set_%s(b);' % name.lower(),
'return 0;',
])
elif type == FieldDescriptor.TYPE_ENUM:
lines.extend([
'lua_Integer i = luaL_checkinteger(L, 2);',
'm->set_%s((%s)i);' % ( name.lower(), type_name.replace('.', '::') ),
'return 0;',
])
elif type == FieldDescriptor.TYPE_MESSAGE:
lines.append('return luaL_error(L, "to manipulate embedded messages, obtain the embedded message and manipulate it");')
else:
lines.append('return luaL_error(L, "field type is not yet supported");')
lines.append('}\n')
return lines
def new_message(package, message):
'''Returns function definition for creating a new protocol buffer message'''
lines = []
lines.append('int %snew(lua_State *L)' % message_function_prefix(package, message))
lines.append('{')
c = cpp_class(package, message)
lines.append('msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));')
lines.append('ud->lua_owns = true;')
lines.append('ud->msg = new %s();' % c)
lines.append('ud->gc_callback = NULL;')
lines.append('ud->callback_data = NULL;')
lines.append('luaL_getmetatable(L, "%s");' % metatable(package, message))
lines.append('lua_setmetatable(L, -2);')
lines.append('return 1;')
lines.append('}\n')
return lines
def message_pushcopy_function(package, message):
'''Returns function definition for pushing a copy of a message to the stack'''
return [
'bool %spushcopy(lua_State *L, const %s &from)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = new %s(from);' % cpp_class(package, message),
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def message_getcopy_function(package, message):
'''Returns function definition for getting a copy of a message from the stack'''
return [
'void %sgetcopy(lua_State *L, int index, %s &to)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)luaL_checkudata(L, index, "%s");' % ( metatable(package, message) ),
'to.CopyFrom(*ud->msg);',
'}',
]
def message_pushreference_function(package, message):
'''Returns function definition for pushing a reference of a message on the stack'''
return [
'bool %spushreference(lua_State *L, %s *msg, lua_protobuf_gc_callback f, void *data)' % ( message_function_prefix(package, message), cpp_class(package, message) ),
'{',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = false;',
'ud->msg = msg;',
'ud->gc_callback = f;',
'ud->callback_data = data;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return true;',
'}',
]
def parsefromstring_message_function(package, message):
'''Returns function definition for parsing a message from a serialized string'''
lines = []
lines.append('int %sparsefromstring(lua_State *L)' % message_function_prefix(package, message))
c = cpp_class(package, message)
lines.extend([
'{',
'if (lua_gettop(L) != 1) {',
'return luaL_error(L, "parsefromstring() requires a string argument. none given");',
'}',
'size_t len;',
'const char *s = luaL_checklstring(L, -1, &len);',
'%s * msg = new %s();' % ( c, c ),
'if (!msg->ParseFromArray((const void *)s, len)) {',
'return luaL_error(L, "error deserializing message");',
'}',
'msg_udata * ud = (msg_udata *)lua_newuserdata(L, sizeof(msg_udata));',
'ud->lua_owns = true;',
'ud->msg = msg;',
'ud->gc_callback = NULL;',
'ud->callback_data = NULL;',
'luaL_getmetatable(L, "%s");' % metatable(package, message),
'lua_setmetatable(L, -2);',
'return 1;',
'}',
])
return lines
def label_to_string(label_value):
if label_value == FieldDescriptor.LABEL_OPTIONAL:
return "optional"
if label_value == FieldDescriptor.LABEL_REPEATED:
return "repeated"
if label_value == FieldDescriptor.LABEL_REQUIRED:
return "required"
def type_to_string(type_value):
if type_value == FieldDescriptor.TYPE_BOOL:# = 8
return "bool"
if type_value == FieldDescriptor.TYPE_BYTES:# = 12
return "bytes"
if type_value == FieldDescriptor.TYPE_DOUBLE:# = 1
return "double"
if type_value == FieldDescriptor.TYPE_ENUM:# = 14
return "enum"
if type_value == FieldDescriptor.TYPE_FIXED32:# = 7
return "fixed32"
if type_value == FieldDescriptor.TYPE_FIXED64:# = 6
return "fixed64"
if type_value == FieldDescriptor.TYPE_FLOAT:# = 2
return "float"
if type_value == FieldDescriptor.TYPE_GROUP:# = 10
return "group"
if type_value == FieldDescriptor.TYPE_INT32:# = 5
return "int32"
if type_value == FieldDescriptor.TYPE_INT64:# = 3
return "int64"
if type_value == FieldDescriptor.TYPE_MESSAGE:# = 11
return "message"
if type_value == FieldDescriptor.TYPE_SFIXED32:# = 15
return "sfixed32"
if type_value == FieldDescriptor.TYPE_SFIXED64:# = 16
return "sfixed64"
if type_value == FieldDescriptor.TYPE_SINT32:# = 17
return "sint32"
if type_value == FieldDescriptor.TYPE_SINT64:# = 18
return "sint64"
if type_value == FieldDescriptor.TYPE_STRING:# = 9
return "string"
if type_value == FieldDescriptor.TYPE_UINT32:# = 13
return "uint32"
if type_value == FieldDescriptor.TYPE_UINT64:# = 4
return "uint64"
def descriptor_message_function(package, message, descriptor):
''' Return a function that builds a table that describes message. Returns table to Lua for inspection'''
lines = []
lines.extend([
'int %sdescriptor(lua_State* L)' % message_function_prefix(package, message),
'{',
' lua_newtable(L);',
' ',
]);
for fields_descriptor in descriptor.field:
lines.extend([
' // Field: default_value = %s' % fields_descriptor.default_value,
' lua_newtable(L);',
' lua_pushstring(L, "%s");' % fields_descriptor.name,
' lua_setfield(L, -2, "name");',
' lua_pushstring(L, "%s");' % label_to_string(fields_descriptor.label),
' lua_setfield(L, -2, "label");',
' lua_pushnumber(L, %s);' % fields_descriptor.number,
' lua_setfield(L, -2, "number");',
' lua_pushstring(L, "%s");' % type_to_string(fields_descriptor.type),
' lua_setfield(L, -2, "type");',
' lua_pushstring(L, "%s");' % (fields_descriptor.type_name) if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "type_name");' if fields_descriptor.type_name else '',
' lua_setfield(L, -2, "%s");' % fields_descriptor.name,
]);
lines.extend([
'',
' return 1;',
'}',
])
return lines
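# Illustration (a sketch, not generated output): for a message with a single field
# "id", the descriptor() function built above returns a nested Lua table shaped like
#
#   { id = { name = "id", label = "optional", number = 1, type = "int32" } }
#
# (type_name is only filled in for enum/message fields, mirroring the conditional
# lua_pushstring/lua_setfield pair above).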
def gc_message_function(package, message):
'''Returns function definition for garbage collecting a message'''
lines = [
'int %sgc(lua_State *L)' % message_function_prefix(package, message),
'{',
]
lines.extend(obtain_message_from_udata(package, message, 1))
# if Lua "owns" the message, we delete it
# else, we delete only if a callback exists and it says it is OK
lines.extend([
'if (mud->lua_owns) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'if (mud->gc_callback && mud->gc_callback(m, mud->callback_data)) {',
'delete mud->msg;',
'mud->msg = NULL;',
'return 0;',
'}',
'return 0;',
'}',
])
return lines
def clear_message_function(package, message):
'''Returns the function definition for clearing a message'''
lines = [
'int %sclear(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'm->Clear();',
'return 0;',
'}',
])
return lines
def serialized_message_function(package, message):
'''Returns the function definition for serializing a message and its length'''
lines = [
'int %sserialized(lua_State *L)' % message_function_prefix(package, message),
'{'
]
lines.extend(obtain_message_from_udata(package, message, 1))
lines.extend([
'string s;',
'if (!m->SerializeToString(&s)) {',
'return luaL_error(L, "error serializing message");',
'}',
'lua_pushlstring(L, s.c_str(), s.length());',
'lua_pushnumber(L, s.length());',
'return 2;',
'}',
])
return lines
def message_function_array(package, message):
'''Defines functions for Lua object type
These are defined on the Lua metatable for the message type.
These are basically constructors and static methods in Lua land.
'''
return [
'static const struct luaL_Reg %s_functions [] = {' % message,
'{"new", %snew},' % message_function_prefix(package, message),
'{"parsefromstring", %sparsefromstring},' % message_function_prefix(package, message),
'{"descriptor", %sdescriptor},' % message_function_prefix(package, message),
'{NULL, NULL}',
'};\n',
]
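# Illustration (a sketch; the table path below is hypothetical and depends on the
# package's lua_libname/metatable): in Lua land these registered functions act as
# the "static" entry points of the message type, e.g.
#
#   local msg    = mypackage.MyMessage.new()
#   local parsed = mypackage.MyMessage.parsefromstring(serialized_bytes)
#   local info   = mypackage.MyMessage.descriptor()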
def message_method_array(package, descriptor):
'''Defines functions for Lua object instances
These are functions available to each instance of a message.
They take the object userdata as the first parameter.
'''
message = descriptor.name
fp = message_function_prefix(package, message)
lines = []
lines.append('static const struct luaL_Reg %s_methods [] = {' % message)
lines.append('{"serialized", %sserialized},' % fp)
lines.append('{"clear", %sclear},' % fp)
lines.append('{"__gc", %sgc},' % message_function_prefix(package, message))
for fd in descriptor.field:
name = fd.name
label = fd.label
type = fd.type
lines.append('{"clear_%s", %s},' % ( name.lower(), field_function_name(package, message, 'clear', name.lower()) ))
lines.append('{"get_%s", %s},' % ( name.lower(), field_function_name(package, message, 'get', name.lower()) ))
lines.append('{"set_%s", %s},' % ( name.lower(), field_function_name(package, message, 'set', name.lower()) ))
if label in [ FieldDescriptor.LABEL_REQUIRED, FieldDescriptor.LABEL_OPTIONAL ]:
lines.append('{"has_%s", %s},' % ( name.lower(), field_function_name(package, message, 'has', name.lower()) ))
if label == FieldDescriptor.LABEL_REPEATED:
lines.append('{"size_%s", %s},' % ( name.lower(), field_function_name(package, message, 'size', name.lower()) ))
if type == FieldDescriptor.TYPE_MESSAGE:
lines.append('{"add_%s", %s},' % ( name.lower(), field_function_name(package, message, 'add', name.lower()) ))
lines.append('{NULL, NULL},')
lines.append('};\n')
return lines
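# Illustration (a sketch; the field name "id" is hypothetical): the per-field
# accessors registered above are called on message instances with Lua method
# syntax, e.g.
#
#   msg:set_id(42)
#   local id = msg:get_id()
#   if msg:has_id() then ... end      -- required/optional fields only
#   local bytes, len = msg:serialized()
#   msg:clear()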
def message_open_function(package, descriptor):
'''Function definition for opening/registering a message type'''
message = descriptor.name
lines = [
'int %s(lua_State *L)' % message_open_function_name(package, message),
'{',
'luaL_checktype(L, -1, LUA_TTABLE);', #
'luaL_newmetatable(L, "%s");' % metatable(package, message),
'lua_pushvalue(L, -1);',
'lua_setfield(L, -2, "__index");',
'luaL_setfuncs(L, %s_methods, 0);' % message, ##'luaL_register(L, NULL, %s_methods);' % message,
'lua_pop(L, 1); // remove the metatable', #
'if (luaEXT_findtable(L, "%s", -1, 1)) { ' % package, #
' return luaL_error(L, "Error finding correct table");',
'}',
'luaL_newlib(L, %s_functions);' % message, ##'luaL_register(L, "%s", %s_functions);' % (lua_libname(package, message), message),
'lua_setfield(L, -2, "%s");' % message, #
'lua_pop(L, 1); //remove the returned table from findtable' #
]
for enum_descriptor in descriptor.enum_type:
lines.extend(enum_source(enum_descriptor))
lines.extend([
# this is wrong if we are calling through normal Lua module load means
#'lua_pop(L, 1);',
'return 0;',#'return 1;',
'}',
| |
# reponame: mabrahamdevops/python_notebooks
import glob
from ipywidgets import widgets
import os
import re
import shutil
from collections import defaultdict
from IPython.core.display import HTML
from IPython.display import display
import pandas as pd
import subprocess
from __code.file_handler import make_ascii_file_from_string
from __code.file_handler import read_ascii
from __code.ipywe.myfileselector import MyFileSelectorPanel
from __code.utilities import ListRunsParser
from __code.topaz_config import topaz_python_script, topaz_reduction_path
class ConfigLoader(object):
config_dict = {}
def __init__(self, working_dir=''):
self.working_dir = working_dir
def select_config_file(self):
hbox = widgets.HBox([widgets.Label("Configuration File Selected:",
layout=widgets.Layout(width='20%')),
widgets.Label("N/A",
layout=widgets.Layout(width='80%'))])
self.config_selected_label = hbox.children[1]
display(hbox)
self.config_file_ui = MyFileSelectorPanel(instruction='Select Configuration File (*.config)',
next=self.load_config,
start_dir=self.working_dir,
filters={'config': ['*.config']},
default_filter='config',
stay_alive=True)
self.config_file_ui.show()
def load_config(self, config_file_name):
self.config_selected_label.value = config_file_name
#try:
#pd_config = pd.read_csv(config_file_name, sep=' ', comment='#')
full_config = read_ascii(config_file_name)
config_array = full_config.split('\n')
config_dict = dict()
for _line in config_array:
if (not (_line.startswith('#'))) and (not (_line == '')):
_new_line = re.sub(r'\s+', ',', _line)
my_split = _new_line.split(',')
_key = my_split[0]
_value = my_split[1]
config_dict[_key] = _value
# add a config_name tag that contains the name of the loaded config file
[config_dict['config_name'], _] = os.path.splitext(os.path.basename(config_file_name))
self.config_dict = config_dict
# except:
# display(HTML("Error loading config file {}!".format(config_file_name)))
# return
display(HTML("Configuration file has been loaded with success!"))
# list_para_name = pd_config['instrument_name']
# list_para_value = pd_config['TOPAZ']
# config_dict = dict(zip(list_para_name, list_para_value))
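# Example of the parsing in load_config above (a standalone sketch with a made-up
# line): each non-comment line "key value [anything else]" has its whitespace
# collapsed to commas and only the first two tokens become the key/value pair, e.g.
#
#   >>> import re
#   >>> re.sub(r'\s+', ',', 'z_offset   0.15   # goniometer').split(',')[:2]
#   ['z_offset', '0.15']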
class ConfigParser(object):
def __init__(self, config_file=''):
if config_file:
self.parse_config(config_file = config_file)
def parse_config(self, config_file=''):
pass
class ConfigDict(object):
# for config file output
config = {'instrument_name': 'TOPAZ',
'calibration_file_1': '',
'calibration_file_2': 'None',
'z_offset': 0.0,
'x_offset': 0.0,
'data_directory': '',
'output_directory': '',
'use_monitor_counts': False,
'min_tof': 1000,
'max_tof': 16600,
'monitor_index': 0,
'min_monitor_tof': 800,
'max_monitor_tof': 12500,
'read_UB': True,
'UB_filename': '',
'cell_type': 'Monoclinic',
'centering': 'P',
'num_peaks_to_find': 300,
'min_d': 5,
'max_d': 25,
'tolerance': 0.12,
'integrate_predicted_peaks': True,
'min_pred_wl': 0.5,
'max_pred_wl': 3.4,
'min_pred_dspacing': 0.5,
'max_pred_dspacing': 11.0,
'use_sphere_integration': False,
'use_ellipse_integration': False,
'use_fit_peaks_integration': False,
'use_cylindrical_integration': False,
'peak_radius': 0.130,
'bkg_inner_radius': 0.14,
'bkg_outer_radius': 0.15,
'integrate_if_edge_peak': True,
'ellipse_region_radius': 0.20,
'ellipse_size_specified': True,
'rebin_step': -0.004,
'preserve_events': True,
'use_ikeda_carpenter': False,
'n_bad_edge_pixels': 0,
'cylinder_radius': 0.05,
'cylinder_length': 0.30,
'exp_name': '',
'reduce_one_run_script': '/SNS/TOPAZ/shared/calibrations/Reduction/ReduceSCD_OneRun_xz_offset.py',
'run_nums': '',
'slurm_queue_name': 'None',
'max_processes': 8,
'config_name': 'tmp.config',
}
def __init__(self, config_dict={}):
self.config_dict = config_dict
def get_parameter_value(self, parameter):
para_type = type(self.config[parameter])
if self.config_dict == {}:
return self.config[parameter]
if not (parameter in self.config_dict.keys()):
return self.config[parameter]
if self.config_dict[parameter]:
config_para = self.config_dict[parameter]
if isinstance(self.config[parameter], bool):
if self.config_dict[parameter] in ["True", "true"]:
return True
else:
return False
return para_type(config_para)
else:
return self.config[parameter]
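# Usage sketch (the values shown are made up): get_parameter_value() falls back to
# the defaults above and coerces loaded strings to the default's type, e.g.
#
#   >>> o_dict = ConfigDict(config_dict={'z_offset': '0.15', 'read_UB': 'False'})
#   >>> o_dict.get_parameter_value('z_offset')   # str -> float
#   0.15
#   >>> o_dict.get_parameter_value('read_UB')    # "False" -> bool
#   False
#   >>> o_dict.get_parameter_value('min_tof')    # key absent -> default
#   1000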
class TopazConfigGenerator(object):
ikeda_flag_ui = None
v_box = None
fit_peaks_vertical_layout = None
reduce_ui = None
reduce_label_ui = None
reduce_one_run_script = 'N/A'
left_column_width = '15%'
# for config file output
config = {'instrument_name': 'TOPAZ',
'calibration_file_1': '',
'calibration_file_2': 'None',
'z_offset': 0.0,
'x_offset': 0.0,
'data_directory': '',
'output_directory': '',
'use_monitor_counts': False,
'min_tof': 1000,
'max_tof': 16600,
'monitor_index': 0,
'min_monitor_tof': 800,
'max_monitor_tof': 12500,
'read_UB': True,
'UB_filename': '',
'cell_type': 'Monoclinic',
'centering': 'P',
'num_peaks_to_find': 300,
'min_d': 5,
'max_d': 25,
'tolerance': 0.12,
'integrate_predicted_peaks': True,
'min_pred_wl': 0.5,
'max_pred_wl': 3.4,
'min_pred_dspacing': 0.5,
'max_pred_dspacing': 11.0,
'use_sphere_integration': False,
'use_ellipse_integration': False,
'use_fit_peaks_integration': False,
'use_cylindrical_integration': False,
'peak_radius': 0.130,
'bkg_inner_radius': 0.14,
'bkg_outer_radius': 0.15,
'integrate_if_edge_peak': True,
'ellipse_region_radius': 0.20,
'ellipse_size_specified': True,
'rebin_step': -0.004,
'preserve_events': True,
'use_ikeda_carpenter': False,
'n_bad_edge_pixels': 0,
'cylinder_radius': 0.05,
'cylinder_length': 0.30,
'exp_name': '',
'reduce_one_run_script': '/SNS/TOPAZ/shared/calibrations/Reduction/ReduceSCD_OneRun_xz_offset.py',
'run_nums': '',
'slurm_queue_name': 'None',
'max_processes': 8,
}
cell_type = ['Triclinic',
'Monoclinic',
'Orthorhombic',
'Tetragonal',
'Rhombohedral',
'Hexagonal',
'Cubic',
'None']
centering_mode = {'P': cell_type[:-1],
'I': ['Tetragonal', 'Monoclinic', 'Cubic'],
'A': ['Monoclinic', 'Orthorhombic'],
'B': ['Monoclinic', 'Orthorhombic'],
'C': ['Monoclinic', 'Orthorhombic'],
'F': ['Orthorhombic', 'Cubic'],
'Robv': ['Rhombohedral'],
'Rrev': ['Rhombohedral'],
'None': ['None']
}
cell_type_dict = {}
o_config_dict = None
display_config_to_super_user = False
def __init__(self, working_dir='', config_dict_loaded={}):
self.working_dir = working_dir
self.config_dict_loaded = config_dict_loaded
self.o_config_dict = ConfigDict(config_dict=config_dict_loaded)
self.init_css()
self.__create_cell_type_centering_dict()
self.run_all()
def init_css(self):
display(HTML("""
<style>
.mylabel_key {
font-style: bold;
color: black;
font-size: 18px;
}
</style>
"""))
def __create_cell_type_centering_dict(self):
self.cell_type_dict = defaultdict()
for _key in self.centering_mode.keys():
_list = self.centering_mode[_key]
for _item in _list:
self.cell_type_dict.setdefault(_item, []).append(_key)
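# e.g. with the centering_mode mapping above this inversion gives
# self.cell_type_dict['Monoclinic'] == ['P', 'I', 'A', 'B', 'C'] and
# self.cell_type_dict['Cubic'] == ['P', 'I', 'F'].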
def __get_dict_parameter_value(self, parameter):
if self.config_dict[parameter]:
return self.config_dict[parameter]
else:
return self.config[parameter]
def run_all(self):
self.define_config_file_name()
self.select_input_data_folder()
self.select_output_folder()
self.parameters_1()
self.parameters_2()
self.advanced_options()
def define_config_file_name(self):
_default_config = self.o_config_dict.get_parameter_value('config_name')
[_name_config, _] = os.path.splitext(_default_config)
display(HTML("<h2>Define Config File Name</h2>"))
config_file_ui = widgets.HBox([widgets.Label("Config File Name:",
layout=widgets.Layout(width='20%')),
widgets.Text(_name_config,
layout=widgets.Layout(width='75%')),
widgets.Label(".config",
layout=widgets.Layout(width='5%'))])
self.config_file_ui = config_file_ui.children[1]
display(config_file_ui)
def select_input_data_folder(self):
def update_list_of_runs(list_of_runs):
self.full_list_of_runs_ui.options = list_of_runs
if list_of_runs == []:
self.run_numbers_error_ui.value = ">> Format Error! <<"
self.full_run_numbers_layout.layout.visibility = 'hidden'
else:
self.run_numbers_error_ui.value = ""
self.full_run_numbers_layout.layout.visibility = 'visible'
# ****** Select Input Data Folder ********
display(HTML("<h2 id='input_directory'>Select Input Data Folder</h2>"))
_input_data_folder = self.o_config_dict.get_parameter_value('data_directory')
select_input_data_folder_ui = None
def _select_input_data_folder(selection):
select_input_data_folder_ui.children[1].value = selection
# update the list of runs display at the bottom of the page if user select new input data folder
_path_to_look_for = os.path.abspath(os.path.join(self.input_data_folder_ui.value, 'TOPAZ_*_event.nxs'))
list_of_event_nxs = glob.glob(_path_to_look_for)
list_of_runs = []
if list_of_event_nxs:
re_string = r"^TOPAZ_(?P<run>\d+)_event.nxs$"
for _nexus in list_of_event_nxs:
_short_nexus = os.path.basename(_nexus)
m = re.match(re_string, _short_nexus)
if m:
_run = m.group('run')
list_of_runs.append(_run)
list_of_runs.sort()
update_list_of_runs(list_of_runs)
self.list_of_runs = list_of_runs
select_input_data_folder_ui = widgets.HBox([widgets.Label("Input Data Folder Selected:",
layout=widgets.Layout(width='25%')),
widgets.Label(_input_data_folder,
layout=widgets.Layout(width='70%'))])
select_input_data_folder_ui.children[0].add_class("mylabel_key")
self.input_data_folder_ui = select_input_data_folder_ui.children[1]
display(select_input_data_folder_ui)
if not (_input_data_folder == 'N/A') and os.path.exists(_input_data_folder):
start_dir = _input_data_folder
else:
start_dir = os.path.join(self.working_dir, 'data')
if not os.path.exists(start_dir):
start_dir = self.working_dir
if not os.path.exists(start_dir):
start_dir = '/'
input_folder_ui = MyFileSelectorPanel(instruction='',
start_dir=start_dir,
next=_select_input_data_folder,
type='directory',
stay_alive=True)
input_folder_ui.show()
display(widgets.Label(""))
def select_output_folder(self):
# ****** Select or Create Output Folder ********
display(HTML("<h2 id='output_directory'>Select or Create Output Folder</h2>"))
_output_data_folder = self.o_config_dict.get_parameter_value('output_directory')
select_output_data_folder_ui = None
def select_output_data_folder(selection):
select_output_data_folder_ui.children[1].value = selection
if not (_output_data_folder == 'N/A') and os.path.exists(_output_data_folder):
start_dir = _output_data_folder
else:
start_dir = os.path.join(self.working_dir, 'shared')
if not os.path.exists(start_dir):
start_dir = self.working_dir
if not os.path.exists(start_dir):
start_dir = '/'
select_output_data_folder_ui = widgets.HBox([widgets.Label("Output Data Folder Selected:",
layout=widgets.Layout(width='25%')),
widgets.Label(start_dir,
layout=widgets.Layout(width='70%'))])
select_output_data_folder_ui.children[0].add_class("mylabel_key")
self.output_data_folder_ui = select_output_data_folder_ui.children[1]
display(select_output_data_folder_ui)
output_folder_ui = MyFileSelectorPanel(instruction='Location of Output Folder',
start_dir=start_dir,
type='directory',
next=select_output_data_folder,
newdir_toolbar_button=True,
stay_alive=True)
output_folder_ui.show()
def parameters_1(self):
# def cell_type_changed(value):
# centering_ui.children[1].options = self.cell_type_dict[value['new']]
# centering_ui.children[1].value = self.cell_type_dict[value['new']][0]
#
# def centering_changed(value):
# pass
# calibration files
working_dir = self.working_dir
calib_folder = os.path.dirname(working_dir)
list_of_calibration_file = glob.glob(os.path.join(calib_folder, 'shared/calibrations') + '/2017C/*.DetCal')
list_of_calibration_file.append('None')
_calibration_file = self.o_config_dict.get_parameter_value('calibration_file_1')
if not (_calibration_file is None) and os.path.exists(_calibration_file):
list_of_calibration_file = [_calibration_file] + list_of_calibration_file
# ****** Specify calibration file(s) ********
display(HTML("<h2 id='calibration_file'>Specify calibration file(s)</h2><br>SNAP requires two calibration files, one for each bank. \
If the default detector position is to be used, specify <strong>None</strong> as the calibration file name."))
calibration1_ui = widgets.HBox([widgets.Label("Calibration File:",
layout=widgets.Layout(width='15%')),
widgets.Dropdown(options=list_of_calibration_file,
layout=widgets.Layout(width='85%'))])
self.calibration_file_ui = calibration1_ui.children[1]
display(calibration1_ui)
# ****** Goniometer z Offset correction ********
display(HTML("<h2>Goniometer z Offset Correction</h2><br>Test correction for Goniometer z offset"))
_z_offset = self.o_config_dict.get_parameter_value('z_offset')
_x_offset = self.o_config_dict.get_parameter_value('x_offset')
offset_min_value = -10.0
offset_max_value = +10.0
if not (offset_min_value <= _x_offset <= offset_max_value):
_x_offset = 0.0
if not (offset_min_value <= _z_offset <= offset_max_value):
_z_offset = 0.0
zoffset_ui = widgets.HBox([widgets.Label("z_offset:",
layout=widgets.Layout(width='5%')),
widgets.FloatSlider(value=_z_offset,
min=offset_min_value,
max=offset_max_value,
readout_format='.2f',
continuous_update=False,
layout=widgets.Layout(width='30%'))])
xoffset_ui = widgets.HBox([widgets.Label("x_offset:",
layout=widgets.Layout(width='5%')),
widgets.FloatSlider(value=_x_offset,
min=offset_min_value,
max=offset_max_value,
readout_format='.2f',
continuous_update=False,
layout=widgets.Layout(width='30%'))])
self.zoffset_ui = zoffset_ui.children[1]
self.xoffset_ui = xoffset_ui.children[1]
offset_ui = widgets.VBox([zoffset_ui, xoffset_ui])
display(offset_ui)
# ****** Use Monitor Counts ?********
display(HTML("<h2>Use Monitor Counts ?</h2><br> If use_monitor_counts is True, then the integrated beam monitor \
counts will be used for scaling. <br>If use_monitor_counts is False, \
then the integrated proton charge will be used for scaling. <br><br>These \
values will be listed under MONCNT in the integrate file."))
_monitor_flag = self.o_config_dict.get_parameter_value('use_monitor_counts')
monitor_counts_flag_ui = widgets.Checkbox(value=_monitor_flag,
description='Use Monitor Counts')
self.monitor_counts_flag_ui = monitor_counts_flag_ui
display(monitor_counts_flag_ui)
# ****** TOF and Monitor ********
display(HTML("<h2>TOF and Monitor</h2><br>Min & max tof determine the range of events loaded.<br> Min & max monitor tof \
determine the range of tofs integrated in the monitor data to get the \
total monitor counts. <br>You need these even if Use Monitor Counts is False."))
_min_tof = self.o_config_dict.get_parameter_value('min_tof')
_max_tof = self.o_config_dict.get_parameter_value('max_tof')
tof_ui = widgets.HBox([widgets.Label("TOF Range",
layout=widgets.Layout(width=self.left_column_width)),
widgets.IntRangeSlider(value=[_min_tof, _max_tof],
min=500,
max=16600,
step=1,
continuous_update=False,
readout_format='d',
layout=widgets.Layout(width='50%')),
widgets.Label("\u00B5s",
layout=widgets.Layout(width='20%'))])
self.tof_ui = tof_ui.children[1]
_monitor_index = self.o_config_dict.get_parameter_value('monitor_index')
monitor_index_ui = widgets.HBox([widgets.Label("Monitor Index",
layout=widgets.Layout(width=self.left_column_width)),
widgets.Dropdown(options=['0', '1'],
value=str(_monitor_index),
layout=widgets.Layout(width='10%'))])
self.monitor_index_ui = monitor_index_ui.children[1]
_min_monitor_tof = self.o_config_dict.get_parameter_value('min_monitor_tof')
_max_monitor_tof = self.o_config_dict.get_parameter_value('max_monitor_tof')
monitor_ui = widgets.HBox([widgets.Label("Monitor TOF Range",
layout=widgets.Layout(width=self.left_column_width)),
widgets.IntRangeSlider(value=[_min_monitor_tof, _max_monitor_tof],
min=500,
max=16600,
step=1,
continuous_update=False,
readout_format='d',
layout=widgets.Layout(width='50%')),
widgets.Label("\u00B5s",
layout=widgets.Layout(width='20%'))])
self.monitor_tof_ui = monitor_ui.children[1]
tof_ui = widgets.VBox([tof_ui, monitor_index_ui, monitor_ui])
display(tof_ui)
# ****** UB ********
display(HTML("<h2 id='ub_filename'>UB</h2><br>Read the UB matrix from file. This option will be applied to each run and used for \
combined file. This option is especially helpful for 2nd frame TOPAZ data."))
_ub_flag = self.o_config_dict.get_parameter_value('read_UB')
ub_flag_ui = widgets.Checkbox(value=_ub_flag,
description='Read UB')
_ub_file = self.o_config_dict.get_parameter_value('UB_filename')
if _ub_file == '':
_ub_file = 'N/A'
ub_file_selected_ui = widgets.HBox([widgets.Label("UB File Selected:",
layout=widgets.Layout(width='20%')),
widgets.Label(_ub_file,
layout=widgets.Layout(width='80%'))])
ub_file_selected_ui.children[0].add_class("mylabel_key")
self.ub_file_selected_ui = ub_file_selected_ui
def ub_flag_changed(value):
display_file_selection_flag = value['new']
if display_file_selection_flag:
self.ub_ui.enable()
else:
self.ub_ui.disable()
# | |
name):
try:
inf_attr = getattr(_cl.image_info, name.upper())
except AttributeError:
raise AttributeError("%s has no attribute '%s'"
% (type(self), name))
else:
return self.event.get_image_info(inf_attr)
def image_shape(self):
if self.type == mem_object_type.IMAGE2D:
return (self.width, self.height)
elif self.type == mem_object_type.IMAGE3D:
return (self.width, self.height, self.depth)
else:
raise LogicError("only images have shapes")
Image.__init__ = image_init
Image.image = property(_ImageInfoGetter)
Image.shape = property(image_shape)
# }}}
# {{{ Error
def error_str(self):
val = self.what
try:
val.routine
except AttributeError:
return str(val)
else:
result = ""
if val.code() != status_code.SUCCESS:
result = status_code.to_string(
val.code(), "<unknown error %d>")
routine = val.routine()
if routine:
result = "%s failed: %s" % (routine, result)
what = val.what()
if what:
if result:
result += " - "
result += what
return result
def error_code(self):
return self.args[0].code()
def error_routine(self):
return self.args[0].routine()
def error_what(self):
return self.args[0]
Error.__str__ = error_str
Error.code = property(error_code)
Error.routine = property(error_routine)
Error.what = property(error_what)
# }}}
# {{{ MemoryMap
def memory_map_enter(self):
return self
def memory_map_exit(self, exc_type, exc_val, exc_tb):
self.release()
MemoryMap.__doc__ = """
This class may also be used as a context manager in a ``with`` statement.
The memory corresponding to this object will be unmapped when
this object is deleted or :meth:`release` is called.
.. automethod:: release
"""
MemoryMap.__enter__ = memory_map_enter
MemoryMap.__exit__ = memory_map_exit
# }}}
# {{{ SVMAllocation
if get_cl_header_version() >= (2, 0):
SVMAllocation.__doc__ = """An object whose lifetime is tied to an allocation of shared virtual memory.
.. note::
Most likely, you will not want to use this directly, but rather
:func:`svm_empty` and related functions which allow access to this
functionality using a friendlier, more Pythonic interface.
.. versionadded:: 2016.2
.. automethod:: __init__(self, ctx, size, alignment, flags=None)
.. automethod:: release
.. automethod:: enqueue_release
"""
if get_cl_header_version() >= (2, 0):
svmallocation_old_init = SVMAllocation.__init__
def svmallocation_init(self, ctx, size, alignment, flags, _interface=None):
"""
:arg ctx: a :class:`Context`
:arg flags: some of :class:`svm_mem_flags`.
"""
svmallocation_old_init(self, ctx, size, alignment, flags)
read_write = (
flags & mem_flags.WRITE_ONLY != 0
or flags & mem_flags.READ_WRITE != 0)
_interface["data"] = (
int(self._ptr_as_int()), not read_write)
self.__array_interface__ = _interface
if get_cl_header_version() >= (2, 0):
SVMAllocation.__init__ = svmallocation_init
# }}}
# {{{ SVM
if get_cl_header_version() >= (2, 0):
SVM.__doc__ = """Tags an object exhibiting the Python buffer interface (such as a
:class:`numpy.ndarray`) as referring to shared virtual memory.
Depending on the features of the OpenCL implementation, the following
types of objects may be passed to/wrapped in this type:
* coarse-grain shared memory as returned by (e.g.) :func:`csvm_empty`
for any implementation of OpenCL 2.0.
This is how coarse-grain SVM may be used from both host and device::
svm_ary = cl.SVM(
cl.csvm_empty(ctx, 1000, np.float32, alignment=64))
assert isinstance(svm_ary.mem, np.ndarray)
with svm_ary.map_rw(queue) as ary:
ary.fill(17) # use from host
prg.twice(queue, svm_ary.mem.shape, None, svm_ary)
* fine-grain shared memory as returned by (e.g.) :func:`fsvm_empty`,
if the implementation supports fine-grained shared virtual memory.
This memory may directly be passed to a kernel::
ary = cl.fsvm_empty(ctx, 1000, np.float32)
assert isinstance(ary, np.ndarray)
prg.twice(queue, ary.shape, None, cl.SVM(ary))
queue.finish() # synchronize
print(ary) # access from host
Observe how mapping (as needed in coarse-grain SVM) is no longer
necessary.
* any :class:`numpy.ndarray` (or other Python object with a buffer
interface) if the implementation supports fine-grained *system*
shared virtual memory.
This is how plain :mod:`numpy` arrays may directly be passed to a
kernel::
ary = np.zeros(1000, np.float32)
prg.twice(queue, ary.shape, None, cl.SVM(ary))
queue.finish() # synchronize
print(ary) # access from host
Objects of this type may be passed to kernel calls and
:func:`enqueue_copy`. Coarse-grain shared-memory *must* be mapped
into host address space using :meth:`map` before being accessed
through the :mod:`numpy` interface.
.. note::
This object merely serves as a 'tag' that changes the behavior
of functions to which it is passed. It has no special management
relationship to the memory it tags. For example, it is permissible
to grab a :mod:`numpy.array` out of :attr:`SVM.mem` of one
:class:`SVM` instance and use the array to construct another.
Neither of the tags need to be kept alive.
.. versionadded:: 2016.2
.. attribute:: mem
The wrapped object.
.. automethod:: __init__
.. automethod:: map
.. automethod:: map_ro
.. automethod:: map_rw
.. automethod:: as_buffer
"""
if get_cl_header_version() >= (2, 0):
svm_old_init = SVM.__init__
def svm_init(self, mem):
svm_old_init(self, mem)
self.mem = mem
def svm_map(self, queue, flags, is_blocking=True, wait_for=None):
"""
:arg is_blocking: If *False*, subsequent code must wait on
:attr:`SVMMap.event` in the returned object before accessing the
mapped memory.
:arg flags: a combination of :class:`pyopencl.map_flags`, defaults to
read-write.
:returns: an :class:`SVMMap` instance
|std-enqueue-blurb|
"""
return SVMMap(
self,
queue,
_cl._enqueue_svm_map(queue, is_blocking, flags, self, wait_for))
def svm_map_ro(self, queue, is_blocking=True, wait_for=None):
"""Like :meth:`map`, but with *flags* set for a read-only map."""
return self.map(queue, map_flags.READ,
is_blocking=is_blocking, wait_for=wait_for)
def svm_map_rw(self, queue, is_blocking=True, wait_for=None):
"""Like :meth:`map`, but with *flags* set for a read-only map."""
return self.map(queue, map_flags.READ | map_flags.WRITE,
is_blocking=is_blocking, wait_for=wait_for)
def svm__enqueue_unmap(self, queue, wait_for=None):
return _cl._enqueue_svm_unmap(queue, self, wait_for)
def svm_as_buffer(self, ctx, flags=None):
"""
:arg ctx: a :class:`Context`
:arg flags: a combination of :class:`pyopencl.map_flags`, defaults to
read-write.
:returns: a :class:`Buffer` corresponding to *self*.
The memory referred to by this object must not be freed before
the returned :class:`Buffer` is released.
"""
if flags is None:
flags = mem_flags.READ_WRITE
return Buffer(ctx, flags, size=self.mem.nbytes, hostbuf=self.mem)
if get_cl_header_version() >= (2, 0):
SVM.__init__ = svm_init
SVM.map = svm_map
SVM.map_ro = svm_map_ro
SVM.map_rw = svm_map_rw
SVM._enqueue_unmap = svm__enqueue_unmap
SVM.as_buffer = svm_as_buffer
# }}}
# ORDER DEPENDENCY: Some of the above may override get_info, the effect needs
# to be visible through the attributes. So get_info attr creation needs to happen
# after the overriding is complete.
cls_to_info_cls = {
_cl.Platform: (_cl.Platform.get_info, _cl.platform_info, []),
_cl.Device: (_cl.Device.get_info, _cl.device_info,
["PLATFORM", "MAX_WORK_GROUP_SIZE", "MAX_COMPUTE_UNITS"]),
_cl.Context: (_cl.Context.get_info, _cl.context_info, []),
_cl.CommandQueue: (_cl.CommandQueue.get_info, _cl.command_queue_info,
["CONTEXT", "DEVICE"]),
_cl.Event: (_cl.Event.get_info, _cl.event_info, []),
_cl.MemoryObjectHolder:
(MemoryObjectHolder.get_info, _cl.mem_info, []),
Image: (_cl.Image.get_image_info, _cl.image_info, []),
Program: (Program.get_info, _cl.program_info, []),
Kernel: (Kernel.get_info, _cl.kernel_info, []),
_cl.Sampler: (Sampler.get_info, _cl.sampler_info, []),
}
def to_string(cls, value, default_format=None):
for name in dir(cls):
if (not name.startswith("_") and getattr(cls, name) == value):
return name
if default_format is None:
raise ValueError("a name for value %d was not found in %s"
% (value, cls.__name__))
else:
return default_format % value
for cls in CONSTANT_CLASSES:
cls.to_string = classmethod(to_string)
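# Usage sketch: to_string() maps a constant's integer value back to its attribute
# name on the constant class, e.g.
#
#   >>> status_code.to_string(status_code.SUCCESS)
#   'SUCCESS'
#   >>> status_code.to_string(-9999, "<unknown error %d>")
#   '<unknown error -9999>'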
# {{{ get_info attributes -------------------------------------------------
def make_getinfo(info_method, info_name, info_attr):
def result(self):
return info_method(self, info_attr)
return property(result)
def make_cacheable_getinfo(info_method, info_name, cache_attr, info_attr):
def result(self):
try:
return getattr(self, cache_attr)
except AttributeError:
pass
result = info_method(self, info_attr)
setattr(self, cache_attr, result)
return result
return property(result)
for cls, (info_method, info_class, cacheable_attrs) \
in six.iteritems(cls_to_info_cls):
for info_name, info_value in six.iteritems(info_class.__dict__):
if info_name == "to_string" or info_name.startswith("_"):
continue
info_lower = info_name.lower()
info_constant = getattr(info_class, info_name)
if info_name in cacheable_attrs:
cache_attr = intern("_info_cache_"+info_lower)
setattr(cls, info_lower, make_cacheable_getinfo(
info_method, info_lower, cache_attr, info_constant))
else:
setattr(cls, info_lower, make_getinfo(
info_method, info_name, info_constant))
# }}}
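# Illustration (a sketch, assuming an OpenCL platform and device are available at
# runtime): the loop above turns every *_info constant into a lowercase property,
# so the following two expressions are equivalent for a Device instance `dev`:
#
#   dev.get_info(device_info.MAX_COMPUTE_UNITS)
#   dev.max_compute_units
#
# (MAX_COMPUTE_UNITS is additionally cached on the instance, per cls_to_info_cls).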
if _cl.have_gl():
def gl_object_get_gl_object(self):
return self.get_gl_object_info()[1]
GLBuffer.gl_object = property(gl_object_get_gl_object)
GLTexture.gl_object = property(gl_object_get_gl_object)
_add_functionality()
# }}}
# {{{ create_some_context
def create_some_context(interactive=None, answers=None):
import os
if answers is None:
if "PYOPENCL_CTX" in os.environ:
ctx_spec = os.environ["PYOPENCL_CTX"]
answers = ctx_spec.split(":")
if "PYOPENCL_TEST" in os.environ:
from pyopencl.tools import get_test_platforms_and_devices
for plat, devs in get_test_platforms_and_devices():
for dev in devs:
return Context([dev])
if answers is not None:
pre_provided_answers = answers
answers = answers[:]
else:
pre_provided_answers = None
user_inputs = []
if interactive is None:
interactive = True
try:
import sys
if not sys.stdin.isatty():
interactive = False
except Exception:
interactive = False
def cc_print(s):
if interactive:
print(s)
def get_input(prompt):
if answers:
return str(answers.pop(0))
elif not interactive:
return ''
else:
user_input = input(prompt)
user_inputs.append(user_input)
return user_input
# {{{ pick a platform
platforms = get_platforms()
if not platforms:
raise Error("no platforms found")
else:
if not answers:
cc_print("Choose platform:")
for i, pf in enumerate(platforms):
cc_print("[%d] %s" % (i, pf))
answer = get_input("Choice [0]:")
if not answer:
platform = platforms[0]
else:
platform = None
try:
int_choice = int(answer)
except ValueError:
pass
else:
if 0 <= int_choice < len(platforms):
platform = platforms[int_choice]
if platform is None:
answer = answer.lower()
for i, pf in enumerate(platforms):
if answer in pf.name.lower():
platform = pf
if platform is None:
raise RuntimeError("input did not match any platform")
# }}}