:ivar status: Status of the integration runtime node. Possible values
include: 'NeedRegistration', 'Online', 'Limited', 'Offline', 'Upgrading',
'Initializing', 'InitializeFailed'
:vartype status: str or
~azure.mgmt.synapse.models.SelfHostedIntegrationRuntimeNodeStatus
:ivar capabilities: The integration runtime capabilities dictionary
:vartype capabilities: dict[str, str]
:ivar version_status: Status of the integration runtime node version.
:vartype version_status: str
:ivar version: Version of the integration runtime node.
:vartype version: str
:ivar register_time: The time at which the integration runtime node was
registered in ISO8601 format.
:vartype register_time: datetime
:ivar last_connect_time: The most recent time at which the integration
runtime was connected in ISO8601 format.
:vartype last_connect_time: datetime
:ivar expiry_time: The time at which the integration runtime will expire
in ISO8601 format.
:vartype expiry_time: datetime
:ivar last_start_time: The time the node last started up.
:vartype last_start_time: datetime
:ivar last_stop_time: The integration runtime node last stop time.
:vartype last_stop_time: datetime
:ivar last_update_result: The result of the last integration runtime node
update. Possible values include: 'None', 'Succeed', 'Fail'
:vartype last_update_result: str or
~azure.mgmt.synapse.models.IntegrationRuntimeUpdateResult
:ivar last_start_update_time: The last time at which the integration runtime
node update started.
:vartype last_start_update_time: datetime
:ivar last_end_update_time: The last time at which the integration runtime node
update ended.
:vartype last_end_update_time: datetime
:ivar is_active_dispatcher: Indicates whether this node is the active
dispatcher for integration runtime requests.
:vartype is_active_dispatcher: bool
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration
runtime node.
:vartype concurrent_jobs_limit: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration
runtime.
:vartype max_concurrent_jobs: int
"""
_validation = {
'node_name': {'readonly': True},
'machine_name': {'readonly': True},
'host_service_uri': {'readonly': True},
'status': {'readonly': True},
'capabilities': {'readonly': True},
'version_status': {'readonly': True},
'version': {'readonly': True},
'register_time': {'readonly': True},
'last_connect_time': {'readonly': True},
'expiry_time': {'readonly': True},
'last_start_time': {'readonly': True},
'last_stop_time': {'readonly': True},
'last_update_result': {'readonly': True},
'last_start_update_time': {'readonly': True},
'last_end_update_time': {'readonly': True},
'is_active_dispatcher': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_name': {'key': 'nodeName', 'type': 'str'},
'machine_name': {'key': 'machineName', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '{str}'},
'version_status': {'key': 'versionStatus', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
}
def __init__(self, *, additional_properties=None, **kwargs) -> None:
super(SelfHostedIntegrationRuntimeNode, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.node_name = None
self.machine_name = None
self.host_service_uri = None
self.status = None
self.capabilities = None
self.version_status = None
self.version = None
self.register_time = None
self.last_connect_time = None
self.expiry_time = None
self.last_start_time = None
self.last_stop_time = None
self.last_update_result = None
self.last_start_update_time = None
self.last_end_update_time = None
self.is_active_dispatcher = None
self.concurrent_jobs_limit = None
self.max_concurrent_jobs = None
class SelfHostedIntegrationRuntimeStatus(IntegrationRuntimeStatus):
"""Self-hosted integration runtime status.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:ivar data_factory_name: The workspace name which the integration runtime
belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include:
'Initial', 'Stopped', 'Started', 'Starting', 'Stopping',
'NeedRegistration', 'Online', 'Limited', 'Offline', 'AccessDenied'
:vartype state: str or ~azure.mgmt.synapse.models.IntegrationRuntimeState
:param type: Required. Constant filled by server.
:type type: str
:ivar create_time: The time at which the integration runtime was created,
in ISO8601 format.
:vartype create_time: datetime
:ivar task_queue_id: The task queue id of the integration runtime.
:vartype task_queue_id: str
:ivar internal_channel_encryption: It is used to set the encryption mode
for node-node communication channel (when more than 2 self-hosted
integration runtime nodes exist). Possible values include: 'NotSet',
'SslEncrypted', 'NotEncrypted'
:vartype internal_channel_encryption: str or
~azure.mgmt.synapse.models.IntegrationRuntimeInternalChannelEncryptionMode
:ivar version: Version of the integration runtime.
:vartype version: str
:param nodes: The list of nodes for this integration runtime.
:type nodes:
list[~azure.mgmt.synapse.models.SelfHostedIntegrationRuntimeNode]
:ivar scheduled_update_date: The date at which the integration runtime
will be scheduled to update, in ISO8601 format.
:vartype scheduled_update_date: datetime
:ivar update_delay_offset: The time of day, within the scheduled date, at which
the service will update the integration runtime, e.g., PT03H is 3 hours.
:vartype update_delay_offset: str
:ivar local_time_zone_offset: The local time zone offset in hours.
:vartype local_time_zone_offset: str
:ivar capabilities: Object with additional information about integration
runtime capabilities.
:vartype capabilities: dict[str, str]
:ivar service_urls: The URLs for the services used in integration runtime
backend service.
:vartype service_urls: list[str]
:ivar auto_update: Whether Self-hosted integration runtime auto update has
been turned on. Possible values include: 'On', 'Off'
:vartype auto_update: str or
~azure.mgmt.synapse.models.IntegrationRuntimeAutoUpdate
:ivar version_status: Status of the integration runtime version.
:vartype version_status: str
:param links: The list of linked integration runtimes that are created to
share with this integration runtime.
:type links: list[~azure.mgmt.synapse.models.LinkedIntegrationRuntime]
:ivar pushed_version: The version that the integration runtime is going to
update to.
:vartype pushed_version: str
:ivar latest_version: The latest version on download center.
:vartype latest_version: str
:ivar auto_update_eta: The estimated time when the self-hosted integration
runtime will be updated.
:vartype auto_update_eta: datetime
"""
_validation = {
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
'type': {'required': True},
'create_time': {'readonly': True},
'task_queue_id': {'readonly': True},
'internal_channel_encryption': {'readonly': True},
'version': {'readonly': True},
'scheduled_update_date': {'readonly': True},
'update_delay_offset': {'readonly': True},
'local_time_zone_offset': {'readonly': True},
'capabilities': {'readonly': True},
'service_urls': {'readonly': True},
'auto_update': {'readonly': True},
'version_status': {'readonly': True},
'pushed_version': {'readonly': True},
'latest_version': {'readonly': True},
'auto_update_eta': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'create_time': {'key': 'typeProperties.createTime', 'type': 'iso-8601'},
'task_queue_id': {'key': 'typeProperties.taskQueueId', 'type': 'str'},
'internal_channel_encryption': {'key': 'typeProperties.internalChannelEncryption', 'type': 'str'},
'version': {'key': 'typeProperties.version', 'type': 'str'},
'nodes': {'key': 'typeProperties.nodes', 'type': '[SelfHostedIntegrationRuntimeNode]'},
'scheduled_update_date': {'key': 'typeProperties.scheduledUpdateDate', 'type': 'iso-8601'},
'update_delay_offset': {'key': 'typeProperties.updateDelayOffset', 'type': 'str'},
'local_time_zone_offset': {'key': 'typeProperties.localTimeZoneOffset', 'type': 'str'},
'capabilities': {'key': 'typeProperties.capabilities', 'type': '{str}'},
'service_urls': {'key': 'typeProperties.serviceUrls', 'type': '[str]'},
'auto_update': {'key': 'typeProperties.autoUpdate', 'type': 'str'},
'version_status': {'key': 'typeProperties.versionStatus', 'type': 'str'},
'links': {'key': 'typeProperties.links', 'type': '[LinkedIntegrationRuntime]'},
'pushed_version': {'key': 'typeProperties.pushedVersion', 'type': 'str'},
'latest_version': {'key': 'typeProperties.latestVersion', 'type': 'str'},
'auto_update_eta': {'key': 'typeProperties.autoUpdateETA', 'type': 'iso-8601'},
}
def __init__(self, *, additional_properties=None, nodes=None, links=None, **kwargs) -> None:
super(SelfHostedIntegrationRuntimeStatus, self).__init__(additional_properties=additional_properties, **kwargs)
self.create_time = None
self.task_queue_id = None
self.internal_channel_encryption = None
self.version = None
self.nodes = nodes
self.scheduled_update_date = None
self.update_delay_offset = None
self.local_time_zone_offset = None
self.capabilities = None
self.service_urls = None
self.auto_update = None
self.version_status = None
self.links = links
self.pushed_version = None
self.latest_version = None
self.auto_update_eta = None
self.type = 'SelfHosted'
class SensitivityLabel(ProxyResource):
"""A sensitivity label.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:param label_name: The label name.
:type label_name: str
:param label_id: The label ID.
:type label_id: str
:param information_type: The information type.
:type information_type: str
:param information_type_id: The information type ID.
:type information_type_id: str
:ivar is_disabled: Is sensitivity recommendation disabled. Applicable for
recommended sensitivity label only. Specifies whether the sensitivity
recommendation on this column is disabled (dismissed) or not.
:vartype is_disabled: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'is_disabled': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'label_name': {'key': 'properties.labelName', 'type': 'str'},
'label_id': {'key': 'properties.labelId', 'type': 'str'},
'information_type': {'key': 'properties.informationType', 'type': 'str'},
'information_type_id': {'key': 'properties.informationTypeId', 'type': 'str'},
'is_disabled': {'key': 'properties.isDisabled', 'type': 'bool'},
}
def __init__(self, *, label_name: str=None, label_id: str=None, information_type: str=None, information_type_id: str=None, **kwargs) -> None:
super(SensitivityLabel, self).__init__(**kwargs)
self.label_name = label_name
self.label_id = label_id
self.information_type = information_type
self.information_type_id = information_type_id
self.is_disabled = None
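# A minimal illustrative sketch (not part of the generated SDK code): the
# _attribute_map above flattens label_name to the REST key
# 'properties.labelName' when msrest (de)serializes this model, while the
# readonly fields (id, name, type, is_disabled) are only populated from
# server responses. The helper name below is made up.
def _demo_sensitivity_label():
    label = SensitivityLabel(label_name='Confidential',
                             information_type='Financial')
    # Server-populated attributes stay None until a response is deserialized.
    assert label.is_disabled is None
    return label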
class Sku(Model):
"""Sku.
SQL pool SKU.
:param tier: The service tier
:type tier: str
:param name: The SKU name
:type name: str
"""
_attribute_map = {
'tier': {'key': 'tier',
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A factory test for the audio function.
Description
-----------
This test performs tests on audio playback and recording devices. It supports 2
loopback modes:
1. Loop from headphone out to headphone in.
2. Loop from speaker to digital microphone.
And 3 test scenarios:
1. Audiofun test, which plays different tones and checks recorded frequency.
This test can be conducted simultaneously on different devices. This test
cannot be conducted with a dongle inserted.
2. Sinewav test, which plays a simple sine wave and checks whether the recorded
frequency is in the specified range. Optionally checks the RMS and amplitude
thresholds.
3. Noise test, which plays nothing and records, then checks the RMS and amplitude
thresholds.
Since this test is sensitive to different loopback dongles, users can set a list
of output volume candidates. The test passes if it passes at any one of the
output volume candidates.
Test Procedure
--------------
1. Operator inserts the dongle (if required).
2. The playback starts automatically, and the recordings are analyzed afterward.
Dependency
----------
- Device API ``cros.factory.device.audio``.
Examples
--------
Here are some test list examples for different test cases. First, you need to
figure out the particular input/output device you want to perform the test on. For
ALSA input devices, the command `arecord -l` can be used to list all available
input devices.
For instance, if the device shown as ``card 0: kblrt5514rt5663
[kblrt5514rt5663max], device 1: Audio Record (*)`` is what you want, the
input_dev should be set to ["kblrt5514rt5663max", "1"]. Similarly, the
output_dev might be ["kblrt5514rt5663max", "0"]. These settings are used in the
following examples.
Audiofuntest external mic (default) of input_dev and speakers of output_dev::
{
"pytest_name": "audio_loop",
"args": {
"input_dev": ["kblrt5514rt5663max", "1"],
"output_dev": ["kblrt5514rt5663max", "0"],
"output_volume": 10,
"require_dongle": false,
"check_dongle": true,
"initial_actions": [
["1", "init_speakerdmic"]
],
"tests_to_conduct": [
{
"type": "audiofun",
"threshold": 80,
"player_format": "s16"
}
]
}
}
Audiofuntest on 'mlb' mics of input_dev and speaker channel 0 of output_dev::
{
"pytest_name": "audio_loop",
"args": {
"input_dev": ["kblrt5514rt5663max", "1"],
"output_dev": ["kblrt5514rt5663max", "0"],
"output_volume": 10,
"require_dongle": false,
"check_dongle": true,
"mic_source": "MLBDmic",
"initial_actions": [
["1", "init_speakerdmic"]
],
"tests_to_conduct": [
{
"threshold": 80,
"capture_rate": 16000,
"type": "audiofun",
"output_channels": [0]
}
]
}
}
Noise test using the speaker-dmic loop of input_dev::
{
"pytest_name": "audio_loop",
"args": {
"input_dev": ["kblrt5514rt5663max", "1"],
"output_dev": ["kblrt5514rt5663max", "0"],
"require_dongle": false,
"check_dongle": true,
"initial_actions": [
["1", "init_speakerdmic"]
],
"tests_to_conduct": [
{
"duration": 2,
"amplitude_threshold": [-0.9, 0.9],
"type": "noise",
"rms_threshold": [null, 0.5]
}
]
}
}
Sinewav test through the audio jack loopback of output_dev, with the dongle
inserted::
{
"pytest_name": "audio_loop",
"args": {
"input_dev": ["kblrt5514rt5663max", "1"],
"output_dev": ["kblrt5514rt5663max", "0"],
"output_volume": 15,
"require_dongle": true,
"check_dongle": true,
"initial_actions": [
["1", "init_audiojack"]
],
"tests_to_conduct": [
{
"freq_threshold": 50,
"type": "sinewav",
"rms_threshold": [0.08, null]
}
]
}
}
AudioJack test using ucm config directly. Make sure your audio.json is not set
for your sound card. Otherwise the test will use audio.json instead of ucm::
{
"pytest_name": "audio_loop",
"disable_services": ["cras"],
"args": {
"input_dev": ["kblrt5514rt5663max", "Extmic"],
"output_dev": ["kblrt5514rt5663max", "Headphone"],
"output_volume": 15,
"mic_source": "Extmic",
"require_dongle": true,
"check_dongle": true,
"initial_actions": [
["kblrt5514rt5663max", null]
],
"tests_to_conduct": [
{
"freq_threshold": 50,
"type": "sinewav",
"rms_threshold": [0.08, null]
}
]
}
}
"""
import logging
import os
import re
import time
from cros.factory.device.audio import base
from cros.factory.device import device_utils
from cros.factory.test import session
from cros.factory.test import test_case
from cros.factory.test.utils import audio_utils
from cros.factory.testlog import testlog
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils.schema import JSONSchemaDict
from cros.factory.utils import file_utils
from cros.factory.utils import process_utils
# Default setting
_DEFAULT_FREQ_HZ = 1000
# The additional duration (in seconds) for the sine tone playback.
_DEFAULT_SINEWAV_DURATION_MARGIN = 8
# Regular expressions to match audiofuntest messages.
_AUDIOFUNTEST_MIC_CHANNEL_RE = re.compile(r'.*Microphone channels:\s*(.*)$')
_AUDIOFUNTEST_SUCCESS_RATE_RE = re.compile(
r'.*channel\s*=\s*([0-9]*),.*rate\s*=\s*(.*)$')
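# Illustrative only (the exact audiofuntest output format is an assumption): a
# line such as "channel = 0, success rate = 99.00" would yield the groups
# ('0', '99.00') from _AUDIOFUNTEST_SUCCESS_RATE_RE.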
_AUDIOFUNTEST_RUN_START_RE = re.compile('^carrier')
# Default minimum success rate of audiofun test to pass.
_DEFAULT_AUDIOFUN_TEST_THRESHOLD = 50
# Default iterations to do the audiofun test.
_DEFAULT_AUDIOFUN_TEST_ITERATION = 10
# Default channels of the output_dev to be tested.
_DEFAULT_AUDIOFUN_TEST_OUTPUT_CHANNELS = [0, 1]
# Default audio gain used for audiofuntest.
_DEFAULT_AUDIOFUN_TEST_VOLUME_GAIN = 100
# Default capture sample rate used for audiofuntest.
_DEFAULT_AUDIOFUN_TEST_SAMPLE_RATE = 48000
# Default sample format used by audiofuntest, s16 = Signed 16 Bit.
_DEFAULT_AUDIOFUN_TEST_SAMPLE_FORMAT = 's16'
# Default sample format used to play audio, s16 = Signed 16 Bit.
_DEFAULT_AUDIOFUN_TEST_PLAYER_FORMAT = 's16'
# Default channels of the input_dev to be tested.
_DEFAULT_TEST_INPUT_CHANNELS = [0, 1]
# Default channels of the output_dev to be tested.
_DEFAULT_TEST_OUTPUT_CHANNELS = [0, 1]
# Default duration to do the sinewav test, in seconds.
_DEFAULT_SINEWAV_TEST_DURATION = 2
# Default frequency tolerance, in Hz.
_DEFAULT_SINEWAV_FREQ_THRESHOLD = 50
# Default duration to do the noise test, in seconds.
_DEFAULT_NOISE_TEST_DURATION = 1
# Default RMS thresholds when checking recorded file.
_DEFAULT_SOX_RMS_THRESHOLD = (0.08, None)
# Default Amplitude thresholds when checking recorded file.
_DEFAULT_SOX_AMPLITUDE_THRESHOLD = (None, None)
# Default Max Delta thresholds when checking recorded file.
_DEFAULT_SOX_MAX_DELTA_THRESHOLD = (None, None)
# Default RMS threshold ratio relative to volume_gain when testing audiofuntest.
_DEFAULT_AUDIOFUNTEST_RMS_THRESHOLD_RATIO_RELATIVE_TO_VOLUME_GAIN = 0.0015
# Default minimum RMS threshold when testing audiofuntest.
_DEFAULT_AUDIOFUNTEST_MINIMUM_RMS_THRESHOLD = 0.04
# Default duration in seconds to trim in the beginning of recorded file.
_DEFAULT_TRIM_SECONDS = 0.5
# Default minimum frequency.
_DEFAULT_MIN_FREQUENCY = 4000
# Default maximum frequency.
_DEFAULT_MAX_FREQUENCY = 10000
_ARG_INPUT_DEVICE_SCHEMA = JSONSchemaDict('input_dev schema object', {
'type': 'array',
'items': [
{'type': 'string'},
{
'anyOf': [
{
'type': 'string',
'pattern': '^[0-9]+$'
},
{
'type': 'string',
'enum': list(base.InputDevices)
}
]
}
],
'minItems': 2,
'maxItems': 2
})
_ARG_OUTPUT_DEVICE_SCHEMA = JSONSchemaDict('output_dev schema object', {
'type': 'array',
'items': [
{'type': 'string'},
{
'anyOf': [
{
'type': 'string',
'pattern': '^[0-9]+$'
},
{
'type': 'string',
'enum': list(base.OutputDevices)
}
]
}
],
'minItems': 2,
'maxItems': 2
})
_ARG_CHANNELS_SCHEMA_DICT = {
'type': 'array',
'items': {
'type': ['number']
}
}
_ARG_RANGE_THRESHOLD_SCHEMA_DICT = {
'type': 'array',
'items': {
'type': ['number', 'null']
},
'minItems': 2,
'maxItems': 2
}
_ARG_TESTS_TO_CONDUCT_SCHEMA = JSONSchemaDict(
'tests_to_conduct schema', {
'type': 'array',
'items': {
'type':
'object',
'oneOf': [{
'properties': {
'type': {
'type': 'string',
'enum': ['audiofun']
},
'iteration': {
'type': 'integer'
},
'threshold': {
'type': 'number'
},
'input_channels': _ARG_CHANNELS_SCHEMA_DICT,
'output_channels': _ARG_CHANNELS_SCHEMA_DICT,
'volume_gain': {
'type': 'number',
'minimum': 0,
'maximum': 100
},
'input_gain': {
'type': 'number'
},
'capture_rate': {
'type': 'number'
},
'sample_format': {
'type': 'string',
'enum': ['u8', 's16', 's24', 's32']
},
'player_format': {
'type': 'string',
'enum': ['u8', 's16', 's24', 's32']
},
'min_frequency': {
'type': 'number'
},
'max_frequency': {
'type': 'number'
},
'rms_threshold': {
'type': 'number'
}
},
'additionalProperties': False,
'required': ['type']
}, {
'properties': {
'type': {
'type': 'string',
'enum': ['sinewav']
},
'duration': {
'type': 'number',
},
'input_channels': _ARG_CHANNELS_SCHEMA_DICT,
'output_channels': _ARG_CHANNELS_SCHEMA_DICT,
'freq_threshold': {
'type': 'number'
},
'rms_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT,
'amplitude_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT,
'max_delta_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT
},
'additionalProperties': False,
'required': ['type']
}, {
'properties': {
'type': {
'type': 'string',
'enum': ['noise']
},
'duration': {
'type': 'number'
},
'input_channels': _ARG_CHANNELS_SCHEMA_DICT,
'rms_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT,
'amplitude_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT,
'max_delta_threshold': _ARG_RANGE_THRESHOLD_SCHEMA_DICT
},
'additionalProperties': False,
'required': ['type']
}]
}
})
class AudioLoopTest(test_case.TestCase):
"""Audio Loop test to test two kind of situations.
1. Speaker to digital microphone.
2. Headphone out to headphone in.
"""
ARGS = [
Arg('audio_conf', str, 'Audio config file path', default=None),
Arg(
'initial_actions', list,
'List of [card, actions]. If actions is None, the Initialize method '
'will be invoked.', default=None),
Arg(
'input_dev', list, 'Input ALSA device. [card_name, sub_device]. '
'For example: ["audio_card", "0"]. The sub_device could be a string '
'of an integer or one of %r. If this argument is a string of an '
'integer then it represents the PCM Id. Otherwise the test will find '
'the PCM Id from UCM config using this argument as the keyword.' %
list(base.InputDevices), default=['0', '0'],
schema=_ARG_INPUT_DEVICE_SCHEMA),
Arg('num_input_channels', int, 'Number of input channels.', default=2),
Arg(
'output_dev', list, 'Output ALSA device. [card_name, sub_device]. '
'For example: ["audio_card", "0"]. The sub_device could be a string '
'of an integer or one of %r. If this argument is a string of an '
'integer then it represents the PCM Id. Otherwise the test will find '
'the PCM Id from UCM config using this argument as the keyword.' %
list(base.OutputDevices), default=['0', '0'],
schema=_ARG_OUTPUT_DEVICE_SCHEMA),
Arg('num_output_channels', int, 'Number of output channels.', default=2),
Arg('output_volume', (int, list),
'An int of output volume or a list of output volume candidates',
default=None),
Arg('autostart', bool, 'Auto start option', default=False),
Arg('require_dongle', bool, 'Require dongle option', default=False),
Arg('check_dongle', bool,
'Check whether the dongle status matches require_dongle', default=False),
Arg('check_cras', bool, 'Do we need to check if CRAS is running',
default=True),
Arg('cras_enabled', bool, 'Whether cras should be running or not',
default=False),
Arg('mic_source', base.InputDevices, 'Microphone source',
default=base.InputDevices.Extmic),
Arg(
'test_title', str, 'Title on the test screen. '
'It can be used to tell operators the
# Repository: lzanuz/django-watermark
# -*- coding: utf-8 -*-
import os
import errno
import hashlib
import logging
import pickle
import traceback
from PIL import Image
from datetime import datetime
try:
from urllib.parse import unquote
from urllib.request import url2pathname
except ImportError:
from urllib import unquote, url2pathname
from django import template
from django.conf import settings
from django.core.cache import caches
from django.utils.encoding import smart_str
from django.utils.timezone import make_aware, get_default_timezone
from watermarker import utils
from watermarker.conf import WatermarkSettings
from watermarker.models import Watermark
register = template.Library()
logger = logging.getLogger('watermarker')
QUALITY = WatermarkSettings.WATERMARK_QUALITY
OBSCURE_ORIGINAL = WatermarkSettings.WATERMARK_OBSCURE_ORIGINAL
RANDOM_POSITION_ONCE = WatermarkSettings.WATERMARK_RANDOM_POSITION_ONCE
CACHE_BACKEND_NAME = WatermarkSettings.WATERMARK_CACHE_BACKEND_NAME
class Watermarker(object):
def __call__(self, url, name, position=None, opacity=0.5,
tile=False, scale=1.0, greyscale=False, rotation=0,
obscure=OBSCURE_ORIGINAL,
quality=QUALITY,
random_position_once=RANDOM_POSITION_ONCE):
"""
Creates a watermarked copy of an image.
* ``name``: This is the name of the Watermark object that you wish to
apply to the image.
* ``position``: There are several options.
* ``R``: random placement, which is the default behavior.
* ``C``: center the watermark
* ``XxY`` where ``X`` is either a specific pixel position on the
x-axis or a percentage of the total width of the target image and
``Y`` is a specific pixel position on the y-axis of the image or
a percentage of the total height of the target image. These
values represent the location of the top and left edges of the
watermark. If either ``X`` or ``Y`` is a percentage, you must
use a percent sign. This is not used if either one of the
``tiled`` or ``scale`` parameters are true.
Examples:
* ``50%x50%``: positions the watermark at the center of the
image.
* ``50%x100``: positions the watermark at the midpoint of the
total width of the image and 100 pixels from the top of the
image
* ``100x50%``: positions the watermark at the midpoint of the
total height of the image and 100 pixels from the left edge
of the image
* ``100x100``: positions the top-left corner of the watermark
at 100 pixels from the top of the image and 100 pixels from
the left edge of the image.
* ``br``, ``bl``, ``tr``, ``tl`` where ``b`` means "bottom", ``t``
means "top", ``l`` means "left", and ``r`` means "right". This
will position the watermark at the extreme edge of the original
image with just enough room for the watermark to "fully show".
This assumes the watermark is not as big as the original image.
* ``opacity``: an integer from 0 to 100. This value represents the
transparency level of the watermark when it is applied. A value of
100 means the watermark is completely opaque while a value of 0 means
the watermark will be invisible.
* ``tile``: ``True`` or ``False`` to specify whether or not the
watermark shall be tiled across the entire image.
* ``scale``: a floating-point number above 0 to specify the scaling for
the watermark. If you want the watermark to be scaled to its maximum
without falling off the edge of the target image, use ``F``. By
default, scale is set to ``1.0``, or 1:1 scaling, meaning the
watermark will be placed on the target image at its original size.
* ``greyscale``: ``True`` or ``False`` to specify whether or not the
watermark should be converted to a greyscale image before applying it
to the target image. Default is ``False``.
* ``rotation``: 0 to 359 to specify the number of degrees to rotate the
watermark before applying it to the target image. Alternatively, you
may set ``rotation=R`` for a random rotation value.
* ``obscure``: set to ``False`` if you wish to expose the original
image's filename. Defaults to ``True``.
* ``quality``: the quality of the resulting watermarked image. Default
is 85.
"""
# look for the specified watermark by name. If it's not there, go no
# further
try:
watermark = None
cache = caches[CACHE_BACKEND_NAME] if CACHE_BACKEND_NAME else None
# use defined cache backend
if cache:
watermark = cache.get('watermark_%s' % (name))
watermark = pickle.loads(watermark) if watermark else None
# watermark not in cache
if watermark is None:
watermark = Watermark.objects.get(name=name, is_active=True)
if cache:
# set cache, never expires until change
cache.set(key='watermark_%s' % (name),
value=pickle.dumps(watermark),
timeout=None)
except Watermark.DoesNotExist:
logger.error(
'Watermark "%s" does not exist... Bailing out.' % name)
return url
# make sure URL is a string
url = smart_str(url)
basedir = '%s/watermarked/' % os.path.dirname(url)
original_basename, ext = os.path.splitext(os.path.basename(url))
# open the target image file along with the watermark image
target = Image.open(self._get_filesystem_path(url))
mark = Image.open(watermark.image.path)
# determine the actual value that the parameters provided will render
random_position =\
bool(position is None or str(position).lower() == 'r')
scale = utils.determine_scale(scale, target, mark)
mark = mark.resize(scale, resample=Image.ANTIALIAS)
rotation = utils.determine_rotation(rotation, mark)
pos = utils.determine_position(position, target, mark)
# see if we need to create only one randomly positioned watermarked
# image
if not random_position or (
not random_position_once and random_position):
logger.debug('Generating random position for watermark each time')
position = pos
else:
logger.debug('Random positioning watermark once')
params = {
'position': position,
'opacity': opacity,
'scale': scale,
'tile': tile,
'greyscale': greyscale,
'rotation': rotation,
'original_basename': original_basename,
'ext': ext,
'quality': quality,
'watermark': watermark.id,
'left': pos[0],
'top': pos[1],
'fstat': os.stat(self._get_filesystem_path(url)),
}
logger.debug('Params: %s' % params)
fname = self.generate_filename(mark, **params)
url_path = self.get_url_path(
basedir, original_basename, ext, fname, obscure)
fpath = self._get_filesystem_path(url_path)
logger.debug('Watermark name: %s; URL: %s; Path: %s' % (
fname, url_path, fpath,
))
# See if the image already exists on the filesystem. If it does, use it.
if os.access(fpath, os.R_OK):
# see if the ``Watermark`` object was modified since the
# file was created
modified = make_aware(
datetime.fromtimestamp(os.path.getmtime(fpath)),
get_default_timezone())
# only return the old file if things appear to be the same
if modified >= watermark.date_updated:
logger.info(
'Watermark exists and has not changed. Bailing out.')
return url_path
# make sure the position is in our params for the watermark
params['position'] = pos
self.create_watermark(target, mark, fpath, **params)
# send back the URL to the new, watermarked image
return url_path
def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT):
"""
Makes a filesystem path from the specified URL path
"""
if url_path.startswith(settings.MEDIA_URL):
# strip media root url
url_path = url_path[len(settings.MEDIA_URL):]
return os.path.normpath(os.path.join(basedir, url2pathname(url_path)))
def generate_filename(self, mark, **kwargs):
"""
Comes up with a good filename for the watermarked image
"""
kwargs = kwargs.copy()
kwargs['opacity'] = int(kwargs['opacity'] * 100)
"""
* This is difficult to explain, but I'll try.
* A user uploaded a photo and the watermark was applied to it.
* Some time later the photo is deleted, but the watermarked file is not.
* Then a new photo with the same base name is uploaded. The code would see
that an obscured watermark file with that hash already exists and would not
generate it again, so the watermarked photo returned would differ from the
newly uploaded one.
* So we add the modification time and file size to the watermark name to
avoid this situation.
"""
kwargs['st_mtime'] = kwargs['fstat'].st_mtime
kwargs['st_size'] = kwargs['fstat'].st_size
params = [
'%(original_basename)s',
'wm',
'w%(watermark)i',
'o%(opacity)i',
'gs%(greyscale)i',
'r%(rotation)i',
'fm%(st_mtime)i',
'fz%(st_size)i',
'p%(position)s',
]
scale = kwargs.get('scale', None)
if scale and scale != mark.size:
params.append('_s%i' % (
float(kwargs['scale'][0]) / mark.size[0] * 100))
if kwargs.get('tile', None):
params.append('_tiled')
# make thumbnail filename
filename = '%s%s' % ('_'.join(params), kwargs['ext'])
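# The resulting name looks like (values are illustrative only):
#   photo_wm_w3_o50_gs0_r0_fm1609459200_fz52481_pbr.jpg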
return filename % kwargs
def get_url_path(self, basedir, original_basename,
ext, name, obscure=True):
"""
Determines an appropriate watermark path
"""
try:
hash = hashlib.sha1(smart_str(name)).hexdigest()
except TypeError:
hash = hashlib.sha1(smart_str(name).encode('utf-8')).hexdigest()
# figure out where the watermark would be saved on the filesystem
if obscure is True:
logger.debug(
'Obscuring original image name: %s => %s' % (name, hash))
url_path = os.path.join(basedir, hash + ext)
else:
logger.debug('Not obscuring original image name.')
url_path = os.path.join(basedir, hash, original_basename + ext)
# make sure the destination directory exists
try:
fpath = self._get_filesystem_path(url_path)
os.makedirs(os.path.dirname(fpath))
except OSError as e:
if e.errno == errno.EEXIST:
pass # not to worry, directory exists
else:
logger.error(
'Error creating path: %s' % traceback.format_exc())
raise
else:
logger.debug('Created directory: %s' % os.path.dirname(fpath))
return url_path
def create_watermark(self, target, mark, fpath, quality=QUALITY, **kwargs):
"""
Create the watermarked image on the filesystem
"""
im = utils.watermark(target, mark, **kwargs)
if target.format == 'JPEG' and im.mode.endswith('A'):
im
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import mean_absolute_error as MAE
from datetime import datetime
from fbprophet import Prophet
from matplotlib.dates import (
MonthLocator,
num2date,
AutoDateLocator,
AutoDateFormatter,
)
from matplotlib.ticker import FuncFormatter
def median_filter(df, varname = 'y', window=24, std=2.75):
"""
A simple median filter: removes (i.e. replaces by np.nan) observations that deviate by more than
`std` (default = 2.75) standard deviations from the median over a window of length `window`
(default = 24) centered around each observation.
Parameters
----------
df : pandas.DataFrame
The pandas.DataFrame containing the column to filter.
varname : string
Column to filter in the pandas.DataFrame. Default is 'y'.
window : integer
Size of the window around each observation for the calculation
of the median and std. Default is 24 (time-steps).
std : float
Threshold, in standard deviations around the median, beyond which values are replaced
by `np.nan`. Default is 2.75 (greater / less or equal).
Returns
-------
filtered : pandas.Series
The filtered `varname` column, with outlying observations replaced by `np.nan`
"""
dfc = df.loc[:,[varname]]
dfc['median']= dfc[varname].rolling(window, center=True).median()
dfc['std'] = dfc[varname].rolling(window, center=True).std()
dfc.loc[dfc.loc[:,varname] >= dfc['median']+std*dfc['std'], varname] = np.nan
dfc.loc[dfc.loc[:,varname] <= dfc['median']-std*dfc['std'], varname] = np.nan
return dfc.loc[:, varname]
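# A minimal illustrative sketch (not part of the original module, and the
# helper name is made up) showing median_filter replacing an injected
# outlier with NaN.
def _demo_median_filter():
    rng = pd.date_range('2017-01-01', periods=200, freq='H')
    df = pd.DataFrame({'y': np.sin(np.arange(200) / 10.)}, index=rng)
    df.iloc[100, 0] = 50.0  # inject an obvious spike
    filtered = median_filter(df, varname='y', window=24, std=2.75)
    assert np.isnan(filtered.iloc[100])  # the spike is removed
    return filtered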
def prepare_data(data, year=2017):
"""
prepare the data for ingestion by fbprophet:
see: https://facebook.github.io/prophet/docs/quick_start.html
1) divide into training and test sets, using the `year` parameter (int)
2) reset the index and rename the `datetime` column to `ds`
returns the training and test dataframes
Parameters
----------
data : pandas.DataFrame
The dataframe to prepare, needs to have a datetime index
year: integer
The year separating the training set and the test set (includes the year)
Returns
-------
data_train : pandas.DataFrame
The training set, formatted for fbprophet.
data_test : pandas.Dataframe
The test set, formatted for fbprophet.
"""
data_train = data[:str(year-1)]
data_test = data[str(year):]
data_train.reset_index(inplace=True)
data_test.reset_index(inplace=True)
data_train = data_train.rename({'date':'ds'}, axis=1)
data_test = data_test.rename({'date':'ds'}, axis=1)
return data_train, data_test
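# A minimal illustrative sketch (not part of the original module, and the
# helper name is made up) showing the train/test split produced by
# prepare_data for a two-year series with a 'date' index.
def _demo_prepare_data():
    idx = pd.date_range('2016-01-01', '2017-12-31 23:00', freq='H', name='date')
    df = pd.DataFrame({'y': np.random.randn(len(idx))}, index=idx)
    data_train, data_test = prepare_data(df, year=2017)
    # Training rows end in 2016, test rows start in 2017; both gained a 'ds' column.
    assert data_train['ds'].dt.year.max() == 2016
    assert data_test['ds'].dt.year.min() == 2017
    return data_train, data_test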
def add_regressor(data, regressor, varname=None):
"""
adds a regressor to a `pandas.DataFrame` of target (predictand) values
for use in fbprophet
Parameters
----------
data : pandas.DataFrame
The pandas.DataFrame in the fbprophet format (see function `prepare_data` in this package)
regressor : pandas.DataFrame
A pandas.DataFrame containing the extra-regressor
varname : string
The name of the column in the `regressor` DataFrame to add to the `data` DataFrame
Returns
-------
verif : pandas.DataFrame
The original `data` DataFrame with the column containing the
extra regressor `varname`
"""
data_with_regressors = data.copy()
data_with_regressors.loc[:,varname] = regressor.loc[:,varname]
return data_with_regressors
def add_regressor_to_future(future, regressors):
"""
adds extra regressors to a `future` DataFrame created by fbprophet
Parameters
----------
future : pandas.DataFrame
A `future` DataFrame created by the fbprophet `make_future_dataframe` method
regressors : pandas.DataFrame
The pandas.DataFrame containing the regressors (with a datetime index)
Returns
-------
futures : pandas.DataFrame
The `future` DataFrame with the regressors added
"""
futures = future.copy()
futures.index = pd.to_datetime(futures.ds)
futures = futures.merge(regressors, left_index=True, right_index=True)
futures = futures.reset_index(drop = True)
return futures
def make_verif(forecast, data_train, data_test):
"""
Put together the forecast (coming from fbprophet)
and the observed data, and set the index to be a proper datetime index,
for plotting
Parameters
----------
forecast : pandas.DataFrame
The pandas.DataFrame coming from the `forecast` method of a fbprophet
model.
data_train : pandas.DataFrame
The training set, pandas.DataFrame
data_test : pandas.DataFrame
The test set, pandas.DataFrame
Returns
-------
forecast : pandas.DataFrame
The forecast DataFrame including the original observed data.
"""
forecast.index = pd.to_datetime(forecast.ds)
data_train.index = pd.to_datetime(data_train.ds)
data_test.index = pd.to_datetime(data_test.ds)
data = pd.concat([data_train, data_test], axis=0)
forecast.loc[:,'y'] = data.loc[:,'y']
return forecast
def plot_verif(verif, year=2017):
"""
plots the forecasts and observed data, the `year` argument is used to visualise
the division between the training and test sets.
Parameters
----------
verif : pandas.DataFrame
The `verif` DataFrame coming from the `make_verif` function in this package
year : integer
The year used to separate the training and test set. Default 2017
Returns
-------
f : matplotlib Figure object
"""
f, ax = plt.subplots(figsize=(14, 8))
train = verif.loc[:str(year - 1),:]
ax.plot(train.index, train.y, 'ko', markersize=3)
ax.plot(train.index, train.yhat, lw=0.5)
ax.fill_between(train.index, train.yhat_lower, train.yhat_upper, alpha=0.3)
test = verif.loc[str(year):,:]
ax.plot(test.index, test.y, 'ro', markersize=3)
ax.plot(test.index, test.yhat, lw=0.5)
ax.fill_between(test.index, test.yhat_lower, test.yhat_upper, alpha=0.3)
ax.axvline(datetime(year,1,1), color='0.8', alpha=0.7)
ax.grid(ls=':', lw=0.5)
return f
def plot_verif_component(verif, component='rain', year=2017):
"""
plots a specific component of the `verif` DataFrame
Parameters
----------
verif : pandas.DataFrame
The `verif` DataFrame coming from the `make_verif` function in this package.
component : string
The name of the component (i.e. column name) to plot in the `verif` DataFrame.
year : integer
The year used to separate the training and test set. Default 2017
Returns
-------
f : matplotlib Figure object
"""
f, ax = plt.subplots(figsize=(14, 7))
train = verif.loc[:str(year - 1),:]
ax.plot(train.index, train.loc[:,component] * 100, color='0.8', lw=1, ls='-')
ax.fill_between(train.index, train.loc[:, component+'_lower'] * 100, train.loc[:, component+'_upper'] * 100, color='0.8', alpha=0.3)
test = verif.loc[str(year):,:]
ax.plot(test.index, test.loc[:,component] * 100, color='k', lw=1, ls='-')
ax.fill_between(test.index, test.loc[:, component+'_lower'] * 100, test.loc[:, component+'_upper'] * 100, color='0.8', alpha=0.3)
ax.axvline(str(year), color='k', alpha=0.7)
ax.grid(ls=':', lw=0.5)
return f
def plot_joint_plot(verif, x='yhat', y='y', title=None, fpath = '../figures/paper', fname = None):
"""
Parameters
----------
verif : pandas.DataFrame
x : string
The variable on the x-axis
Defaults to `yhat`, i.e. the forecast or estimated values.
y : string
The variable on the y-axis
Defaults to `y`, i.e. the observed values
title : string
The title of the figure, default `None`.
fpath : string
The path to save the figures, default to `../figures/paper`
fname : string
The filename for the figure to be saved
omits the extension; the figure is saved in png, jpeg and pdf
Returns
-------
f : matplotlib Figure object
"""
g = sns.jointplot(x=x, y=y, data=verif, kind="reg", color="0.4")  # honor the x/y arguments
g.fig.set_figwidth(8)
g.fig.set_figheight(8)
ax = g.fig.axes[1]
if title is not None:
ax.set_title("R = {:+4.2f}\nMAE = {:4.1f}".format(verif.loc[:,['y','yhat']].corr().iloc[0,1], MAE(verif.loc[:,'y'].values, verif.loc[:,'yhat'].values)), fontsize=16)
ax = g.fig.axes[0]
ax.set_xlabel("model's estimates", fontsize=15)
ax.set_ylabel("observations", fontsize=15)
ax.grid(ls=':')
[l.set_fontsize(13) for l in ax.xaxis.get_ticklabels()]
[l.set_fontsize(13) for l in ax.yaxis.get_ticklabels()];
ax.grid(ls=':')
if fname is not None:
for ext in ['png','jpeg','pdf']:
g.fig.savefig(os.path.join(fpath, "{}.{}".format(fname, ext)), dpi=200)
def state_plot(df, state, sector,col='pct_error', year_lim=2019):
df_plot = df[(df.state==state)&(df.sector==sector)&(df.year>=year_lim)].copy()
fig,ax = plt.subplots(figsize=(10,4))
mean_error_before = df_plot[(df_plot.date<'2020-03-01')][col].mean()
mean_error_after = df_plot[(df_plot.date>='2020-03-01')][col].mean()
# df_plot = df_plot.set_index('date').sort_index()
# df_plot[col].plot(ax=ax, marker="o")
df_plot = df_plot.sort_values('date')
plot = sns.lineplot(x='date',y=col,data=df_plot,ax=ax, marker='o', markersize=7)
x_dates = [t.strftime('%b\n%Y') if t.month==1 else t.strftime('%b') for t in df_plot[df_plot.date.dt.month%3 ==1]['date']]
ax.set_xticklabels(labels=x_dates)
# # for ind, label in enumerate(plot.get_xticklabels()):
# if ind % 3 == 0: # every 10th label is kept
# label.set_visible(True)
# else:
# label.set_visible(False)
plt.xlabel('Month')
plt.ylabel('% Error in prediction')
plt.axvline(x=datetime(2020,2,15),color='#f76d23',linestyle='dotted')
plt.axhline(y=mean_error_before, xmin=0, xmax=0.58, color='r', linestyle='--',linewidth=1.5)
plt.axhline(y=mean_error_after, xmin=0.58, xmax=1, color='g', linestyle='--',linewidth=1.5)
sign = '+' if mean_error_after-mean_error_before > 0 else '-'
text_color = 'g' if mean_error_after-mean_error_before > 0 else 'r'
plt.text(x=datetime(2020,2,10),y=(mean_error_after),s= "{}{}%".format(sign, np.round(mean_error_after-mean_error_before,2)),
fontsize=13,horizontalalignment='right', color=text_color, fontweight='bold')
plt.title("Prediction error over time for {} sector in {}".format(sector,state))
# plt.axvline()
def get_model_for_state_sector(data, state, sector, split_year=2019, plot_forecast=False, changepoint_prior_scale=0.5,samples=300):
## Defining Training Data
df_model = data[(data.state == state)&(data.sector == sector)].copy().set_index('date').sort_index()
df_train, df_test = prepare_data(df_model[['y','heating_days','cooling_days','pct_weekdays','hot','cold','wind']], year=split_year)
regressors_df = df_model[['heating_days','cooling_days','pct_weekdays','hot','cold','wind']]
## Defining Prophet Model
m = Prophet(seasonality_mode='multiplicative',
yearly_seasonality=5,daily_seasonality=False,weekly_seasonality=False,mcmc_samples=samples,
changepoint_prior_scale=changepoint_prior_scale, changepoint_range=0.95)
m.add_regressor('heating_days', mode='additive')
m.add_regressor('cooling_days', mode='additive')
m.add_regressor('pct_weekdays', mode='additive')
m.add_regressor('hot', mode='additive')
m.add_regressor('cold', mode='additive')
m.add_regressor('wind', mode='additive')
# m.add_regressor('weird_range', mode='additive')
m_fit = m.fit(df_train,control={'max_treedepth': 12})
## Getting forecasts
future = m_fit.make_future_dataframe(periods = 21, freq = 'MS')
future = add_regressor_to_future(future, regressors_df)
# future = future.merge(df_model[['date','heating_days','cooling_days','pct_weekdays']],how='left',left_on='ds',right_on='date').drop(columns=['date']).dropna()
forecast = m_fit.predict(future)
if plot_forecast:
fig = m_fit.plot(forecast)
m.plot_components(forecast)
## Validation
verif = make_verif(forecast[forecast.ds.dt.year<=2020], df_train, df_test)
print("Prediction Correlation: {}".format(verif.loc[:,['y','yhat']].corr()['y']['yhat']))
if plot_forecast:
f = plot_verif(verif,split_year)
plot_joint_plot(verif.loc['2019':'2019',:], title='test set')
def prophet_plot(
m, fcst, col, ax=None, uncertainty=True, plot_cap=True, xlabel='ds', ylabel='y',
figsize=(10, 6), include_legend=False
):
"""Plot the Prophet | |
# appearance flow params
if_froze_pwc=False,
app_occ_stop_gradient=True,
app_loss_weight=0,
app_distilation_weight=0,
if_upsample_flow=False,
if_upsample_flow_mask=False,
if_upsample_flow_output=False,
if_concat_multi_scale_feature=False,
input_or_sp_input=1,
):
super(PWCNet_unsup_irr_bi_v5_1, self).__init__()
self.input_or_sp_input = input_or_sp_input  # controls whether the sp-cropped input is used for the forward pass while the photo loss is computed on the original images
self.if_save_running_process = False
self.save_running_process_dir = ''
self.if_test = if_test
self.multi_scale_distillation_weight = multi_scale_distillation_weight
self.multi_scale_photo_weight = multi_scale_photo_weight
self.multi_scale_distillation_style = multi_scale_distillation_style
self.multi_scale_distillation_occ = multi_scale_distillation_occ
# smooth
self.occ_check_model = tools.occ_check_model(occ_type=occ_type, occ_alpha_1=occ_alpha_1, occ_alpha_2=occ_alpha_2,
sum_abs_or_squar=occ_check_sum_abs_or_squar, obj_out_all=occ_check_obj_out_all)
self.smooth_level = smooth_level
self.smooth_type = smooth_type
self.smooth_order_1_weight = smooth_order_1_weight
self.smooth_order_2_weight = smooth_order_2_weight
# photo loss
self.photo_loss_type = photo_loss_type
self.photo_loss_census_weight = photo_loss_census_weight
self.photo_loss_use_occ = photo_loss_use_occ # if use occ mask in photo loss
self.photo_loss_delta = photo_loss_delta # delta in photo loss function
self.stop_occ_gradient = stop_occ_gradient
self.if_norm_before_cost_volume = if_norm_before_cost_volume
self.norm_moments_across_channels = norm_moments_across_channels
self.norm_moments_across_images = norm_moments_across_images
self.search_range = 4
self.num_chs = [3, 16, 32, 64, 96, 128, 196]
# 1/2 1/4 1/8 1/16 1/32 1/64
self.output_level = 4
self.num_levels = 7
self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)
self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
# self.warping_layer = WarpingLayer()
self.warping_layer = WarpingLayer_no_div()
self.dim_corr = (self.search_range * 2 + 1) ** 2
self.num_ch_in = self.dim_corr + 32 + 2
self.flow_estimators = FlowEstimatorDense(self.num_ch_in)
self.context_networks = ContextNetwork(self.num_ch_in + 448 + 2)
self.if_concat_multi_scale_feature = if_concat_multi_scale_feature
if if_concat_multi_scale_feature:
self.conv_1x1_cmsf = nn.ModuleList([conv(196, 32, kernel_size=1, stride=1, dilation=1),
conv(128 + 32, 32, kernel_size=1, stride=1, dilation=1),
conv(96 + 32, 32, kernel_size=1, stride=1, dilation=1),
conv(64 + 32, 32, kernel_size=1, stride=1, dilation=1),
conv(32 + 32, 32, kernel_size=1, stride=1, dilation=1)])
else:
self.conv_1x1 = nn.ModuleList([conv(196, 32, kernel_size=1, stride=1, dilation=1),
conv(128, 32, kernel_size=1, stride=1, dilation=1),
conv(96, 32, kernel_size=1, stride=1, dilation=1),
conv(64, 32, kernel_size=1, stride=1, dilation=1),
conv(32, 32, kernel_size=1, stride=1, dilation=1)])
# flow upsample module
class _Upsample_flow(tools.abstract_model):
def __init__(self):
super(_Upsample_flow, self).__init__()
ch_in = 32
k = ch_in
ch_out = 64
self.conv1 = conv(ch_in, ch_out)
k += ch_out
ch_out = 64
self.conv2 = conv(k, ch_out)
k += ch_out
ch_out = 32
self.conv3 = conv(k, ch_out)
k += ch_out
ch_out = 16
self.conv4 = conv(k, ch_out)
k += ch_out
# ch_out = 64
# self.conv5 = conv(k, ch_out)
# k += ch_out
self.conv_last = conv(k, 2, isReLU=False)
def forward(self, x):
x1 = torch.cat([self.conv1(x), x], dim=1)
x2 = torch.cat([self.conv2(x1), x1], dim=1)
x3 = torch.cat([self.conv3(x2), x2], dim=1)
x4 = torch.cat([self.conv4(x3), x3], dim=1)
# x5 = torch.cat([self.conv5(x4), x4], dim=1)
x_out = self.conv_last(x4)
return x_out
@classmethod
def demo(cls):
from thop import profile
a = _Upsample_flow()
feature = np.zeros((1, 32, 320, 320))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_,), verbose=False)
print('PWCNet_unsup_irr_bi_appflow_v8: flops: %.1f G, params: %.1f M' % (flops / 1000 / 1000 / 1000, params / 1000 / 1000))
@classmethod
def demo_mscale(cls):
from thop import profile
'''
320 : flops: 15.5 G, params: 0.2 M
160 : flops: 3.9 G, params: 0.2 M
80 : flops: 1.0 G, params: 0.2 M
40 : flops: 0.2 G, params: 0.2 M
20 : flops: 0.1 G, params: 0.2 M
10 : flops: 0.0 G, params: 0.2 M
5 : flops: 0.0 G, params: 0.2 M
'''
a = _Upsample_flow()
for i in [320, 160, 80, 40, 20, 10, 5]:
feature = np.zeros((1, 32, i, i))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_,), verbose=False)
print('%s : flops: %.1f G, params: %.1f M' % (i, flops / 1000 / 1000 / 1000, params / 1000 / 1000))
class _Upsample_flow_v2(tools.abstract_model):
def __init__(self):
super(_Upsample_flow_v2, self).__init__()
class FlowEstimatorDense_temp(tools.abstract_model):
def __init__(self, ch_in, f_channels=(128, 128, 96, 64, 32)):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv_last = conv(N, 2, isReLU=False)
def forward(self, x):
x1 = torch.cat([self.conv1(x), x], dim=1)
x2 = torch.cat([self.conv2(x1), x1], dim=1)
x3 = torch.cat([self.conv3(x2), x2], dim=1)
x4 = torch.cat([self.conv4(x3), x3], dim=1)
x5 = torch.cat([self.conv5(x4), x4], dim=1)
x_out = self.conv_last(x5)
return x5, x_out
self.dense_estimator = FlowEstimatorDense_temp(32, (64, 64, 64, 32, 16))
self.upsample_output_conv = nn.Sequential(conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2), )
def forward(self, x_raw, if_output_level=False):
if if_output_level:
x = self.upsample_output_conv(x_raw)
else:
x = x_raw
_, x_out = self.dense_estimator(x)
if if_output_level:
x_out = upsample2d_flow_as(x_out, x_raw, mode="bilinear", if_rate=True)
return x_out
@classmethod
def demo(cls):
from thop import profile
a = _Upsample_flow_v2()
feature = np.zeros((1, 32, 320, 320))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_,), verbose=False)
print('PWCNet_unsup_irr_bi_appflow_v8: flops: %.3f G, params: %.3f M' % (flops / 1000 / 1000 / 1000, params / 1000 / 1000))
@classmethod
def demo_mscale(cls):
from thop import profile
'''
320 : flops: 15.5 G, params: 0.2 M
160 : flops: 3.9 G, params: 0.2 M
80 : flops: 1.0 G, params: 0.2 M
40 : flops: 0.2 G, params: 0.2 M
20 : flops: 0.1 G, params: 0.2 M
10 : flops: 0.0 G, params: 0.2 M
5 : flops: 0.0 G, params: 0.2 M
'''
a = _Upsample_flow_v2()
for i in [320, 160, 80, 40, 20, 10, 5]:
feature = np.zeros((1, 32, i, i))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_,), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % (i, flops / 1000 / 1000 / 1000, params / 1000 / 1000))
feature = np.zeros((1, 3, 320, 320))
feature_ = torch.from_numpy(feature).float()
flops, params = profile(a, inputs=(feature_, True), verbose=False)
print('%s : flops: %.3f G, params: %.3f M' % ('output level', flops / 1000 / 1000 / 1000, params / 1000 / 1000))
class _Upsample_flow_v3(tools.abstract_model):
def __init__(self):
super(_Upsample_flow_v3, self).__init__()
class FlowEstimatorDense_temp(tools.abstract_model):
def __init__(self, ch_in, f_channels=(128, 128, 96, 64, 32)):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, 2, isReLU=False)
def forward(self, x):
x1 = torch.cat([self.conv1(x), x], dim=1)
x2 = torch.cat([self.conv2(x1), x1], dim=1)
x3 = torch.cat([self.conv3(x2), x2], dim=1)
x4 = torch.cat([self.conv4(x3), x3], dim=1)
x5 = torch.cat([self.conv5(x4), x4], dim=1)
x_out = self.conv_last(x5)
return x5, x_out
class ContextNetwork_temp(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp, self).__init__()
self.num_ls = num_ls
cnt = 0
cnt_in = num_ls[0]
self.cov1 = conv(num_ls[0], num_ls[1], 3, 1, 1)
cnt += 1 # 1
cnt_in += num_ls[cnt]
self.cov2 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 2)
cnt += 1 # 2
cnt_in += num_ls[cnt]
self.cov3 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 4)
cnt += 1 # 3
cnt_in += num_ls[cnt]
self.cov4 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 8)
cnt += 1 # 4
cnt_in += num_ls[cnt]
self.cov5 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 16)
cnt += 1 # 5
cnt_in += num_ls[cnt]
self.cov6 = conv(cnt_in, num_ls[cnt + 1], 3, 1, 1)
cnt += 1
cnt_in += num_ls[cnt]
self.final = conv(cnt_in, num_ls[cnt + 1], isReLU=False)
def forward(self, x):
x = torch.cat((self.cov1(x), x), dim=1)
x = torch.cat((self.cov2(x), x), dim=1)
x = torch.cat((self.cov3(x), x), dim=1)
x = torch.cat((self.cov4(x), x), dim=1)
x = torch.cat((self.cov5(x), x), dim=1)
x = torch.cat((self.cov6(x), x), dim=1)
x = self.final(x)
return x
class ContextNetwork_temp_2(nn.Module):
def __init__(self, num_ls=(3, 128, 128, 128, 96, 64, 32, 16)):
super(ContextNetwork_temp_2, self).__init__()
self.convs = nn.Sequential(
conv(num_ls[0], num_ls[1], 3, 1, 1),
conv(num_ls[1], num_ls[2], 3, 1, 2),
conv(num_ls[2], num_ls[3], 3, 1, 4),
conv(num_ls[3], num_ls[4], 3, 1, 8),
conv(num_ls[4], num_ls[5], 3, 1, 16),
conv(num_ls[5], num_ls[6], 3, 1, 1),
conv(num_ls[6], num_ls[7], isReLU=False)
)
def forward(self, x):
return self.convs(x)
self.dense_estimator = FlowEstimatorDense_temp(32, (64, 64, 64, 32, 16))
self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 64, 64, 64, 32, 32, 16, 2))
# self.dense_estimator = FlowEstimatorDense_temp(32, (128, 128, 96, 64, 32))
# self.context_estimator = ContextNetwork_temp_2(num_ls=(self.dense_estimator.num_feature_channel + 2, 128, 128, 128, 96, 64, 32, 2))
self.upsample_output_conv = nn.Sequential(conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32,
= comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_exception_occurrence_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_exception_occurrence_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_exception_occurrence_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_calendar_view_exception_occurrence_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_exception_occurrence_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_exception_occurrence_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_calendar_view_instance_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_instance_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_calendar_view_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_calendar_view_cancel(client,
user_id,
event_id,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_calendar_view_decline(client,
user_id,
event_id,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_calendar_view_dismiss_reminder(client,
user_id,
event_id):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id)
def usersactions_user_calendar_view_forward(client,
user_id,
event_id,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_calendar_view_snooze_reminder(client,
user_id,
event_id,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_calendar_view_tentatively_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_device_enrollment_configuration_assign(client,
user_id,
device_enrollment_configuration_id,
enrollment_configuration_assignments=None):
body = {}
body['enrollment_configuration_assignments'] = enrollment_configuration_assignments
return client.assign(user_id=user_id,
device_enrollment_configuration_id=device_enrollment_configuration_id,
body=body)
def usersactions_user_device_enrollment_configuration_has_payload_link(client,
user_id,
payload_ids=None):
body = {}
body['payload_ids'] = payload_ids
return client.has_payload_links(user_id=user_id,
body=body)
def usersactions_user_device_enrollment_configuration_set_priority(client,
user_id,
device_enrollment_configuration_id,
priority=None):
body = {}
body['priority'] = priority
return client.set_priority(user_id=user_id,
device_enrollment_configuration_id=device_enrollment_configuration_id,
body=body)
def usersactions_user_event_attachment_create_upload_session(client,
user_id,
event_id,
attachment_item=None):
body = {}
body['attachment_item'] = attachment_item
return client.create_upload_session(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_calendar_calendar_view_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_calendar_view_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_calendar_view_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_calendar_view_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_event_calendar_calendar_view_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_calendar_view_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_calendar_view_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_event_calendar_event_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_event_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_calendar_get_schedule(client,
user_id,
event_id,
schedules=None,
end_time=None,
start_time=None,
availability_view_interval=None):
body = {}
body['schedules'] = schedules
body['end_time'] = end_time
body['start_time'] = start_time
body['availability_view_interval'] = availability_view_interval
return client.get_schedule(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_exception_occurrence_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_exception_occurrence_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_exception_occurrence_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_exception_occurrence_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_event_exception_occurrence_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_exception_occurrence_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_exception_occurrence_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_cancel(client,
user_id,
event_id,
event_id1,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_decline(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_dismiss_reminder(client,
user_id,
event_id,
event_id1):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1)
def usersactions_user_event_instance_forward(client,
user_id,
event_id,
event_id1,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_snooze_reminder(client,
user_id,
event_id,
event_id1,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_instance_tentatively_accept(client,
user_id,
event_id,
event_id1,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.tentatively_accept(user_id=user_id,
event_id=event_id,
event_id1=event_id1,
body=body)
def usersactions_user_event_accept(client,
user_id,
event_id,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.accept(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_cancel(client,
user_id,
event_id,
comment=None):
body = {}
body['comment'] = comment
return client.cancel(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_decline(client,
user_id,
event_id,
comment=None,
send_response=None):
if send_response is None:
send_response = False
body = {}
body['comment'] = comment
body['send_response'] = False if send_response is None else send_response
return client.decline(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_dismiss_reminder(client,
user_id,
event_id):
return client.dismiss_reminder(user_id=user_id,
event_id=event_id)
def usersactions_user_event_forward(client,
user_id,
event_id,
to_recipients=None,
comment=None):
body = {}
body['to_recipients'] = to_recipients
body['comment'] = comment
return client.forward(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_snooze_reminder(client,
user_id,
event_id,
new_reminder_time=None):
body = {}
body['new_reminder_time'] = new_reminder_time
return client.snooze_reminder(user_id=user_id,
event_id=event_id,
body=body)
def usersactions_user_event_tentatively_accept(client,
user_id,
event_id,
comment=None,
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : <EMAIL>
# @Date : 2020/11/15 10:16
""" 几何、数学运算
specialist级别
"""
import copy
import numpy as np
import cv2
from pyxllib.algo.intervals import Intervals
____base = """
"""
def xywh2ltrb(p):
""" Convert [x, y, w, h] to [left, top, right, bottom]. """
return [p[0], p[1], p[0] + p[2], p[1] + p[3]]
def ltrb2xywh(p):
""" Convert [left, top, right, bottom] to [x, y, w, h]. """
return [p[0], p[1], p[2] - p[0], p[3] - p[1]]
def rect2polygon(src_pts):
""" 矩形对角线两个点,转成四边形四个点的模式来表达
(输入左上、右下两个顶点坐标)
:param list|np.ndarray src_pts: size 2*2
:rtype: list
>>> rect2polygon([[0, 0], [10, 20]])
[[0, 0], [10, 0], [10, 20], [0, 20]]
>>> rect2polygon(np.array([[0, 0], [10, 20]]))
[[0, 0], [10, 0], [10, 20], [0, 20]]
>>> rect2polygon([[10, 0], [0, 20]])
[[0, 0], [10, 0], [10, 20], [0, 20]]
"""
[[x1, y1], [x2, y2]] = src_pts
dst_pts = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
dst_pts = resort_quad_points(dst_pts)
return dst_pts
def reshape_coords(coords, m, dtype=None):
""" 重置坐标点的维度
:param list coords: 这个函数主要还是封装了对list情况的处理
其实np.ndarray结构也行,但这种情况直接用np接口操作就行,不需要引用该函数
:rtype: list
# flatten into an n*1 layout
>>> reshape_coords([(1, 2), (3, 4)], 1)
[1, 2, 3, 4]
>>> reshape_coords(np.array([[1, 2], [3, 4]]), 1)
[1, 2, 3, 4]
>>> reshape_coords([1, 2, 3, 4], 1)
[1, 2, 3, 4]
>>> reshape_coords([[1.5, 2], [3.5, 4]], 1)
[1.5, 2.0, 3.5, 4.0]
# in this case [3, 4] and [5, 6, 7] are each treated as a single whole
# VisibleDeprecationWarning
>>> reshape_coords([1, 2, [3, 4], [5, 6, 7]], 1)
[1, 2, [3, 4], [5, 6, 7]]
>>> reshape_coords([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], 1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# reshape into an n*2 layout
>>> reshape_coords([1, 2, 3, 4], 2)
[[1, 2], [3, 4]]
>>> reshape_coords(np.array([1, 2, 3, 4]), 2)
[[1, 2], [3, 4]]
>>> reshape_coords([[1, 2], [3, 4]], 2)
[[1, 2], [3, 4]]
>>> reshape_coords([1.5, 2, 3.5, 4], 2)
[[1.5, 2.0], [3.5, 4.0]]
>>> reshape_coords([1.5, 2, 3.5, 4], 2, dtype=int) # dtype conversion
[[1, 2], [3, 4]]
"""
if m == 1:
return np.array(coords, dtype=dtype).reshape(-1).tolist()
else:
return np.array(coords, dtype=dtype).reshape((-1, m)).tolist()
def rect_bounds(coords):
""" 多边形的最大外接矩形
:param coords: 支持list、np等类型,支持1d、2d两种维度表达方式
:return: rect的两个点坐标,同时也是 [left, top, right, bottom]
"""
pts = np.array(coords).reshape(-1).tolist()  # tolist() must stay, otherwise the int type changes (e.g. int64 cannot be json.dump'ed)
p = [min(pts[::2]), min(pts[1::2]), max(pts[::2]), max(pts[1::2])]
return [v for v in p]
def resort_quad_points(src_pts):
""" 重置四边形点集顺序,确保以左上角为起点,顺时针罗列点集
算法:先确保pt1、pt2在上面,然后再确保pt1在pt2左边
:param list|tuple|np.ndarray src_pts: 点集
:rtype: list|np.ndarray
>>> pts = [[100, 50], [200, 0], [100, 0], [0, 50]]
>>> resort_quad_points(pts)
[[100, 0], [200, 0], [100, 50], [0, 50]]
>>> pts # the original points are not modified
[[100, 50], [200, 0], [100, 0], [0, 50]]
>>> pts = np.array([[100, 50], [200, 0], [100, 0], [0, 50]])
>>> resort_quad_points(pts)
array([[100, 0],
[200, 0],
[100, 50],
[ 0, 50]])
>>> pts # the original points are not modified
array([[100, 50],
[200, 0],
[100, 0],
[ 0, 50]])
"""
pts = copy.copy(src_pts)
# copy each row before swapping: swapping numpy row views in a tuple assignment would alias and corrupt the data
if pts[0][1] > pts[2][1]:
pts[0], pts[2] = copy.copy(pts[2]), copy.copy(pts[0])
if pts[1][1] > pts[3][1]:
pts[1], pts[3] = copy.copy(pts[3]), copy.copy(pts[1])
if pts[0][0] > pts[1][0]:
pts[0], pts[1] = copy.copy(pts[1]), copy.copy(pts[0])
pts[2], pts[3] = copy.copy(pts[3]), copy.copy(pts[2])
return pts
def ltrb_border(ltrb, border, size=None):
""" 给原来的ltrb定位扩展border像素
Args:
ltrb:
border: 可以一个数字,表示统一添加的像素值
也可以四个数字,表示每个维度分别加的像素值
size:
原图的 (width, height),防止越界
可以不填,默认不考虑越界问题
Returns: 新的ltrb坐标
"""
if isinstance(border, int):
border = [border] * 4
l = max(0, ltrb[0] - border[0])
t = max(0, ltrb[1] - border[1])
r = ltrb[2] + border[2] if size is None else min(size[0], ltrb[2] + border[2])
b = ltrb[3] + border[3] if size is None else min(size[1], ltrb[3] + border[3])
return [l, t, r, b]
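# Minimal usage sketch with illustrative values:
# >>> ltrb_border([10, 10, 20, 20], 5, size=(100, 100))
# [5, 5, 25, 25]
# >>> ltrb_border([2, 2, 98, 98], 5, size=(100, 100))  # clipped so the box stays inside the 100x100 image
# [0, 0, 100, 100]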
____warp_perspective = """
Affine / perspective transform helpers
https://www.yuque.com/xlpr/pyxllib/warpperspective
"""
def warp_points(pts, warp_mat):
""" 透视等点集坐标转换
:param list|tuple|np.ndarray pts: 支持1d、2d的维度
其实这个坐标变换就是一个简单的矩阵乘法,只是pts的数据结构往往比较特殊,
并不是一个n*3的矩阵结构,所以需要进行一些简单的格式转换
例如 [x1, y1, x2, y2, x3, y3] --> [[x1, x2, x3], [y1, y2, y3], [1, 1, 1]]
:param list|tuple|np.ndarray warp_mat: 变换矩阵,一般是个3*3的矩阵,但是只输入2*3的矩阵也行,因为第3行并用不到(点集只要取前两个维度X'Y'的结果值)
TODO 不过这里我有个点也没想明白,如果不用第3行,本质上不是又变回仿射变换了,如何达到透视变换效果?第三维的深度信息能完全舍弃?
:rtype: np.ndarray
>>> warp_mat = [[0, 1, 0], [1, 0, 0], [0, 0, 1]] # swap x and y
>>> warp_points([[1, 2], [11, 22]], warp_mat) # transform two points
array([[ 2, 1],
[22, 11]])
>>> warp_points([[1, 2], [11, 22]], [[0, 1, 0], [1, 0, 0]]) # a 2*3 transform matrix also works
array([[ 2, 1],
[22, 11]])
>>> warp_points([1, 2, 11, 22], warp_mat) # the point set can also be given as a flat 1d structure
array([[ 2, 1],
[22, 11]])
>>> warp_points([1, 2, 11, 22, 111, 222], warp_mat) # any number of points; the result follows the order of the input
array([[ 2, 1],
[ 22, 11],
[222, 111]])
>>> warp_points(np.array([1, 2, 11, 22, 111, 222]), warp_mat) # np.ndarray and similar structures also work
array([[ 2, 1],
[ 22, 11],
[222, 111]])
"""
pts1 = np.array(pts).reshape(-1, 2).T
pts1 = np.concatenate([pts1, [[1] * pts1.shape[1]]], axis=0)
pts2 = np.dot(warp_mat[:2], pts1)
pts2 = pts2.T
return pts2
def get_warp_mat(src, dst):
""" 从前后点集计算仿射变换矩阵
:param src: 原点集,支持多种格式输入
:param dst: 变换后的点集
:return np.ndarray: 3*3的变换矩阵
"""
def cvt_data(pts):
# OpenCV's transform estimators restrict the input dtype: the point sets must be float32
return np.array(pts, dtype='float32').reshape((-1, 2))
src, dst = cvt_data(src), cvt_data(dst)
n = src.shape[0]
if n == 3:
# with exactly 3 point pairs, use an affine transform
warp_mat = cv2.getAffineTransform(src, dst)
warp_mat = np.concatenate([warp_mat, [[0, 0, 1]]], axis=0)
elif n == 4:
# with 4 point pairs, use a perspective transform
warp_mat = cv2.getPerspectiveTransform(src, dst)
else:
raise ValueError('expected 3 or 4 point pairs')
return warp_mat
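# A minimal sketch of the intended workflow (coordinate values are illustrative): estimate the matrix
# from 4 point pairs, then push further points through it with warp_points. Note that warp_points only
# uses the first two rows, so for a genuinely perspective matrix the mapped points are approximate.
# >>> quad = [[10, 5], [90, 0], [95, 60], [5, 55]]      # source corners, clockwise from top-left
# >>> rect = rect2polygon([[0, 0], [80, 50]])           # destination rectangle corners
# >>> warp_mat = get_warp_mat(quad, rect)               # 3*3 matrix from the 4 point pairs
# >>> warp_points(quad, warp_mat)                       # maps the corners onto (roughly) the rectangle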
def quad_warp_wh(pts, method='average'):
""" 四边形转为矩形的宽、高
:param pts: 四个点坐标
TODO 暂时认为pts是按点集顺时针顺序输入的
TODO 暂时认为pts[0]就是第一个坐标点
:param method:
记四条边分别为w1, h1, w2, h2
average: 平均宽、高
max: 最大宽、高
min: 最小宽、高
:return: (w, h) 变换后的矩形宽、高
"""
# 1 compute the four side lengths
from math import hypot
# pts = ReshapeCoords.list_2d(pts)
lens = [0] * 4
for i in range(4):
pt1, pt2 = pts[i], pts[(i + 1) % 4]
lens[i] = hypot(pt1[0] - pt2[0], pt1[1] - pt2[1])
# 2 target width and height
if method is True:
method = 'average'
if method == 'average':
w, h = (lens[0] + lens[2]) / 2, (lens[1] + lens[3]) / 2
elif method == 'max':
w, h = max(lens[0], lens[2]), max(lens[1], lens[3])
elif method == 'min':
w, h = min(lens[0], lens[2]), min(lens[1], lens[3])
else:
raise ValueError(f'unsupported method {method}')
# this is mainly used for image warping, where pixel coordinates are integers, so round the result
return round(w), round(h)
def warp_quad_pts(pts, method='average'):
""" 将不规则四边形转为矩形
:param pts: 不规则四边形的四个点坐标
:param method: 计算矩形宽、高的算法
:return: 返回时,仍然用四个点的坐标表达,规则矩形的四个点坐标
>>> warp_quad_pts([[89, 424], [931, 424], [399, 290], [621, 290]])
[[0, 0], [532, 0], [532, 549], [0, 549]]
"""
w, h = quad_warp_wh(pts, method)
return rect2polygon([[0, 0], [w, h]])
____polygon = """
"""
class ComputeIou:
""" 两个多边形的交并比 Intersection Over Union """
@classmethod
def ltrb(cls, pts1, pts2):
""" https://gist.github.com/meyerjo/dd3533edc97c81258898f60d8978eddc
"""
# determine the (x, y)-coordinates of the intersection rectangle
x_a = max(pts1[0], pts2[0])
y_a = max(pts1[1], pts2[1])
x_b = min(pts1[2], pts2[2])
y_b = min(pts1[3], pts2[3])
# compute the area of intersection rectangle
inter_area = abs(max((x_b - x_a, 0)) * max((y_b - y_a), 0))
if inter_area == 0:
return 0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = abs((pts1[2] - pts1[0]) * (pts1[3] - pts1[1]))
box_b_area = abs((pts2[2] - pts2[0]) * (pts2[3] - pts2[1]))
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = inter_area / float(box_a_area + box_b_area - inter_area)
# return the intersection over union value
return iou
@classmethod
def polygon(cls, pts1, pts2):
inter_area = pts1.intersection(pts2).area
union_area = pts1.area + pts2.area - inter_area
return (inter_area / union_area) if union_area else 0
@classmethod
def polygon2(cls, pts1, pts2):
""" 会强制转为polygon对象再处理
>>> ComputeIou.polygon2([[0, 0], [10, 10]], [[5, 5], [15, 15]])
0.14285714285714285
"""
from pyxllib.algo.shapely_ import ShapelyPolygon
polygon1, polygon2 = ShapelyPolygon.gen(pts1), ShapelyPolygon.gen(pts2)
return cls.polygon(polygon1, polygon2)
@classmethod
def nms_basic(cls, boxes, func, iou=0.5, *, key=None, index=False):
""" 假设boxes已经按权重从大到小排过序
:param boxes: 支持输入一组box列表 [box1, box2, box3, ...]
:param key: 将框映射为可计算对象
:param index: 返回不是原始框,而是对应的下标 [i1, i2, i3, ...]
"""
# 1 map the boxes to (index, item) pairs so the original indices are kept
if callable(key):
items = list(enumerate([key(b) for b in boxes]))
else:
items = list(enumerate(boxes))
# 2 standard NMS loop
idxs = []
while items:
# 1 keep the highest-scoring remaining box
i, b = items[0]
idxs.append(i)
# 2 suppress the remaining boxes that overlap it too much
left_items = []
for j in range(1, len(items)):
if func(b, items[j][1]) < iou:
left_items.append(items[j])
items = left_items
# 3 return value
if index:
return idxs
else:
return [boxes[i] for i in idxs]
@classmethod
def nms_ltrb(cls, boxes, iou=0.5, *, key=None, index=False):
return cls.nms_basic(boxes, cls.ltrb, iou, key=key, index=index)
@classmethod
def nms_xywh(cls, boxes, iou=0.5, *, key=None, index=False):
if callable(key):
func = lambda x: xywh2ltrb(key(x))
else:
func = xywh2ltrb
return cls.nms_ltrb(boxes, iou, key=func, index=index)
@classmethod
def nms_polygon(cls, boxes, iou=0.5, *, key=None, index=False):
# ShapelyPolygon.gen
return cls.nms_basic(boxes, cls.polygon, iou, key=key, index=index)
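# A minimal NMS sketch with made-up ltrb boxes that are already sorted best-first:
# >>> boxes = [[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]]
# >>> ComputeIou.nms_ltrb(boxes, iou=0.5, index=True)   # the second box overlaps the first too much
# [0, 2]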
____other = """
"""
def divide_quadrangle(coords, r1=0.5, r2=None):
""" 切分一个四边形为两个四边形
:param coords: 4*2的坐标
:param r1: 第一个切分比例,0.5相当于中点(即第一个四边形右边位置)
:param r2: 第二个切分比例,即第二个四边形左边位置
:return: 返回切割后所有的四边形
一般用在改标注结果中,把一个框拆成两个框
TODO 把接口改成切分一个四边形为任意多个四边形?即把r1、r2等整合为一个list参数输入
"""
# 1 helper for computing a split point
def segment_point(pt1, pt2, rate=0.5):
""" 两点间的分割点
:param rate: 默认0.5是二分点,rate为0时即pt1,rate为1时为pt2,取值可以小于0、大于-1
:return:
"""
x1, y1 = pt1
x2, y2 = pt2
x, y = x1 + rate * (x2 - x1), | |
number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
user_accounts:
description:
- Comparison between findings belonging to User Accounts category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"baseline_id": "ocid1.baseline.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"targets": [{
"baseline_target_id": "ocid1.baselinetarget.oc1..xxxxxxEXAMPLExxxxxx",
"current_target_id": "ocid1.currenttarget.oc1..xxxxxxEXAMPLExxxxxx",
"auditing": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"authorization_control": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"data_encryption": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"db_configuration": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"fine_grained_access_control": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"privileges_and_roles": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"user_accounts": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}]
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeSecurityAssessmentComparisonFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"security_assessment_id",
"comparison_security_assessment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_security_assessment_comparison,
security_assessment_id=self.module.params.get("security_assessment_id"),
comparison_security_assessment_id=self.module.params.get(
| |
import numpy as np
HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74],
[3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]]
HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86],
[69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]]
DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72]
def get_content():
content = input("Enter the word to encrypt:")
return content
def string_to_ascii_list(content):
out = []
for letter in content:
out.append(ord(letter))
return out
def ascii_list_to_bin_list(asciiList, binLen=8):
out = []
for ascii in asciiList:
itemBin = bin(ascii)
for i in range(binLen + 2 - len(itemBin)):
out.append(0)
for b in itemBin[2:]:
out.append(int(b))
return out
def bin_to_string(binList, binFormatLen=8):
out = ""
for i in range(int(len(binList) / binFormatLen)):
ascii = ""
for j in range(binFormatLen):
ascii += str(binList[i * binFormatLen + j])
out += chr(int(ascii, 2))
return out
def ascii_list_to_string(list):
str = ""
for item in list:
str += chr(item)
return str
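# A minimal round-trip sketch for the converters above:
# >>> bits = ascii_list_to_bin_list(string_to_ascii_list("AB"))  # 'A' = 65 -> 01000001, 'B' = 66 -> 01000010
# >>> bits[:8]
# [0, 1, 0, 0, 0, 0, 0, 1]
# >>> bin_to_string(bits)
# 'AB'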
def padding_content(content, blocksize=64):
for i in range(int((len(content) - 1) / blocksize + 1) * blocksize - len(content)):
content.append(0)
return content
def drop_padding(content):
for i in range(len(content)):
if content[i] == 0:
return content[:i]
return content
def content_to_block_array(content):
contentBlockArray = []
for i in range(0, int(len(content) / 64)):
contentBlock = []
for j in range(0, 8):
contentLine = []
for k in range(0, 8):
contentLine.append(content[i * 8 * 8 + j * 8 + k])
contentBlock.append(contentLine)
contentBlockArray.append(contentBlock)
return contentBlockArray
def content_to_des_block_array(content):
contentBlockArray = []
for i in range(0, int(len(content) / 64)):
contentBlock = []
for j in range(0, 64):
contentBlock.append(content[i * 64 + j])
contentBlockArray.append(contentBlock)
return contentBlockArray
def block_array_to_content(contentBlockArray, block_height=8, block_length=8):
content = []
for contentBlock in contentBlockArray:
for contentLine in contentBlock:
for contentItem in contentLine:
content.append(contentItem)
return content
def des_block_array_to_content(contentBlockArray):
content = []
for contentBlock in contentBlockArray:
for contentLine in contentBlock:
content.append(contentLine)
return content
def block_to_content(contentBlock, block_height=8, block_length=8):
content = []
for contentLine in contentBlock:
for contentItem in contentLine:
content.append(contentItem)
return content
def hill_encrypt_block_array(contentBlockArray, keyBlock, field):
cipherBlockArray = []
keyBlockNum = 0
for contentBlock in contentBlockArray:
outMetrix = hill_encrypt_block(contentBlock, keyBlock, field)
cipherBlockArray.append(outMetrix)
return cipherBlockArray
def hill_decrypt_block_array(contentBlockArray, keyBlock, field):
plainBlockArray = []
for contentBlock in contentBlockArray:
outMetrix = hill_decrypt_block(contentBlock, keyBlock, field)
plainBlockArray.append(outMetrix)
return plainBlockArray
def hill_encrypt_block(contentBlock, keyBlock, field):
cipherBlock = []
contentArray = np.array(contentBlock)
keyArray = np.array(keyBlock)
cipherBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field)
return cipherBlock
def hill_decrypt_block(contentBlock, keyBlock, field):
plainBlock = []
contentArray = np.array(contentBlock)
keyArray = np.array(keyBlock)
plainBlock = np.ndarray.tolist(np.dot(contentArray, keyArray) % field)
return plainBlock
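# A minimal Hill round-trip sketch over one 8x8 bit block. The modulus 256 and the assumption that
# HILL_KEY_REVERSE is the matrix inverse of HILL_KEY mod 256 are inferred from the key names and the
# 0..255 value range; they are not verified here.
# >>> block = content_to_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list("hi"))))[0]
# >>> cipher = hill_encrypt_block(block, HILL_KEY, 256)
# >>> hill_decrypt_block(cipher, HILL_KEY_REVERSE, 256)   # should equal block if the keys are inverses mod 256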
def des_string_proc(content):
return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content))))
def des_ascii_list_proc(content, formatBase=8):
return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase)))
# def des_encypt_block_array(content,keyBlock):
# cipherBlockArray = []
# contentBlockArray=des_content_proc(content)
# keyBlockNum = 0
# for contentBlock in contentBlockArray:
# outMetrix = des_encypt_block(contentBlock, keyBlock)
# cipherBlockArray.append(outMetrix)
# return cipherBlockArray
def des_encypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8):
cipherBlockArray = []
subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase)
file = open("debug.txt", "a")
file.write("\n加密子密钥:\n")
file.writelines(str(subKeyArray))
file.close()
for contentBlock in contentBlockArray:
outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase)
cipherBlockArray.append(outMetrix)
return cipherBlockArray
def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8):
cipherBlockArray = []
subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase)
subDecryptKeyArray = subKeyArray[::-1]
file = open("debug.txt", "a")
file.write("\n解密子密钥:\n")
file.writelines(str(subDecryptKeyArray))
file.close()
for contentBlock in contentBlockArray:
outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase)
cipherBlockArray.append(outMetrix)
return cipherBlockArray
def list_xor(list1, list2):
out = []
for i in range(len(list1)):
out.append(list1[i] ^ list2[i])
return out
# def des_key_proc(keyBlock):
# return ascii_list_to_bin_list(keyBlock)
def get_sub_key(keyBlock, keyBlockFormatBase=8):
key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase)
file = open("debug.txt", "a")
file.write("\n密钥:\n")
file.writelines(str(key))
file.close()
key56 = des_key_do_pc_1(key)
keyBlock = des_key_do_shift_pc_2(key56)
return keyBlock
def des_do_extend_permutation(content32List):
'''Expansion permutation: maps the 32-bit input to a 48-bit output.'''
'''It is applied to R0, the right half obtained after the IP permutation, expanding the 32-bit input (8 groups of 4 bits) to 48 bits.'''
E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1]
return [content32List[E[i] - 1] for i in range(48)]
def des_key_do_pc_1(keyList):
'''Key permutation PC-1: the 8th bit of each byte is used as a parity bit and ignored, reducing the 64-bit DES key to 56 bits.'''
PC = [
57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4
]
return [keyList[PC[i] - 1] for i in range(56)]
def des_key_do_shift_pc_2(keyList):
'''In each of the 16 DES rounds a different 48-bit subkey is derived from the 56-bit key'''
'''This returns the subkeys for all rounds'''
PC = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32]
MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
result = []
key56=keyList
for i in range(16):
# split into two 28-bit halves and rotate each half left by this round's shift amount
key0 = des_do_shift(key56[:28], MOV[i])
key1 = des_do_shift(key56[28:], MOV[i])
key56 = key0 + key1
# apply the PC-2 permutation to the 56-bit key, compressing it to 48 bits
key48 = [key56[PC[j] - 1] for j in range(48)]
result.append(key48)
return result
def des_do_shift(keyList, mov):
return keyList[mov:] + keyList[:mov]
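# A minimal sketch of the key schedule defined above (note that get_sub_key also appends debug output to debug.txt):
# >>> subkeys = get_sub_key(DES_KEY)
# >>> len(subkeys), len(subkeys[0])
# (16, 48)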
def des_do_s_box(list48):
'''S-box substitution: split the 48-bit input into 8 groups of 6 bits; each group is fed to its S-box and yields 4 bits; return the concatenated 32-bit result.'''
# S-boxes
S_BOX = [[
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
],
[
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
],
[
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
],
[
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
],
[
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
],
[
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
],
[
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, | |
database, then add a webauthn
credential to the test user, then get the state from the db and modify it
as if the user had chosen extra security via the webauthn token,
and finally send the necessary data to actually reset the password.
:param data1: to control what email is sent to create the state and start the process
:param credential_data: to control the data set as webauthn credential on the test user
:param data2: to control the data POSTed to finally reset the password
:param fido2state: to control the fido state kept in the session
"""
mock_request_user_sync.side_effect = self.request_user_sync
mock_get_vccs_client.return_value = TestVCCSClient()
mock_verify.return_value = True
credential = sample_credential.to_dict()
if credential_data:
credential.update(credential_data)
webauthn_credential = Webauthn.from_dict(credential)
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
user.credentials.add(webauthn_credential)
self.app.central_userdb.save(user, check_sync=False)
response = self._post_email_address(data1=data1)
state = self.app.password_reset_state_db.get_state_by_eppn(self.test_user_eppn)
assert isinstance(state, ResetPasswordEmailState)
with self.app.test_request_context():
state.extra_security = get_extra_security_alternatives(user, 'dummy.session.prefix')
state.email_code.is_verified = True
self.app.password_reset_state_db.save(state)
if fido2state is None:
fido2state = {
'challenge': '3h_EAZpY25xDdSJCOMx1ABZEA5Odz3yejUI3AUNTQWc',
'user_verification': 'preferred',
}
with self.app.test_request_context():
url = url_for('reset_password.set_new_pw_extra_security_token', _external=True)
with self.session_cookie_anon(self.browser) as c:
with c.session_transaction() as sess:
sess['eduid_webapp.reset_password.views.webauthn.state'] = json.dumps(fido2state)
new_password = generate_suggested_password(self.app.conf.password_length)
sess.reset_password.generated_password_hash = <PASSWORD>password(<PASSWORD>)
data = {
'email_code': state.email_code.code,
'password': <PASSWORD> <PASSWORD>,
'csrf_token': response.json['payload']['csrf_token'],
}
data.update(SAMPLE_WEBAUTHN_REQUEST)
if data2 == {}:
data = {}
elif data2 is not None:
data.update(data2)
return c.post(url, data=json.dumps(data), content_type=self.content_type_json)
def _get_email_code_backdoor(self, data1: Optional[dict] = None):
"""
Create a password rest state for the test user, grab the created verification code from the db,
and use it to get configuration for the reset form.
:param data1: to control the data (email) sent to create the reset state
"""
self._post_email_address(data1=data1)
state = self.app.password_reset_state_db.get_state_by_eppn(self.test_user_eppn)
assert isinstance(state, ResetPasswordEmailState)
with self.session_cookie_anon(self.browser) as client:
client.set_cookie('localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie)
eppn = quote_plus(self.test_user_eppn)
return client.get(f'/get-email-code?eppn={eppn}')
@patch('eduid_common.authn.vccs.get_vccs_client')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _get_phone_code_backdoor(
self,
mock_sendsms: Any,
mock_request_user_sync: Any,
mock_get_vccs_client: Any,
sendsms_side_effect: Any = None,
):
"""
Test choosing extra security via a confirmed phone number to reset the password,
and getting the generated phone verification code through the backdoor
"""
mock_request_user_sync.side_effect = self.request_user_sync
mock_get_vccs_client.return_value = TestVCCSClient()
mock_sendsms.return_value = True
if sendsms_side_effect:
mock_sendsms.side_effect = sendsms_side_effect
response = self._post_email_address()
state = self.app.password_reset_state_db.get_state_by_eppn(self.test_user_eppn)
assert isinstance(state, ResetPasswordEmailState)
with self.app.test_request_context():
config_url = url_for('reset_password.verify_email', _external=True)
extra_security_phone_url = url_for('reset_password.choose_extra_security_phone', _external=True)
with self.session_cookie_anon(self.browser) as client:
data = {
'email_code': state.email_code.code,
'csrf_token': response.json['payload']['csrf_token'],
}
response = client.post(config_url, data=json.dumps(data), content_type=self.content_type_json)
self.assertEqual(200, response.status_code)
with self.session_cookie_anon(self.browser) as client:
data = {
'csrf_token': response.json['payload']['csrf_token'],
'email_code': state.email_code.code,
'phone_index': '0',
}
response = client.post(extra_security_phone_url, data=json.dumps(data), content_type=self.content_type_json)
self.assertEqual(200, response.status_code)
client.set_cookie('localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie)
eppn = quote_plus(self.test_user_eppn)
return client.get(f'/get-phone-code?eppn={eppn}')
# actual tests
def test_get_zxcvbn_terms(self):
with self.app.test_request_context():
terms = get_zxcvbn_terms(self.test_user)
self.assertEqual(['John', 'Smith', 'John', 'Smith', 'johnsmith', 'johnsmith2'], terms)
def test_get_zxcvbn_terms_no_given_name(self):
with self.app.test_request_context():
self.test_user.given_name = ''
self.app.central_userdb.save(self.test_user, check_sync=False)
terms = get_zxcvbn_terms(self.test_user)
self.assertEqual(['John', 'Smith', 'Smith', 'johnsmith', 'johnsmith2'], terms)
def test_get_zxcvbn_terms_no_surname(self):
with self.app.test_request_context():
self.test_user.surname = ''
self.app.central_userdb.save(self.test_user, check_sync=False)
terms = get_zxcvbn_terms(self.test_user)
self.assertEqual(['John', 'Smith', 'John', 'johnsmith', 'johnsmith2'], terms)
def test_get_zxcvbn_terms_no_display_name(self):
with self.app.test_request_context():
self.test_user.display_name = ''
self.app.central_userdb.save(self.test_user, check_sync=False)
terms = get_zxcvbn_terms(self.test_user)
self.assertEqual(['John', 'Smith', 'johnsmith', 'johnsmith2'], terms)
def test_app_starts(self):
self.assertEqual('reset_password', self.app.conf.app_name)
def test_post_email_address(self):
response = self._post_email_address()
self._check_success_response(response, msg=ResetPwMsg.reset_pw_initialized, type_='POST_RESET_PASSWORD_SUCCESS')
state = self.app.password_reset_state_db.get_state_by_eppn(self.test_user_eppn)
self.assertEqual(state.email_address, '<EMAIL>')
def test_post_email_address_sendmail_fail(self):
from eduid_common.api.exceptions import MailTaskFailed
response = self._post_email_address(sendmail_return=False, sendmail_side_effect=MailTaskFailed)
self._check_error_response(response, msg=ResetPwMsg.email_send_failure, type_='POST_RESET_PASSWORD_FAIL')
@patch('eduid_userdb.userdb.UserDB.get_user_by_mail')
def test_post_email_uncomplete_signup(self, mock_get_user: Mock):
mock_get_user.side_effect = UserHasNotCompletedSignup('incomplete signup')
response = self._post_email_address()
self._check_error_response(response, msg=ResetPwMsg.invalid_user, type_='POST_RESET_PASSWORD_FAIL')
def test_post_unknown_email_address(self):
data = {'email': '<EMAIL>'}
response = self._post_email_address(data1=data)
self._check_error_response(response, msg=ResetPwMsg.user_not_found, type_='POST_RESET_PASSWORD_FAIL')
def test_post_invalid_email_address(self):
data = {'email': 'invalid-address'}
response = self._post_email_address(data1=data)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_FAIL', payload={'error': {'email': ['Not a valid email address.']}},
)
def test_post_reset_code(self):
response = self._post_reset_code()
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_VERIFY_EMAIL_SUCCESS',
payload={
'email_address': '<EMAIL>smith<EMAIL>',
'extra_security': {'phone_numbers': [{'index': 0, 'number': 'XXXXXXXXXX09'}]},
'password_entropy': 25,
'password_length': 12,
'password_service_url': '/services/reset-password/',
'success': True,
'zxcvbn_terms': ['John', 'Smith', 'John', 'Smith', 'johnsmith', 'johnsmith2'],
},
)
def test_post_reset_code_unknown_email(self):
data1 = {'email': '<EMAIL>'}
with self.assertRaises(DocumentDoesNotExist):
self._post_reset_code(data1=data1)
def test_post_reset_code_no_extra_sec(self):
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
# Unverify phone numbers
for number in user.phone_numbers.verified.to_list():
user.phone_numbers.remove(number.key)
self.app.central_userdb.save(user)
response = self._post_reset_code()
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_VERIFY_EMAIL_SUCCESS',
payload={
'email_address': '<EMAIL>',
'extra_security': {},
'password_entropy': 25,
'password_length': 12,
'password_service_url': '/services/reset-password/',
'success': True,
'zxcvbn_terms': ['John', 'Smith', 'John', 'Smith', 'johnsmith', 'johnsmith2'],
},
)
def test_post_reset_wrong_code(self):
data2 = {'email_code': 'wrong-code'}
response = self._post_reset_code(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_VERIFY_EMAIL_FAIL', msg=ResetPwMsg.state_not_found
)
def test_post_reset_wrong_csrf(self):
data2 = {'csrf_token': 'wrong-code'}
response = self._post_reset_code(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_VERIFY_EMAIL_FAIL', error={'csrf_token': ['CSRF failed to validate']},
)
def test_post_reset_password(self):
response = self._post_reset_password()
self._check_success_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_SUCCESS', msg=ResetPwMsg.pw_reset_success
)
# check that the user no longer has verified data
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
verified_phone_numbers = user.phone_numbers.verified.to_list()
self.assertEqual(len(verified_phone_numbers), 0)
verified_nins = user.nins.verified.to_list()
self.assertEqual(len(verified_nins), 0)
# check that the password is marked as generated
self.assertTrue(user.credentials.to_list()[0].is_generated)
def test_post_reset_password_no_data(self):
response = self._post_reset_password(data2={})
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_FAIL',
error={
'email_code': ['Missing data for required field.'],
'csrf_token': ['Missing data for required field.'],
'password': ['<PASSWORD>.'],
},
)
def test_post_reset_password_weak(self):
data2 = {'password': 'pw'}
response = self._post_reset_password(data2=data2)
self._check_error_response(response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_FAIL', msg=ResetPwMsg.resetpw_weak)
def test_post_reset_password_no_csrf(self):
data2 = {'csrf_token': ''}
response = self._post_reset_password(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_FAIL', error={'csrf_token': ['CSRF failed to validate'],},
)
def test_post_reset_password_wrong_code(self):
data2 = {'email_code': 'wrong-code'}
response = self._post_reset_password(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_FAIL', msg=ResetPwMsg.state_not_found
)
# check that the user still has verified data
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
verified_phone_numbers = user.phone_numbers.verified.to_list()
self.assertEqual(len(verified_phone_numbers), 1)
verified_nins = user.nins.verified.to_list()
self.assertEqual(len(verified_nins), 2)
def test_post_reset_password_custom(self):
data2 = {'password': '<PASSWORD>'}
response = self._post_reset_password(data2=data2)
self._check_success_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_SUCCESS', msg=ResetPwMsg.pw_reset_success
)
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertFalse(user.credentials.to_list()[0].is_generated)
def test_post_choose_extra_sec(self):
response = self._post_choose_extra_sec()
self._check_success_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_SUCCESS', msg=ResetPwMsg.send_sms_success
)
def test_post_choose_extra_sec_sms_fail(self):
self.app.conf.throttle_sms_seconds = 300
from eduid_common.api.exceptions import MsgTaskFailed
response = self._post_choose_extra_sec(sendsms_side_effect=MsgTaskFailed())
self._check_error_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.send_sms_failure
)
def test_post_choose_extra_sec_throttled(self):
self.app.conf.throttle_sms_seconds = 300
response = self._post_choose_extra_sec(repeat=True)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.send_sms_throttled
)
def test_post_choose_extra_sec_not_throttled(self):
self.app.conf.throttle_sms_seconds = 0
response = self._post_choose_extra_sec(repeat=True)
self._check_success_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_SUCCESS', msg=ResetPwMsg.send_sms_success
)
def test_post_choose_extra_sec_wrong_code(self):
data2 = {'email_code': 'wrong-code'}
response = self._post_choose_extra_sec(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.email_not_validated
)
def test_post_choose_extra_sec_bad_phone_index(self):
data3 = {'phone_index': '3'}
response = self._post_choose_extra_sec(data3=data3)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.unknown_phone_number
)
def test_post_choose_extra_sec_wrong_csrf_token(self):
data3 = {'csrf_token': 'wrong-token'}
response = self._post_choose_extra_sec(data3=data3)
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL',
error={'csrf_token': ['CSRF failed to validate']},
)
def test_post_choose_extra_sec_wrong_final_code(self):
data3 = {'email_code': 'wrong-code'}
response = self._post_choose_extra_sec(data3=data3)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.state_not_found
)
def test_post_reset_password_secure_phone(self):
response = self._post_reset_password_secure_phone()
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_SUCCESS',
msg=ResetPwMsg.pw_reset_success,
)
# check that the user still has verified data
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
verified_phone_numbers = user.phone_numbers.verified.to_list()
self.assertEqual(1, len(verified_phone_numbers))
verified_nins = user.nins.verified.to_list()
self.assertEqual(2, len(verified_nins))
@patch('eduid_webapp.reset_password.views.reset_password.verify_phone_number')
def test_post_reset_password_secure_phone_verify_fail(self, mock_verify: Any):
mock_verify.return_value = False
response = self._post_reset_password_secure_phone()
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.phone_invalid
)
def test_post_reset_password_secure_phone_wrong_csrf_token(self):
data2 = {'csrf_token': '<PASSWORD>'}
response = self._post_reset_password_secure_phone(data2=data2)
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL',
error={'csrf_token': ['CSRF failed to validate']},
)
def test_post_reset_password_secure_phone_wrong_email_code(self):
data2 = {'email_code': 'wrong-code'}
response = self._post_reset_password_secure_phone(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.state_not_found
)
def test_post_reset_password_secure_phone_wrong_sms_code(self):
data2 = {'phone_code': 'wrong-code'}
response = self._post_reset_password_secure_phone(data2=data2)
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL',
msg=ResetPwMsg.unknown_phone_code,
)
def test_post_reset_password_secure_phone_weak_password(self):
data2 = {'password': 'pw'}
response = self._post_reset_password_secure_phone(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL', msg=ResetPwMsg.resetpw_weak
)
def test_post_reset_password_secure_token(self):
response = self._post_reset_password_secure_token()
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_SUCCESS',
msg=ResetPwMsg.pw_reset_success,
)
# check that the user still has verified data
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
verified_phone_numbers = user.phone_numbers.verified.to_list()
self.assertEqual(1, len(verified_phone_numbers))
verified_nins = user.nins.verified.to_list()
self.assertEqual(2, len(verified_nins))
def test_post_reset_password_secure_token_custom_pw(self):
response = self._post_reset_password_secure_token(custom_password='<PASSWORD>')
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_SUCCESS',
msg=ResetPwMsg.pw_reset_success,
)
user = self.app.central_userdb.get_user_by_eppn(self.test_user_eppn)
for cred in user.credentials.filter(Password).to_list():
self.assertFalse(cred.is_generated)
def test_post_reset_password_secure_token_no_data(self):
response = self._post_reset_password_secure_token(data2={})
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL',
error={
'email_code': ['Missing data for required field.'],
'csrf_token': ['Missing data for required field.'],
'password': ['Missing data for required field.'],
},
)
def test_post_reset_password_secure_token_wrong_credential(self):
credential_data = {
'credential_data': '<KEY>'
}
response = self._post_reset_password_secure_token(credential_data=credential_data)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL', msg=ResetPwMsg.fido_token_fail
)
def test_post_reset_password_secure_token_wrong_request(self):
data2 = {'authenticatorData': 'Wrong-authenticatorData----UMmBLDxB7n3apMPQAAAAAAA'}
response = self._post_reset_password_secure_token(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL', msg=ResetPwMsg.fido_token_fail
)
def test_post_reset_password_secure_token_wrong_csrf(self):
data2 = {'csrf_token': 'wrong-<PASSWORD>'}
response = self._post_reset_password_secure_token(data2=data2)
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL',
error={'csrf_token': ['CSRF failed to validate']},
)
def test_post_reset_password_secure_token_wrong_code(self):
data2 = {'email_code': 'wrong-code'}
response = self._post_reset_password_secure_token(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL', msg=ResetPwMsg.state_not_found
)
def test_post_reset_password_secure_token_weak_password(self):
data2 = {'password': 'pw'}
response = self._post_reset_password_secure_token(data2=data2)
self._check_error_response(
response, type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_TOKEN_FAIL', msg=ResetPwMsg.resetpw_weak
)
def test_post_reset_password_secure_email_timeout(self):
self.app.conf.email_code_timeout = 0
response = self._post_reset_password_secure_phone()
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL',
msg=ResetPwMsg.expired_email_code,
)
def test_post_reset_password_secure_phone_timeout(self):
self.app.conf.phone_code_timeout = 0
response = self._post_reset_password_secure_phone()
self._check_error_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_FAIL',
msg=ResetPwMsg.expired_phone_code,
)
def test_post_reset_password_secure_phone_custom(self):
data2 = {'password': '<PASSWORD>'}
response = self._post_reset_password_secure_phone(data2=data2)
self._check_success_response(
response,
type_='POST_RESET_PASSWORD_NEW_PASSWORD_EXTRA_SECURITY_PHONE_SUCCESS',
msg=ResetPwMsg.pw_reset_success,
)
# check that the password is marked as generated
user = self.app.private_userdb.get_user_by_eppn(self.test_user_eppn)
self.assertFalse(user.credentials.to_list()[0].is_generated)
def test_get_code_backdoor(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
resp = self._get_email_code_backdoor()
state = self.app.password_reset_state_db.get_state_by_eppn(self.test_user_eppn)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, state.email_code.code.encode('ascii'))
def test_get_code_no_backdoor_in_pro(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'pro'
resp = self._get_email_code_backdoor()
self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured1(self):
        self.app.conf.magic_cookie
[
{
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
},
]
}
"""
pass
def create_instances_from_snapshot(instanceNames=None, availabilityZone=None, instanceSnapshotName=None, bundleId=None, userData=None, keyPairName=None):
"""
Uses a specific snapshot as a blueprint for creating one or more new instances that are based on that identical configuration.
See also: AWS API Documentation
:example: response = client.create_instances_from_snapshot(
instanceNames=[
'string',
],
availabilityZone='string',
instanceSnapshotName='string',
bundleId='string',
userData='string',
keyPairName='string'
)
:type instanceNames: list
:param instanceNames: [REQUIRED]
The names for your new instances.
(string) --
:type availabilityZone: string
:param availabilityZone: [REQUIRED]
The Availability Zone where you want to create your instances. Use the following formatting: us-east-1a (case sensitive). You can get a list of availability zones by using the get regions operation. Be sure to add the include availability zones parameter to your request.
:type instanceSnapshotName: string
:param instanceSnapshotName: [REQUIRED]
The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots.
:type bundleId: string
:param bundleId: [REQUIRED]
The bundle of specification information for your virtual private server (or instance ), including the pricing plan (e.g., micro_1_0 ).
:type userData: string
    :param userData: You can create a launch script that configures a server with additional user data. For example, apt-get -y update .
Note
Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum , Debian and Ubuntu use apt-get , and FreeBSD uses pkg . For a complete list, see the Dev Guide .
:type keyPairName: string
:param keyPairName: The name for your key pair.
:rtype: dict
:return: {
'operations': [
{
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
},
]
}
"""
pass
def create_key_pair(keyPairName=None):
"""
    Creates an SSH key pair.
See also: AWS API Documentation
:example: response = client.create_key_pair(
keyPairName='string'
)
:type keyPairName: string
:param keyPairName: [REQUIRED]
The name for your new key pair.
:rtype: dict
:return: {
'keyPair': {
'name': 'string',
'arn': 'string',
'supportCode': 'string',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'fingerprint': 'string'
},
'publicKeyBase64': 'string',
'privateKeyBase64': 'string',
'operation': {
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
}
}
"""
pass
def delete_domain(domainName=None):
"""
Deletes the specified domain recordset and all of its domain records.
See also: AWS API Documentation
:example: response = client.delete_domain(
domainName='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The specific domain name to delete.
:rtype: dict
:return: {
'operation': {
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
}
}
"""
pass
def delete_domain_entry(domainName=None, domainEntry=None):
"""
Deletes a specific domain entry.
See also: AWS API Documentation
:example: response = client.delete_domain_entry(
domainName='string',
domainEntry={
'id': 'string',
'name': 'string',
'target': 'string',
'type': 'string',
'options': {
'string': 'string'
}
}
)
:type domainName: string
:param domainName: [REQUIRED]
The name of the domain entry to delete.
:type domainEntry: dict
:param domainEntry: [REQUIRED]
An array of key-value pairs containing information about your domain entries.
id (string) --The ID of the domain recordset entry.
name (string) --The name of the domain.
target (string) --The target AWS name server (e.g., ns-111.awsdns-22.com. ).
type (string) --The type of domain entry (e.g., SOA or NS ).
options (dict) --The options for the domain entry.
(string) --
(string) --
:rtype: dict
:return: {
'operation': {
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
}
}
"""
pass
def delete_instance(instanceName=None):
"""
Deletes a specific Amazon Lightsail virtual private server, or instance .
See also: AWS API Documentation
:example: response = client.delete_instance(
instanceName='string'
)
:type instanceName: string
:param instanceName: [REQUIRED]
The name of the instance to delete.
:rtype: dict
:return: {
'operations': [
{
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
},
]
}
"""
pass
def delete_instance_snapshot(instanceSnapshotName=None):
"""
Deletes a specific snapshot of a virtual private server (or instance ).
See also: AWS API Documentation
:example: response = client.delete_instance_snapshot(
instanceSnapshotName='string'
)
:type instanceSnapshotName: string
:param instanceSnapshotName: [REQUIRED]
The name of the snapshot to delete.
:rtype: dict
:return: {
'operations': [
{
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
},
]
}
"""
pass
def delete_key_pair(keyPairName=None):
"""
Deletes a specific SSH key pair.
See also: AWS API Documentation
:example: response = client.delete_key_pair(
keyPairName='string'
)
:type keyPairName: string
:param keyPairName: [REQUIRED]
The name of the key pair to delete.
:rtype: dict
:return: {
'operation': {
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
}
}
"""
pass
def detach_static_ip(staticIpName=None):
"""
Detaches a static IP from the Amazon Lightsail instance to which it is attached.
See also: AWS API Documentation
:example: response = client.detach_static_ip(
staticIpName='string'
)
:type staticIpName: string
:param staticIpName: [REQUIRED]
The name of the static IP to detach from the instance.
:rtype: dict
:return: {
'operations': [
{
'id': 'string',
'resourceName': 'string',
'resourceType': 'Instance'|'StaticIp'|'KeyPair'|'InstanceSnapshot'|'Domain'|'PeeredVpc',
'createdAt': datetime(2015, 1, 1),
'location': {
'availabilityZone': 'string',
'regionName': 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'eu-central-1'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'
},
'isTerminal': True|False,
'operationDetails': 'string',
'operationType': 'DeleteInstance'|'CreateInstance'|'StopInstance'|'StartInstance'|'RebootInstance'|'OpenInstancePublicPorts'|'PutInstancePublicPorts'|'CloseInstancePublicPorts'|'AllocateStaticIp'|'ReleaseStaticIp'|'AttachStaticIp'|'DetachStaticIp'|'UpdateDomainEntry'|'DeleteDomainEntry'|'CreateDomain'|'DeleteDomain'|'CreateInstanceSnapshot'|'DeleteInstanceSnapshot'|'CreateInstancesFromSnapshot',
'status': 'NotStarted'|'Started'|'Failed'|'Completed',
'statusChangedAt': datetime(2015, 1, 1),
'errorCode': 'string',
'errorDetails': 'string'
},
]
}
"""
pass
def download_default_key_pair():
"""
Downloads the default SSH key pair from the user's account.
See also: AWS API Documentation
:example: response = client.download_default_key_pair()
:rtype: dict
:return: {
'publicKeyBase64': 'string',
'privateKeyBase64': 'string'
}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_active_names(pageToken=None):
"""
Returns the names of all active (not deleted) resources.
See also: AWS API Documentation
:example: response = client.get_active_names(
pageToken='string'
)
:type pageToken: string
:param pageToken: A token used for paginating results from your get active names request.
:rtype: dict
:return: {
'activeNames': [
'string',
],
'nextPageToken': 'string'
}
"""
pass
def get_blueprints(includeInactive=None, pageToken=None):
"""
    Returns the list of available instance images, or blueprints . You can use a blueprint to create a new virtual private server already
now contains the list of 'good' tilelocid
#read in the big combined data file
dz = Table.read(zf)
#find the rows that satisfy the target type
wtype = ((dz[desitarg] & bit) > 0)
if notqso == 'notqso':
print('removing QSO targets')
wtype &= ((dz[desitarg] & qsobit) == 0)
#find the rows that are 'good' tilelocid
wg = np.isin(dz['TILELOCID'],gtl)
print(len(dz[wtype]))
print(len(dz[wg]))
#down-select to target type of interest and good tilelocid
dz = dz[wtype&wg]
print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
print('length of unique targetid after selecting type and fiberstatus == 0 '+str(len(np.unique(dz['TARGETID']))))
#find targets that were never available at the same location as a target of the same type that got assigned to a good location
#those that were never available are assumed to have 0 probability of assignment so we want to veto this location
lznp = find_znotposs(dz)
wk = ~np.isin(dz['TILELOCID'],lznp)#dz['ZPOSS'] == 1
dz = dz[wk] #0 probability locations now vetoed
print('length after priority veto '+str(len(dz)))
print('joining to full imaging')
ftar = Table.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+pd+'_targets.fits')
ftar.keep_columns(['TARGETID','EBV','FLUX_G','FLUX_R','FLUX_Z','FLUX_IVAR_G','FLUX_IVAR_R','FLUX_IVAR_Z','MW_TRANSMISSION_G','MW_TRANSMISSION_R',\
'MW_TRANSMISSION_Z','FRACFLUX_G','FRACFLUX_R','FRACFLUX_Z','FRACMASKED_G','FRACMASKED_R','FRACMASKED_Z','FRACIN_G','FRACIN_R',\
'FRACIN_Z','NOBS_G','NOBS_R','NOBS_Z','PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GALDEPTH_G','GALDEPTH_R','GALDEPTH_Z','FLUX_W1',\
'FLUX_W2','FLUX_IVAR_W1','FLUX_IVAR_W2','MW_TRANSMISSION_W1','MW_TRANSMISSION_W2','ALLMASK_G','ALLMASK_R','ALLMASK_Z','FIBERFLUX_G',\
'FIBERFLUX_R','FIBERFLUX_Z','FIBERTOTFLUX_G','FIBERTOTFLUX_R','FIBERTOTFLUX_Z','WISEMASK_W1','WISEMASK_W2','MASKBITS',\
'RELEASE','BRICKID','BRICKNAME','BRICK_OBJID','MORPHTYPE','PHOTSYS'])
dz = join(dz,ftar,keys=['TARGETID'])
print('length after join to full targets (should be same) '+str(len(dz)))
#apply imaging veto mask
dz = cutphotmask(dz,imbits)
#load in file with information about where repeats occurred and join it
dtl = Table.read(ftiles)
dtl.keep_columns(['TARGETID','NTILE','TILES','TILELOCIDS'])
dz = join(dz,dtl,keys='TARGETID')
#find the rows where we have spectroscopic observations
wz = dz['ZWARN'] != 999999 #this is what the null column becomes
wz &= dz['ZWARN']*0 == 0 #just in case of nans
#mark them as having LOCATION_ASSIGNED
dz['LOCATION_ASSIGNED'] = np.zeros(len(dz)).astype('bool')
dz['LOCATION_ASSIGNED'][wz] = 1
#find the TILELOCID that were assigned and mark them as so
tlids = np.unique(dz['TILELOCID'][wz])
wtl = np.isin(dz['TILELOCID'],tlids)
dz['TILELOCID_ASSIGNED'] = 0
dz['TILELOCID_ASSIGNED'][wtl] = 1
print('number of unique targets at assigned tilelocid:')
print(len(np.unique(dz[wtl]['TARGETID'])))
#get OII flux info for ELGs
if tp == 'ELG' or tp == 'ELG_HIP':
if azf != '':
arz = fitsio.read(azf,columns=[fbcol,'TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR','SUBSET','DELTACHI2'])
st = []
for i in range(0,len(arz)):
st.append(arz['SUBSET'][i][:4])
st = np.array(st)
wg = arz[fbcol] == 0
wg &= st == "thru"
arz = arz[wg]
o2c = np.log10(arz['OII_FLUX'] * np.sqrt(arz['OII_FLUX_IVAR']))+0.2*np.log10(arz['DELTACHI2'])
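            #o2c combines the log10 of the [OII] flux signal-to-noise (flux * sqrt(ivar)) with 0.2*log10(DELTACHI2)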
w = (o2c*0) != 0
w |= arz['OII_FLUX'] < 0
o2c[w] = -20
#arz.keep_columns(['TARGETID','LOCATION','TILEID','o2c','OII_FLUX','OII_SIGMA'])#,'Z','ZWARN','TSNR2_ELG'])
arz = Table(arz)
arz['o2c'] = o2c
dz = join(dz,arz,keys=['TARGETID','LOCATION','TILEID'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['', '_OII'])
dz.remove_columns(['SUBSET','DELTACHI2_OII',fbcol+'_OII'])
print('check length after merge with OII strength file:' +str(len(dz)))
if tp[:3] == 'QSO':
if azf != '':
arz = Table.read(azf)
arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','ZERR','Z_QN'])
print(arz.dtype.names)
#arz['TILE'].name = 'TILEID'
dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
#sort and then cut to unique targetid; sort prioritizes observed targets and then TSNR2
dz['sort'] = dz['LOCATION_ASSIGNED']*dz[tscol]+dz['TILELOCID_ASSIGNED']
dz.sort('sort')
dz = unique(dz,keys=['TARGETID'],keep='last')
if tp == 'ELG' or tp == 'ELG_HIP':
print('number of masked oII row (hopefully matches number not assigned) '+ str(np.sum(dz['o2c'].mask)))
if tp == 'QSO':
print('number of good z according to qso file '+str(len(dz)-np.sum(dz['Z'].mask)))
print('length after cutting to unique targetid '+str(len(dz)))
print('LOCATION_ASSIGNED numbers')
print(np.unique(dz['LOCATION_ASSIGNED'],return_counts=True))
print('TILELOCID_ASSIGNED numbers')
print(np.unique(dz['TILELOCID_ASSIGNED'],return_counts=True))
probl = np.zeros(len(dz))
#get completeness based on unique sets of tiles
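    #e.g. (illustrative numbers) if a given TILES combination contains 200 targets of which 150 were assigned, COMP_TILE = 0.75 for all of them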
compa = []
tll = []
ti = 0
    print('getting completeness')
#sorting by tiles makes things quicker with while statements below
dz.sort('TILES')
nts = len(np.unique(dz['TILES']))
tlsl = dz['TILES']
tlslu = np.unique(tlsl)
laa = dz['LOCATION_ASSIGNED']
i = 0
while i < len(dz):
tls = []
tlis = []
nli = 0
nai = 0
while tlsl[i] == tlslu[ti]:
nli += 1 #counting unique targetids within the given TILES value
nai += laa[i] #counting the number assigned
i += 1
if i == len(dz):
break
if ti%1000 == 0:
print('at tiles '+str(ti)+' of '+str(nts))
cp = nai/nli #completeness is number assigned over number total
compa.append(cp)
tll.append(tlslu[ti])
ti += 1
#turn the above into a dictionary and apply it
comp_dicta = dict(zip(tll, compa))
fcompa = []
for tl in dz['TILES']:
fcompa.append(comp_dicta[tl])
dz['COMP_TILE'] = np.array(fcompa)
wc0 = dz['COMP_TILE'] == 0
print('number of targets in 0 completeness regions '+str(len(dz[wc0])))
#get counts at unique TILELOCID
locl,nlocl = np.unique(dz['TILELOCID'],return_counts=True)
#do same after cutting to only the data with location_assigned
wz = dz['LOCATION_ASSIGNED'] == 1
dzz = dz[wz]
loclz,nloclz = np.unique(dzz['TILELOCID'],return_counts=True)
natloc = ~np.isin(dz['TILELOCID'],loclz)
print('number of unique targets left around unassigned locations is '+str(np.sum(natloc)))
locs = np.copy(dz['TILELOCID'])
#
#
print('reassigning TILELOCID for duplicates and finding rosette')
#re-assigning "naked" targets; if we gave a targetid a tilelocid that was not assigned
#by the same target was available at a location that was assigned, we re-assign its tilelocid
nch = 0
nbl = 0
tlids = dz['TILELOCIDS']
ros = np.zeros(len(dz))
rosr = np.zeros(len(dz))
    for ii in range(0,len(dz['TILEID'])): #not sure why, but this only works when using a loop for Table.read, while the array option works for fitsio.read
ti = dz[ii]['TILEID']
rosn = tile2rosette(ti) #get rosette id
rosr[ii] = calc_rosr(rosn,dz[ii]['RA'],dz[ii]['DEC']) #calculates distance in degrees from rosette center
ros[ii] = rosn
if natloc[ii]:# == False:
nbl += 1
s = 0
tids = tlids[ii].split('-')
if s == 0:
for tl in tids:
ttlocid = int(tl)
if np.isin(ttlocid,loclz):
locs[ii] = ttlocid
nch += 1
s = 1
break
if ii%10000 == 0:
print(ii,len(dz['TILEID']),ti,ros[ii],nch,nbl)
dz['TILELOCID'] = locs
#get numbers again after the re-assignment
locl,nlocl = np.unique(dz['TILELOCID'],return_counts=True)
loclz,nloclz = np.unique(dzz['TILELOCID'],return_counts=True)
dz['rosette_number'] = ros
dz['rosette_r'] = rosr
print('rosette number and the number on each rosette')
print(np.unique(dz['rosette_number'],return_counts=True))
print('getting fraction assigned for each tilelocid')
#should be one (sometimes zero, though) assigned target at each tilelocid and we are now counting how many targets there are per tilelocid
#probability of assignment is then estimated as 1/n_tilelocid
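    #e.g. (illustrative numbers) if 4 targets share a TILELOCID and exactly 1 of them was assigned, each gets FRACZ_TILELOCID = 1/4 = 0.25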
nm = 0
nmt =0
pd = []
for i in range(0,len(locl)):
if i%10000 == 0:
print('at row '+str(i))
nt = nlocl[i]
loc = locl[i]
w = loclz == loc
nz = 0
if len(loclz[w]) == 1:
            nz = nloclz[w] #these are all supposed to be 1...
else:
nm += 1.
nmt += nt
if len(loclz[w]) > 1:
print('why is len(loclz[w]) > 1?') #this should never happen
pd.append((loc,nz/nt))
pd = dict(pd)
for i in range(0,len(dz)):
probl[i] = pd[dz['TILELOCID'][i]]
print('number of fibers with no observation, number targets on those fibers')
print(nm,nmt)
dz['FRACZ_TILELOCID'] = probl
    print('sum of 1/FRACZ_TILELOCID, 1/COMP_TILE, and length of input; these do not quite match because some tilelocid still have 0 assigned')
print(np.sum(1./dz[wz]['FRACZ_TILELOCID']),np.sum(1./dz[wz]['COMP_TILE']),len(dz),len(dz[wz]))
#dz['WEIGHT_ZFAIL'] = np.ones(len(dz))
oct = np.copy(dz['COMP_TILE'])
if bitweightfile is not None:
fb = fitsio.read(bitweightfile)
dz = join(dz,fb,keys=['TARGETID'])
wz = dz['LOCATION_ASSIGNED'] == 1 #join re-ordered array, reget mask for assigned locations and check comp_tile
print('length after join with bitweight file and sum of 1/comp_tile',len(dz),np.sum(1./dz[wz]['COMP_TILE']),len(dz[wz]))
#print('check comp_tile array',np.array_equal(oct,dz['COMP_TILE']))
#for debugging writeout
for col in dz.dtype.names:
to = Table()
to[col] = dz[col]
#print(col)
try:
to.write('temp.fits',format='fits', overwrite=True)
except:
print(col+' failed!')
dz.write(outf,format='fits', overwrite=True)
def mkclusdat(fl,weightmd='tileloc',zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=None,ntilecut=0,ccut=None,ebits=None,nreal=128):
'''
fl is the root of the input/output file
    weightmd determines the completeness weighting; the default 'tileloc' includes 1/FRACZ_TILELOCID as a completeness weight
zmask determines whether to apply a mask at some given redshift
tp is the target type
dchi2 is the threshold for keeping as a good redshift
    tsnrcut determines where to mask based on the tsnr2 value (defined below per tracer)
'''
ff = Table.read(fl+'full_noveto.dat.fits')
if ebits is not None:
print('number before imaging mask '+str(len(ff)))
ff = cutphotmask(ff,ebits)
print('number after imaging mask '+str(len(ff)))
ff.write(fl+'full.dat.fits',overwrite=True,format='fits')
wzm = ''
if zmask:
wzm = 'zmask_'
if rcut is not None:
wzm += 'rmin'+str(rcut[0])+'rmax'+str(rcut[1])+'_'
if ntilecut > 0:
wzm += 'ntileg'+str(ntilecut)+'_'
if ccut is not None:
wzm += ccut+'_' #you could change this to however you want the file names to turn out
if ccut == 'main':
if tp != 'LRG':
print('this is only defined for LRGs!' )
else:
lrgmaintar = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/LRGtargetsDR9v1.1.1.fits',columns=['TARGETID'])
sel = np.isin(ff['TARGETID'],lrgmaintar['TARGETID'])
print('numbers before/after cut:')
print(len(ff),len(ff[sel]))
ff = ff[sel]
    ff.write(fl+wzm+'full.dat.fits',format='fits',overwrite=True)
'''
This is where redshift failure weights go
'''
ff['WEIGHT_ZFAIL'] = np.ones(len(ff))
#The LRGs just have this fairly ad hoc model that AJR fit in the notebook, definitely needs refinement/automation
if tp == 'LRG':
fibfluxz = ff['FIBERFLUX_Z']/ff['MW_TRANSMISSION_Z']
# psychic/plots.py
#encoding=utf-8
import matplotlib.pyplot as plt
import numpy as np
from .scalpplot import plot_scalp
from .positions import POS_10_5
from .markers import markers_to_events
import psychic
import scipy
import matplotlib
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
from matplotlib.lines import Line2D
from matplotlib import mlab
import matplotlib.transforms as transforms
import math
from . import trials
from . import stat
def plot_timeseries(frames, time=None, offset=None, color='k', linestyle='-'):
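    '''Plot each column of frames as a mean-subtracted trace, vertically offset from
    the previous one so channels do not overlap. time supplies the x-axis values and
    offset the vertical spacing (default: 3x the largest per-channel standard deviation).'''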
frames = np.asarray(frames)
if offset is None:
offset = np.max(np.std(frames, axis=0)) * 3
if time is None:
time = np.arange(frames.shape[0])
plt.plot(time, frames - np.mean(frames, axis=0) +
np.arange(frames.shape[1]) * offset, color=color, ls=linestyle)
def plot_scalpgrid(scalps, sensors, locs=None, width=None,
clim=None, cmap=None, titles=None, smark='k.', plot_contour=True):
'''
Plots a grid with scalpplots. Scalps contains the different scalps in the
rows, sensors contains the names for the columns of scalps, locs is a dict
that maps the sensor-names to locations.
Width determines the width of the grid that contains the plots. Cmap selects
a colormap, for example plt.cm.RdBu_r is very useful for AUC-ROC plots.
    Clim is a list containing the minimum and maximum value mapped to a color.
Titles is an optional list with titles for each subplot.
Returns a list with subplots for further manipulation.
'''
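    # Illustrative usage (the variable names here are hypothetical): plot four AUC
    # scalp maps in a 2-wide grid with the diverging colormap suggested above.
    #     plot_scalpgrid(aucs, sensor_names, width=2, cmap=plt.cm.RdBu_r,
    #                    clim=[0, 1], titles=['c1', 'c2', 'c3', 'c4'])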
scalps = np.asarray(scalps)
assert scalps.ndim == 2
nscalps = scalps.shape[0]
subplots = []
if not width:
width = int(min(8, np.ceil(np.sqrt(nscalps))))
height = int(np.ceil(nscalps/float(width)))
if not clim:
cmax = np.max(np.abs(scalps))
clim = [-cmax, cmax]
plt.clf()
for i in range(nscalps):
subplots.append(plt.subplot(height, width, i + 1))
plot_scalp(scalps[i], sensors, locs, clim=clim, cmap=cmap, smark=smark, plot_contour=plot_contour)
if titles:
plt.title(titles[i])
# plot colorbar next to last scalp
bb = plt.gca().get_position()
plt.colorbar(cax=plt.axes([bb.xmax + bb.width/10, bb.ymin, bb.width/10,
bb.height]), ticks=np.linspace(clim[0], clim[1], 5).round(2))
return subplots
def _draw_eeg_frame(
num_channels,
vspace,
timeline,
feat_lab=None,
mirror_y=False,
draw_scale=True):
axes = plot.gca()
plot.xlim([np.min(timeline), np.max(timeline)])
plot.ylim([-0.75*vspace, num_channels*vspace - 0.25*vspace])
plot.grid()
majorLocator = ticker.FixedLocator(vspace*np.arange(num_channels))
axes.yaxis.set_major_locator(majorLocator)
if feat_lab:
majorFormatter = ticker.FixedFormatter(feat_lab[::-1])
axes.yaxis.set_major_formatter(majorFormatter)
if draw_scale:
# Draw scale
trans = transforms.blended_transform_factory(axes.transAxes, axes.transData)
scale_top = vspace/2.0 # In data coordinates
scale_bottom = -vspace/2.0 # In data coordinates
scale_xpos = 1.02 # In axes coordinates
scale = Line2D(
[scale_xpos-0.01, scale_xpos+0.01, scale_xpos, scale_xpos, scale_xpos-0.01, scale_xpos+0.01],
[scale_top, scale_top, scale_top, scale_bottom, scale_bottom, scale_bottom],
transform=trans, linewidth=1, color='k')
scale.set_clip_on(False)
axes.add_line(scale)
axes.text(scale_xpos+0.02, 0, '%.4g \u00B5V' % vspace,
transform=trans, va='center')
axes.text(scale_xpos+0.02, scale_top, '+' if not mirror_y else '-', transform=trans, va='center')
axes.text(scale_xpos+0.02, scale_bottom, '-' if not mirror_y else '+', transform=trans, va='center')
for y in (vspace * np.arange(num_channels)):
plot.axhline(y, color='k', linewidth=1, alpha=0.25)
plot.gcf().subplots_adjust(right=0.85)
def plot_eeg(
data,
samplerate=None,
vspace=None,
draw_markers=True,
mirror_y=False,
fig=None,
mcolors=['b', 'r', 'g', 'c', 'm', 'y', 'k', '#ffaa00'],
mlinestyles=['-','-','-','-','-','-','-','-'],
mlinewidths=[1,1,1,1,1,1,1,1],
start=0):
'''
Plot EEG data contained in a golem dataset.
Parameters
----------
data : :class:`psychic.DataSet`
The data to plot. Assumed to be continuous data (channels x time)
samplerate : float (optional)
The sample rate of the data. When omitted,
:func:`psychic.get_samplerate` is used to estimate it.
vspace : float (optional)
The amount of vertical spacing between channels. When omitted, the
minimum value is taken so that no channels overlap.
draw_markers : bool (default=True)
When set, event markers are drawn as vertical lines in the plot.
mirror_y : bool (default=False)
When set, negative is plotted up. Some publications use this style
of plotting.
fig : :class:`matplotlib.Figure` (optional)
Normally, a new figure is created to hold the plot. However, the user
can specify the figure in which to draw. This is useful if the user
wants to remain in control of the size of the figure and the location
of the axes.
mcolors : list (optional)
Sets a color for each marker type. The vertical lines and text labels for
events of a given type will be drawn in the specified color. Values are given
as matplotlib color specifications.
See: http://matplotlib.org/api/colors_api.html
mlinestyles : list (optional)
Line style specifications for each marker type.
See: http://matplotlib.org/1.3.0/api/pyplot_api.html#matplotlib.pyplot.plot
mlinewidths : list (optional)
Line width specifications for each marker type. Vertical lines are
drawn at the specified widths. Values are given in points.
start : float (default=0)
Time which is to be taken as t=0. Normally when plotting a time range,
the time axis will reflect absolute time. For example, when plotting
the time range 2 to 4 seconds, the time axis will start at 2 seconds.
Setting the ``start`` parameter to 2 will in this case make the time
axis range from 0 to 2 seconds, and setting this parameter to 3 will
make the time axis range from -1 to 1 seconds.
Returns
-------
fig : :class:`matplotlib.Figure`
The figure object containing the plot. When a figure is specified with
the ``fig`` parameter, the same figure is returned.
'''
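    # Illustrative call (the dataset object d is hypothetical):
    #     fig = plot_eeg(d, vspace=100, start=2.0)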
assert data.data.ndim == 2
num_channels, num_samples = data.data.shape
# Spread out the channels
if vspace is None:
vspace = np.max(np.max(data.data, axis=1) - np.min(data.data, axis=1))
bases = vspace * np.arange(0, num_channels)[::-1] - np.mean(data.data, axis=1)
to_plot = data.data + np.tile( bases, (num_samples,1) ).T
if fig is None:
fig = plot.figure()
# Plot EEG
fig.subplots_adjust(right=0.85)
axes = plot.subplot(111)
_draw_eeg_frame(num_channels, vspace, data.ids.T-start, data.feat_lab[0], mirror_y)
plot.plot(data.ids.T-start, to_plot.T)
# Draw markers
if draw_markers:
trans = transforms.blended_transform_factory(axes.transData, axes.transAxes)
events, offsets, _ = markers_to_events(data.labels[0,:])
eventi = {}
for i,e in enumerate(np.unique(events)):
eventi[e] = i
for e,o in zip(events, offsets):
i = eventi[e]
x = data.ids[0,o] # In data coordinates
y = 1.01 # In axes coordinates
plot.axvline(x,
color=mcolors[i%len(mcolors)],
linestyle=mlinestyles[i%len(mlinestyles)],
linewidth=mlinewidths[i%len(mlinewidths)])
plot.text(x, y, str(e), transform=trans, ha='center', va='bottom')
plot.ylabel('Channels')
plot.xlabel('Time (s)')
plot.grid()
return fig
def plot_specgrams(
data,
samplerate=None,
NFFT=256,
freq_range=[0.1, 50],
fig=None):
''' For each channel, plot a spectogram. '''
if fig is None:
fig = plot.figure()
if samplerate is None:
samplerate = psychic.get_samplerate(data)
num_channels = data.nfeatures
    num_cols = max(1, int(np.ceil(num_channels / 8)))
num_rows = min(num_channels, 8)
fig.subplots_adjust(hspace=0)
for channel in range(num_channels):
        col = channel // num_rows
row = channel % num_rows
ax = plot.subplot(num_rows, num_cols, num_cols*row+col+1)
        s,freqs,_,_ = plot.specgram(data.data[channel,:], NFFT, samplerate, noverlap=NFFT//2, xextent=(np.min(data.ids), np.max(data.ids)))
selection = np.logical_and(freqs >= freq_range[0], freqs <= freq_range[1])
s = s[selection,:]
freqs = freqs[selection]
plot.ylim(freq_range[0], freq_range[1])
plot.clim(np.min(np.log(s)), np.max(np.log(s)))
ax.xaxis.grid(True, which='major', color='w')
ax.yaxis.grid(False)
if data.feat_lab is not None:
plot.ylabel(data.feat_lab[channel])
else:
plot.ylabel('CH%02d' % (channel+1))
if row == num_rows-1 or channel == num_channels-1:
plot.xlabel('Time (s)')
else:
[label.set_visible(False) for label in ax.get_xticklabels()]
[tick.set_visible(False) for tick in ax.get_xticklines()]
return fig
def plot_erp(
d,
samplerate=None,
classes=None,
vspace=None,
cl_lab=None,
ch_lab=None,
draw_scale=True,
ncols=None,
start=0,
fig=None,
mirror_y=False,
colors=['b', 'r', 'g', 'c', 'm', 'y', 'k', '#ffaa00'],
linestyles=['-','-','-','-','-','-','-','-'],
linewidths=[1, 1, 1, 1, 1, 1, 1, 1],
pval=0.05,
fwer=None,
np_test=False,
np_iter=1000,
conf_inter=None,
enforce_equal_n=True,
):
'''
Create an Event Related Potential plot which aims to be as informative as
    possible. The result is intended to be a publication-ready figure, therefore
this function supplies a lot of customization. The input can either be a
sliced dataset (``d.data`` = [channels x samples x trials]) or a readily computed
ERP given by :class:`psychic.nodes.ERP` or :func:`psychic.erp`.
When possible, regions where ERPs differ significantly are shaded. This is
    meant to be an early indication of areas of interest and not meant as
sound statistical evidence of an actual difference. When a sliced dataset
is given, which contains two classes (or two classes are specified using
the ``classes`` parameter) t-tests are performed for each sample.
Significant sections (see the ``pval`` parameter) are drawn shaded.
P-values are corrected using the Benjamini-Hochberg method. See the
``fwer`` parameter for other corrections (or to disable it). See the
``np_test`` parameter for a better (but slower) non-parametric test to
determine significant regions.
Parameters
----------
d : :class:`psychic.DataSet`
A sliced Golem dataset that will be displayed.
classes : list (default=all)
When specified, ERPs will be drawn only for the classes with the given
indices.
vspace : float (optional)
        Amount of vertical space between the ERP traces, by default the minimum
value so traces don't overlap.
samplerate : float (optional)
By default determined through ``d.feat_lab[1]``, but can be
specified when missing.
cl_lab : list (optional)
List with a label for each class, by default taken from
``d.cl_lab``, but can be specified if missing.
ch_lab : list (optional)
List of channel labels, by default taken from ``d.feat_lab[0]``,
but can be specified if missing.
draw_scale : bool (default=True)
Whether to draw a scale next to the plot.
"""Bookmarks Converter, is a package that converts the webpage bookmarks
from DB/HTML/JSON to DB/HTML/JSON.
The DB files supported are custom (self made) sqlite database files,
to see the exact format of the database you can check the .db file found
in the data folder.
The HTML files supported are Netscape-Bookmark files from either Chrome or
Firefox. The output HTML files adhere to the firefox format.
The JSON files supported are the Chrome bookmarks file, the Firefox
.json bookmarks export file, and the custom json file created by this package"""
import json
import re
import time
from pathlib import Path
from bs4 import BeautifulSoup, Tag
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .models import Base, Bookmark, HTMLBookmark, JSONBookmark
class DBMixin:
"""Mixing containing all the DB related functions."""
def _parse_db(self):
"""Import the DB bookmarks file into self._tree as an object."""
database_path = f"sqlite:///{str(self.filepath)}"
engine = create_engine(database_path, encoding="utf-8")
Session = sessionmaker(bind=engine)
session = Session()
self._tree = session.query(Bookmark).get(1)
def _convert_to_db(self):
"""Convert the imported bookmarks to database objects."""
self.bookmarks = []
self._stack = [self._tree]
while self._stack:
self._stack_item = self._stack.pop()
self._iterate_folder_db()
def _iterate_folder_db(self):
"""Iterate through each item in the hierarchy tree and create
a database object, appending any folders that contain children to
the stack for further processing."""
folder = self._stack_item._convert_folder_to_db()
self.bookmarks.append(folder)
parent_id = folder.id
for child in self._stack_item:
child.parent_id = parent_id
if child.type == "folder":
if child.children:
self._stack.append(child)
else:
folder = child._convert_folder_to_db()
self.bookmarks.append(folder)
else:
url = child._convert_url_to_db()
self.bookmarks.append(url)
def _save_to_db(self):
"""Function to export the bookmarks as SQLite3 DB."""
database_path = f"sqlite:///{str(self.output_filepath.with_suffix('.db'))}"
engine = create_engine(database_path, encoding="utf-8")
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
session.commit()
session.bulk_save_objects(self.bookmarks)
session.commit()
class HTMLMixin:
"""Mixing containing all the HTML related functions."""
def _parse_html(self):
"""Imports the HTML Bookmarks file into self._tree as a modified soup
object using the TreeBuilder class HTMLBookmark, which adds property
access to the html attributes of the soup object."""
self.format_html_file(self.filepath, self.temp_filepath)
with open(self.temp_filepath, "r", encoding="utf-8") as file_:
soup = BeautifulSoup(
markup=file_,
features="html.parser",
from_encoding="Utf-8",
element_classes={Tag: HTMLBookmark},
)
self.temp_filepath.unlink()
HTMLBookmark.reset_id_counter()
tree = soup.find("h3")
self._restructure_root(tree)
self._add_index()
@staticmethod
def format_html_file(filepath, output_filepath):
"""Takes in an absolute path to a HTML Bookmarks file, it creates a new
Bookmarks file with the text "output_" prepended to the filename.
where;
- The main "<H1>" tag is converted to "<H3>" and acts as the root folder
- All "<DT>" tags are removed.
- "<H3>" acts as folders and list containers instead of "<DL>".
- All "<H3>" and "<A>" tag's inner text are added as a "title"
attribute within the html element.
filepath: str
absolute path to bookmarks html file.
output_filepath: str
absolute path and name for output file."""
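        # Illustrative before/after for a single line (the input below is a made-up
        # example of a Netscape-Bookmark folder entry):
        #   '<DT><H3 ADD_DATE="1599750431">recipes</H3>'
        # becomes
        #   '<H3 ADD_DATE="1599750431" TITLE="recipes">'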
with open(filepath, "r", encoding="utf-8") as input_file, open(
output_filepath, "w", encoding="utf-8"
) as output_file:
# regex to select an entire H1/H3/A HTML element
element = re.compile(r"(<(H1|H3|A))(.*?(?=>))>(.*)(<\/\2>)\n")
for line in input_file:
if "<DL><p>" in line:
continue
line = element.sub(r'\1\3 TITLE="\4">\5', line)
line = (
line.replace("<DT>", "")
.replace("<H1", "<H3")
.replace("</H1>", "")
.replace("</H3>", "")
.replace("</DL><p>\n", "</H3>")
.replace("\n", "")
.strip()
)
output_file.write(line)
def _restructure_root(self, tree):
"""Restructure the root of the HTML parsed tree to allow for an easier
processing.
If the tree title is 'Bookmarks Menu' we need to extract the two folders
'Bookmarks Toolbar' and 'Other Bookmarks', then insert them into the
root folders children.
If the tree title is 'Bookmarks' we need to extract the 'Bookmarks bar'
folder and insert it at the beginning of the root children. Then we need
to rename the 'Bookmarks' folder to 'Other Bookmarks'.
tree: :class: `bs4.element.Tag`
BeautifulSoup object containing the first <H3> tag found in the
html file."""
self._tree = HTMLBookmark(
name="h3",
attrs={
"id": 1,
"index": 0,
"parent_id": 0,
"title": "root",
"date_added": round(time.time() * 1000),
},
)
self._tree.children.append(tree)
if tree.title == "Bookmarks Menu":
for i, child in enumerate(tree):
if child.title in ("Bookmarks Toolbar", "Other Bookmarks"):
self._tree.children.append(tree.children.pop(i))
elif tree.title == "Bookmarks":
tree.title = "Other Bookmarks"
for i, child in enumerate(tree):
if child.title == "Bookmarks bar":
self._tree.children.insert(0, tree.children.pop(i))
break
def _convert_to_html(self):
"""Convert the imported bookmarks to HTML."""
header = """<!DOCTYPE NETSCAPE-Bookmark-file-1>
<!-- This is an automatically generated file.
It will be read and overwritten.
DO NOT EDIT! -->
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks Menu</H1>
<DL><p>
"""
footer = "</DL>"
self._stack = self._tree.children[::-1]
body = []
while self._stack:
self._stack_item = self._stack.pop()
folder = self._iterate_folder_html()
if folder:
self._create_placeholder(body, folder)
self.bookmarks = "".join([header, *body, footer])
def _iterate_folder_html(self):
"""Iterate through each item in the hierarchy tree and convert it to
HTML. If a folder has children, it is added to the stack and a
placeholder is left in its place so it can be inserted back to its
position after processing."""
folder = [self._stack_item._convert_folder_to_html(), "<DL><p>\n"]
list_end = "</DL><p>\n"
for child in self._stack_item:
if child.type == "folder":
item = f"<folder{child.id}>"
self._stack.append(child)
else:
item = child._convert_url_to_html()
folder.append(item)
folder.append(list_end)
result = "".join(folder)
return result
def _create_placeholder(self, body, folder):
placeholder = f"<folder{self._stack_item.id}>"
if body and (placeholder in body[-1]):
body[-1] = body[-1].replace(placeholder, folder)
else:
body.append(folder)
def _save_to_html(self):
"""Export the bookmarks as HTML."""
output_file = self.output_filepath.with_suffix(".html")
with open(output_file, "w", encoding="utf-8") as file_:
file_.write(self.bookmarks)
class JSONMixin:
"""Mixing containing all the JSON related functions."""
def _parse_json(self):
"""Imports the JSON Bookmarks file into self._tree as a
JSONBookmark object."""
self.format_json_file(self.filepath, self.temp_filepath)
# with object_hook the json tree is loaded as JSONBookmark object tree.
with open(self.temp_filepath, "r", encoding="utf-8") as file_:
self._tree = json.load(file_, object_hook=self._json_to_object)
self.temp_filepath.unlink()
if self._tree.source == "Chrome":
self._add_index()
@staticmethod
def _json_to_object(jdict):
"""Helper function used as object_hook for json load."""
return JSONBookmark(**jdict)
@staticmethod
def format_json_file(filepath, output_filepath):
"""Reads Chrome/Firefox/Bookmarkie JSON bookmarks file (at filepath),
and modifies it to a standard format to allow for easy
parsing/converting.
        Exports the result to a new JSON file (output_filepath) with
a prefix of 'output_'."""
with open(filepath, "r", encoding="utf-8") as file_:
tree = json.load(file_)
if tree.get("checksum"):
tree = {
"name": "root",
"id": 0,
"index": 0,
"parent_id": 0,
"type": "folder",
"date_added": 0,
"children": list(tree.get("roots").values()),
}
tree["children"][1]["name"] = "Other Bookmarks"
elif tree.get("root"):
tree["title"] = "root"
folders = {
"menu": "Bookmarks Menu",
"toolbar": "Bookmarks Toolbar",
"unfiled": "Other Bookmarks",
"mobile": "Mobile Bookmarks",
}
for child in tree.get("children"):
child["title"] = folders[child.get("title")]
with open(output_filepath, "w", encoding="utf-8") as file_:
json.dump(tree, file_, ensure_ascii=False)
def _convert_to_json(self):
"""Convert the imported bookmarks to JSON."""
self._stack = []
self.bookmarks = self._tree._convert_folder_to_json()
self._stack.append((self.bookmarks, self._tree))
while self._stack:
self._stack_item = self._stack.pop()
folder, node = self._stack_item
children = folder.get("children")
for child in node:
if child.type == "folder":
item = child._convert_folder_to_json()
if child.children:
self._stack.append((item, child))
else:
item = child._convert_url_to_json()
children.append(item)
def _save_to_json(self):
"""Function to export the bookmarks as JSON."""
output_file = self.output_filepath.with_suffix(".json")
with open(output_file, "w", encoding="utf-8") as file_:
json.dump(self.bookmarks, file_, ensure_ascii=False)
class BookmarksConverter(DBMixin, HTMLMixin, JSONMixin):
"""Bookmarks Converter class that converts the bookmarks to DB/HTML/JSON,
using Iteration and Stack.
Usage:
1- Instantiate a class and pass in the filepath as string or `Path` object:
- `instance = BookmarksConverter(filepath)`.
2- Import and Parse the bookmarks file passing the source format as a string in lower case:
- `instance.parse("db")`, for a database file.
- `instance.parse("html")`, for a html file.
- `instance.parse("json")`, for a json file.
3- Convert the data to the desired format passing the format as a lower
case string:
- `instance.convert("db")`, convert to database.
- `instance.convert("html")`, convert to html.
- `instance.convert("json")`, convert to json.
4- At this point the bookmarks are stored in the `bookmarks` attribute
accessible through `instance.bookmarks`.
5- Export the bookmarks to a file using the save method `instance.save()`.
Parameters:
-----------
filepath : str or Path
path to the file to be converted using BookmarksConverter
Attributes:
-----------
bookmarks : list or dict or str
list, dict or str containing the bookmarks converted using BookmarksConverter.
- list of database objects if converted to database
- dict tree with bookmarks if converted to json
- str of the tree if converted to html
filepath : str or Path
path to the file to be converted using BookmarksConverter
output_filepath : Path
path to the output file exported using `.save()` method"""
_formats = ("db", "html", "json")
def __init__(self, filepath):
self._export = None
self._format = None
self._stack = None
self._stack_item = None
self._tree = None
self.bookmarks = None
self.filepath = Path(filepath)
self._prepare_filepaths()
def _prepare_filepaths(self):
"""Takes in filepath, and creates the following filepaths:
-temp_filepath:
#!/usr/bin/env python
# coding: utf-8
# # The Boundary Element Method (BEM)
#
#
# You can run this code directly in your browser by clicking on the rocket logo ( <i class="fas fa-rocket"></i> ) at the top of the page, and clicking 'Binder'. This will open a Jupyter Notebook in a [Binder](https://mybinder.org/) environment which is set up to contain everything you need to run the code. **Don't forget to save a local copy if you make any changes!**
#
# If you prefer, you can download the Jupyter Notebook file to run locally, by clicking the download logo ( <i class="fas fa-download"></i> ) at the top of the page and selecting '.ipynb'.
#
# If you are new to using Jupyter Notebooks, [this guide](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) will help you get started.
#
#
# ## Prerequisites
#
# To understand the basic principles explained in this tutorial, you should:
# * Have an understanding of the exterior Helmholtz scattering problem
# * Have a basic understanding of the midpoint rule, for approximating integrals
# * Be comfortable using ```numpy```
#
# ## Introduction
#
# This notebook introduces the **Boundary Element Method** (BEM) by a simple numerical example. The main idea behind it is to use Green's identities to represent the (unknown) scattered wave in terms of an unknown which is defined only on the boundary of our domain. For example, if we are modelling scattering by a sphere, the unknown is a function which lives on the surface of the sphere. BEM involves applying a finite element method to approximate this unknown on the boundary, hence the name. This is advantageous for the following two reasons:
# * We have reduced the dimension of our problem by one, so meshing is considerably simpler, and fewer degrees of freedom are required
# * We do not need to worry about meshing an unbounded domain, constructing an artificial boundary, etc.
#
#
# ## Setup
#
# This example is intended to be from first principles, so we will only use methods from three of the main python libraries:
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# ## Step one: Obtain a representation for the solution in terms of boundary data
#
# For our scattering problem, we consider an incident wave $u^i(x)$ impinging on an obstacle $\Omega\subset\mathbb{R}^n$, where $n=2,3$. Our starting point is the Helmholtz equation
#
# $$\label{eq:1}
# (\Delta+k^2)u=0,
# $$
#
# where $u$ denotes the total field, and $u^s=u-u^i$ is the scattered field, which satisfies the Sommerfeld radiation condition.
#
# Applying [Green's third identity](https://en.wikipedia.org/wiki/Green's_identities#Green's_third_identity) we obtain the representation:
#
# $$
# u(x) = u^i(x) - \int_\Gamma \left[\Phi(x,y)\frac{\partial u}{\partial n}(y) - \frac{\partial \Phi(x,y)}{\partial n(y)}u(y)\right]~\mathrm{d}s(y),\quad x\in\mathbb{R}^n\setminus\Omega,
# $$
#
# where $\frac{\partial}{\partial n}$ denotes the outward normal derivative, $\Gamma$ denotes the boundary of $\Omega$, and $\Phi$ denotes the fundamental solution
#
# $$
# \Phi(x,y) = \left\{
# \begin{array}{ll}
# \frac{\mathrm{i}}{4}H^{(1)}_0(k|x-y|),&\quad n=2,\\
# \frac{\mathrm{e}^{\mathrm{i}k|x-y|}}{4\pi|x-y|},&\quad n=3,
# \end{array}
# \right.
# $$
#
# where $H^{(1)}_0$ is the [Hankel function](https://mathworld.wolfram.com/HankelFunctionoftheFirstKind.html) of the first kind order zero. You probably recognise the function for $n=3$, but if you haven't seen $H^{(1)}_0$ before, it looks like the ripples on the surface of a lake after you drop a pebble into the water:
# In[2]:
from scipy.special import hankel1 as H1
t = np.linspace(-50,50,1000)
X,Y = np.meshgrid(t,t)
ripples = plt.imshow(np.real(H1(0,np.sqrt(X**2+Y**2))),extent =[t.min(), t.max(), t.min(), t.max()])
plt.colorbar(ripples)
plt.title('Real part of H_0^{(1)}(|x|)');
# For this simple example, we will consider scattering by a circle in two dimensions, with sound-soft (that is, Dirichlet) boundary conditions. This means that $u=0$ on $\Gamma$, so the representation above simplifies to
#
# $$
# u(x) = u^i(x) - \int_\Gamma \Phi(x,y)\frac{\partial u}{\partial n}(y)~\mathrm{d}s(y),\quad x\in\mathbb{R}^2\setminus\Omega.
# $$
#
# The integral may be interpreted as lots of tiny speakers $\Phi(x,y)$ on our surface $\Gamma$, whilst $\frac{\partial u}{\partial n}(y)$ can be interpreted as the volume of these speakers. We will choose our incoming wave to be an incident plane wave, $u^i(x):=\mathrm{e}^{\mathrm{i} k x\cdot d}$, where $d\in\mathbb{R}^2$ is a unit vector which represents the direction of propagation.
# In[3]:
k = 5.0 # wavenumber
d = np.array([1.0,0.0]) # incident direction
# In[4]:
Phi = lambda x,y: 1j/4*H1(0,k*np.linalg.norm(np.array(x)-np.array(y)))
ui = lambda x: np.exp(1j*k*np.dot(x,d))
# ## Step two: Reformulate as a problem on the boundary $\Gamma$
# Remember, our long-term aim is to approximate $\frac{\partial u}{\partial n}$, then we can plug that approximation into the above equation, to obtain an approximation for $u(x)$. To get an equation we can solve, we take the limit of the above equation as $x$ tends to $\Gamma$ and rearrange, to obtain a **boundary integral equation** (BIE):
#
# $$
# \int_\Gamma \Phi(x,y)\frac{\partial u}{\partial n}(y)~\mathrm{d}s(y)=u^i(x),\quad x\in\Gamma.
# $$
#
# A BEM is an approximation of an equation of this type, defined on the boundary $\Gamma$. Before approximating, we can parametrise the circle $\Gamma$ by $\theta\in[0,2\pi)\to x\in\Gamma$ in the natural way, $x(\theta)=[\cos(\theta),\sin(\theta)]$, to rewrite the above BIE in terms of a one-dimensional parameter
#
# $$
# \int_0^{2\pi} \tilde\Phi(\theta,\vartheta)\frac{\partial u}{\partial n}(y(\vartheta))~\mathrm{d}\vartheta=u^i(x(\theta)),\quad \theta\in[0,2\pi),
# $$
#
# where $\tilde\Phi(\theta,\vartheta):=\Phi(x(\theta),y(\vartheta))$ is purely to keep things a bit simpler.
# There are many BEMs, but for the purpose of this example, I will choose the simplest one I can think of.
# In[5]:
circle_map = lambda theta: [np.cos(theta), np.sin(theta)]
ui_angular = lambda theta: ui(circle_map(theta))
Phi_tilde = lambda theta,vartheta: Phi(circle_map(theta),circle_map(vartheta))
# ## Step three: Approximate the boundary data
#
# Choose $N$ equispaced points on the circle $\theta_n=nh$ where $h:=2\pi/N$ for $n=0,\ldots,N-1$. For our approximation, we specify that the above BIE must hold exactly at these points. This is known as a **collocation BEM**, and $\theta_n$ are the **collocation points**:
#
# $$
# \int_0^{2\pi} \tilde\Phi(\theta_n,\vartheta)v_h(\vartheta)~\mathrm{d}\vartheta=u^i(x(\theta_n)),\quad n=0,\ldots,N-1,
# $$
#
# where we choose the details of our approximation $v^{(h)}(\theta)\approx\frac{\partial u}{\partial n}(y(\theta))$ next.
# In[6]:
N=80 # number of collocation points
theta = np.linspace(0,2*np.pi,N,endpoint=False) # equispaced points on circle
h = 2*np.pi/N # meshwidth
plt.plot(np.cos(theta),np.sin(theta),'k.-')
plt.title('Collocation points on circle');
# We will use a [piecewise constant](https://mathworld.wolfram.com/PiecewiseConstantFunction.html) approximation $v^{(h)}(\theta)$, such that $v^{(h)}(\theta)=v_m$ for $\theta\in[\theta_m-h/2,\theta_m+h/2]$ and $v^{(h)}(\theta)=0$ otherwise, for $m=1,\ldots,N$. Note that the values $v_m$ are currently unknown. A piecewise constant approximation is sometimes referred to as $h$-BEM. So the full name of this method is **collocation $h$-BEM**, and it can be expressed in the following form:
#
# $$
# \sum_{m=1}^Nv_m\int_{\theta_m-h/2}^{\theta_m+h/2} \tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta=u^i(x(\theta_n)),\quad n=0,\ldots,N-1.
# $$
#
# We can represent the above equation as a linear system for the unknowns $v_m$:
#
# $$A\mathbf{v}=\mathbf{u},$$
#
# where $A_{mn}:=\int_{\theta_m-h/2}^{\theta_m+h/2}\tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta$, and $u_n := u^i(x(\theta_n))$. Even in this simple example, I hope it is clear that efficient methods for evaluating singular integrals play a key role in BEMs. The Nystrom variant of BEM is fast and simple (perfect for this example). The idea is to approximate (almost) each integral $A_{mn}:=\int_{\theta_m-h/2}^{\theta_m+h/2}\tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta$ by a one-point quadrature rule, which means we can use our collocation points as our quadrature points. This gives
#
# $$A_{mn}= h\tilde\Phi(\theta_n,\theta_m)+O(h^2),\quad\text{for }m\neq n,$$
#
# where $O(h^2)$ means the error is bounded above by $Ch^2$, for some constant $C$ and sufficiently small $h$.
#
# But we must be careful: a one-point quadrature rule for $m=n$ gives $h\tilde\Phi(\theta_n,\theta_n)=\infty$, since the Hankel function is unbounded at zero! So we need something a little more sophisticated for the diagonal elements.
#
# From DLMF [(10.4.3)](https://dlmf.nist.gov/10.4.3), [(10.8.2)](https://dlmf.nist.gov/10.8.2), [(10.2.2)](https://dlmf.nist.gov/10.2#E2), we can consider the first term in the asymptotic expansion of the Hankel function and integrate the $\log$ term exactly, to write
#
# $$
# \int_{\theta_n-h/2}^{\theta_n+h/2}\tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta
# =
# \frac{\mathrm{i}h\left(2\mathrm{i}\gamma+2\mathrm{i}\log(hk/4)+\mathrm{i}\pi-2\mathrm{i}\right)}{4\pi}+O(h^{2-\epsilon})
# $$
#
# where $\gamma\approx0.577$ is the Euler-Mascheroni constant and $\epsilon$ is any number in $(0,1)$.
#
# Now we can construct the matrix $A$:
# In[7]:
eulergamma = 0.57721566
singular_diagonal = lambda h: 1j*h*(2j*eulergamma + 2j*np.log(h*k/4)+1j*np.pi-2j)/(4*np.pi)
# construct matrix
A = np.zeros((N,N),dtype=complex)
for n in range(N):
for m in range(N):
if n==m:
A[m,n] = singular_diagonal(h)
else:
A[m,n] = h*Phi_tilde(theta[n],theta[m])
# construct right-hand side vector
u = np.zeros(N,dtype=complex)
for n in range(N):
u[n] = ui_angular(theta[n])
# solve linear system to get values of piecewise constant approximation:
v = np.linalg.solve(A,u)
# In[8]:
plt.plot(theta,np.real(v))
plt.title('Real part of approximation to normal derivative');
# ## Step four: Use approximate boundary data in representation formula
#
# Specifically: we will use the approximate boundary data **from step three** in the representation formula **from step one**.
#
# Plugging $v_h$ into the representation formula, and parametrising in the same way as before, gives
#
# $$
# u(x) \approx u_h(x) := u^i(x) - \int_0^{2\pi}\Phi(x,y(\theta))v_h(\theta)~\mathrm{d}\theta,\quad x\in\mathbb{R}^2\setminus\Omega.
# $$
#
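# The representation above is everything we need to evaluate the approximate
# total field away from the scatterer. The cell below is a minimal sketch of
# this final step (the plotting window and grid resolution are arbitrary
# choices, not part of the derivation): the boundary integral is approximated
# with the same one-point midpoint rule used to build $A$, and the interior of
# the circle, where the representation formula does not hold, is masked out.
# In[9]:
t_grid = np.linspace(-3,3,201)
Xg,Yg = np.meshgrid(t_grid,t_grid)
u_scat = np.zeros_like(Xg,dtype=complex)
for m in range(N):
    y_m = circle_map(theta[m])
    dist = np.sqrt((Xg-y_m[0])**2+(Yg-y_m[1])**2)
    u_scat -= h*v[m]*(1j/4)*H1(0,k*dist)  # midpoint rule for the boundary integral
u_approx = np.exp(1j*k*(Xg*d[0]+Yg*d[1])) + u_scat  # u_h = incident wave + scattered part
u_approx[Xg**2+Yg**2 <= 1] = np.nan  # mask the interior of the scatterer
field = plt.imshow(np.real(u_approx), extent=[t_grid.min(), t_grid.max(), t_grid.min(), t_grid.max()])
plt.colorbar(field)
plt.title('Real part of the approximate total field');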
unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to stop the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
""" Resets the drive.
Resets the drive, which is equivalent to a power cycling.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to reset the drive in the
case of errors.
Returns
-------
success : bool
Whether the last reset command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!RESET'.
"""
return (not self.driver.command_error(
self.driver.send_command('RESET',
timeout=10.0, immediate=True, max_retries=max_retries)))
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]]
def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
""" Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile.
"""
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False
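# A hedged usage sketch (not part of this class): given a connected instance
# `drive` of this class, a program could be written and then executed roughly
# as below. The program number and command strings are illustrative
# placeholders only, not commands documented here.
#
#   program = ['A10', 'V5', 'D25000', 'GO']      # hypothetical command list
#   if drive.set_program_profile(1, program):    # wrapped in DEL/DEF ... END
#       out = drive.run_program_profile(1, timeout=30.0)
#       print(out[4])                            # response lines from the run
#   else:
#       print(drive.get_program(1))              # inspect what is stored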
def run_program_profile(self, n, program_or_profile='program',
timeout=10.0):
""" Runs a program/profile on the drive.
Runs program or profile 'n' on the drive, grabs its output, and
processes the output. The response from the drive is broken down
into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response, all of which are returned.
It is **VERY IMPORTANT** that 'timeout' is long enough for the
program to run if all the output from the drive is to be
collected.
Parameters
----------
n : int
Which program to run.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response for running a program (set to 1.0 for a profile
regardless of what is given). A negative value or ``None``
indicates that an infinite timeout should be used.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
Notes
-----
Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
command to the drive. For a profile, the only output is that
command echoed back. For a program, it will echo back each
command in the program (preceeded by an '*' and followed by a
| |
reserved capacity.
"""
if reserved is None:
self.reserved = zero_capacity()
elif isinstance(reserved, int):
assert reserved == 0
self.reserved = zero_capacity()
elif isinstance(reserved, float):
assert reserved == 0.0
self.reserved = zero_capacity()
elif isinstance(reserved, list):
assert len(reserved) == DIMENSION_COUNT
self.reserved = np.array(reserved, dtype=float)
elif isinstance(reserved, np.ndarray):
self.reserved = reserved
else:
assert False, 'Unsupported type: %r' % type(reserved)
def update(self, reserved, rank, rank_adjustment, max_utilization=None):
"""Updates allocation.
"""
if rank is not None:
self.rank = rank
else:
self.rank = DEFAULT_RANK
if rank_adjustment is not None:
self.rank_adjustment = rank_adjustment
self.set_reserved(reserved)
self.set_max_utilization(max_utilization)
def set_max_utilization(self, max_utilization):
"""Sets max_utilization, accounting for default None value.
"""
if max_utilization is not None:
self.max_utilization = max_utilization
else:
self.max_utilization = _MAX_UTILIZATION
def set_traits(self, traits):
"""Set traits, account for default None value.
"""
if not traits:
self.traits = 0
else:
self.traits = traits
def add(self, app):
"""Add application to the allocation queue.
Once added, the scheduler will make an attempt to place the app on one
of the cell nodes.
"""
# Check that there are no duplicate app names.
if app.name in self.apps:
_LOGGER.warning(
'Duplicate app on allocation queue: %s', app.name
)
return
app.allocation = self
self.apps[app.name] = app
def remove(self, name):
"""Remove application from the allocation queue.
"""
if name in self.apps:
self.apps[name].allocation = None
del self.apps[name]
def priv_utilization_queue(self):
"""Returns tuples sorted by global utilization.
Apps in the queue are ordered by priority, then insertion order.
Adding or removing maintains the invariant that app utilization
monotonically increases as well.
Returns local prioritization queue in a tuple where first element is
utilization ratio, so that this queue is suitable for merging into
global priority queue.
"""
def _app_key(app):
"""Compares apps by priority, state, global index
"""
return (-app.priority, 0 if app.server else 1,
app.global_order, app.name)
prio_queue = sorted(six.viewvalues(self.apps), key=_app_key)
acc_demand = zero_capacity()
available = self.reserved + np.finfo(float).eps
util_before = utilization(acc_demand, self.reserved, available)
for app in prio_queue:
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, self.reserved, available)
# Priority 0 apps are treated specially - utilization is set to
# max float.
#
# This ensures that they are at the end of the all queues.
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# All things equal, already scheduled applications have priority
# over pending.
pending = 0 if app.server else 1
if util_after <= self.max_utilization - 1:
rank = self.rank
if util_before < 0:
rank -= self.rank_adjustment
else:
rank = _UNPLACED_RANK
entry = (rank, util_before, util_after, pending, app.global_order,
app)
util_before = util_after
yield entry
def utilization_queue(self, free_capacity, visitor=None):
"""Returns utilization queue including the sub-allocs.
All app queues from self and sub-allocs are merged in standard order,
and then utilization is recalculated based on total reserved capacity
of this alloc and sub-allocs combined.
The function maintains the invariant that any app (in self or inside a
sub-alloc) with utilization < 1 will remain with utilization < 1.
"""
total_reserved = self.total_reserved()
queues = [
alloc.utilization_queue(free_capacity, visitor)
for alloc in six.itervalues(self.sub_allocations)
]
queues.append(self.priv_utilization_queue())
acc_demand = zero_capacity()
available = total_reserved + free_capacity + np.finfo(float).eps
util_before = utilization(acc_demand, total_reserved, available)
for item in heapq.merge(*queues):
rank, _u_before, _u_after, pending, order, app = item
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, total_reserved, available)
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# - lower rank allocations take precedence.
# - for same rank, utilization takes precedence
# - False < True, so for apps with same utilization we prefer
# those that already running (False == not pending)
# - Global order
entry = (rank, util_before, util_after, pending, order, app)
if visitor:
visitor(self, entry, acc_demand)
util_before = util_after
yield entry
def total_reserved(self):
"""Total reserved capacity including sub-allocs.
"""
return six.moves.reduce(
lambda acc, alloc: acc + alloc.total_reserved(),
six.itervalues(self.sub_allocations),
self.reserved
)
def add_sub_alloc(self, name, alloc):
"""Add child allocation.
"""
self.sub_allocations[name] = alloc
assert not alloc.path
alloc.path = self.path + [name]
alloc.label = self.label
def remove_sub_alloc(self, name):
"""Remove child allocation.
"""
if name in self.sub_allocations:
del self.sub_allocations[name]
def get_sub_alloc(self, name):
"""Return sub allocation, create empty if it does not exist.
"""
if name not in self.sub_allocations:
self.add_sub_alloc(name, Allocation())
return self.sub_allocations[name]
def all_apps(self):
"""Return all apps in allocation and sub-allocations."""
all_apps = list(six.itervalues(self.apps))
for alloc in six.itervalues(self.sub_allocations):
all_apps.extend(alloc.all_apps())
return all_apps
class Partition:
"""Cell partition.
"""
__slots__ = (
'allocation',
'max_server_uptime',
'max_lease',
'threshold',
'label',
'_reboot_buckets',
'_reboot_dates',
'_reboot_last',
)
def __init__(self, max_server_uptime=None, max_lease=None, threshold=None,
label=None, reboot_schedule=None, now=None):
self.label = label
self.allocation = Allocation(partition=label)
# Default -
if not max_server_uptime:
max_server_uptime = DEFAULT_SERVER_UPTIME
if not max_lease:
max_lease = DEFAULT_MAX_APP_LEASE
if not threshold:
threshold = DEFAULT_THRESHOLD
self.max_server_uptime = max_server_uptime
self.max_lease = max_lease
self.threshold = threshold
if not reboot_schedule:
# reboot every day
reboot_schedule = {day: (23, 59, 59) for day in range(7)}
if not now:
now = time.time()
self._reboot_dates = reboot_dates(
reboot_schedule,
start_date=datetime.date.fromtimestamp(now)
)
self._reboot_buckets = []
self._reboot_last = now
self.tick(now)
def _find_bucket(self, timestamp):
"""Try to find bucket with given timestamp.
"""
for bucket in self._reboot_buckets:
if bucket.timestamp == timestamp:
return bucket
return None
def add(self, server, timestamp=None):
"""Add server.
"""
bucket = None
if timestamp:
bucket = self._find_bucket(timestamp)
# servers that have been up longer than the max lifetime should be
# rebooted at the next opportunity
if (self._reboot_buckets[0].timestamp >
server.up_since + DEFAULT_SERVER_UPTIME):
bucket = self._reboot_buckets[0]
if not bucket:
bucket = min(reversed(self._reboot_buckets),
key=lambda b: b.cost(server))
bucket.add(server)
def remove(self, server):
"""Remove server.
"""
for bucket in self._reboot_buckets:
bucket.remove(server)
def tick(self, now):
"""Do per-tick bookkeeping.
"""
while self._reboot_last <= now + DEFAULT_SERVER_UPTIME:
bucket = RebootBucket(next(self._reboot_dates))
self._reboot_buckets.append(bucket)
self._reboot_last = bucket.timestamp
while self._reboot_buckets[0].timestamp < now:
self._reboot_buckets.pop(0)
class PartitionDict(dict):
"""Dict that creates partitions on demand.
We use this instead of collections.defaultdict so that we can provide
the new partition with its label, to be propagated to its allocations.
"""
def __missing__(self, label):
"""Create a new partition, passing the label to its constructor.
"""
self[label] = Partition(label=label)
return self[label]
# pylint: disable=invalid-name
def reboot_dates(schedule, start_date=None):
"""Generate list of valid reboot dates.
"""
date = datetime.date.today()
if start_date:
date = start_date
while True:
weekday = date.weekday()
if weekday in schedule:
h, m, s = schedule[weekday]
yield time.mktime((date.year, date.month, date.day,
h, m, s, 0, 0, 0))
date += datetime.timedelta(days=1)
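# A hedged usage sketch (not part of the module): reboot_dates is an infinite
# generator and is consumed lazily, e.g. with next(). The schedule below is an
# illustrative placeholder mapping weekday -> (hour, minute, second); it has
# the same shape as the default used by Partition above.
#
#   schedule = {day: (23, 59, 59) for day in range(7)}   # a reboot slot each day
#   dates = reboot_dates(schedule)
#   first, second = next(dates), next(dates)             # epoch timestamps, one day apart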
class RebootBucket:
"""Bucket of servers to be rebooted at the same time.
"""
__slots__ = (
'timestamp',
'servers',
)
def __init__(self, timestamp):
self.timestamp = timestamp
self.servers = []
def add(self, server):
"""Add server to this bucket.
"""
self.servers.append(server)
server.valid_until = self.timestamp
_LOGGER.info('Setting valid until on server: %s %s',
server.name, server.valid_until)
def remove(self, server):
"""Remove server from this bucket.
"""
try:
self.servers.remove(server)
except ValueError:
pass
def cost(self, server):
"""The cost of adding server to this bucket.
"""
if self.timestamp > server.up_since + DEFAULT_SERVER_UPTIME:
return float('inf')
if self.timestamp < server.up_since + MIN_SERVER_UPTIME:
return float('inf')
return len(self.servers)
class PlacementFeasibilityTracker:
"""Tracks similar apps placement failures."""
def __init__(self):
self.recorder = dict()
def feasible(self, app):
"""Checks if it is feasible to satisfy demand."""
constraints, demand = app.shape()
if constraints in self.recorder:
# If demand is >= than recorded failure, placement is not feasible.
if _all_ge(demand, self.recorder[constraints]):
return False
return True
def adjust(self, app):
"""Adjust info about failed placement."""
constraints, demand = app.shape()
if constraints not in self.recorder:
self.recorder[constraints] = demand
else:
if _all_le(demand, self.recorder[constraints]):
self.recorder[constraints] = demand
class Cell(Bucket):
"""Top level node.
"""
__slots__ = (
'partitions',
'next_event_at',
'apps',
'identity_groups',
)
def __init__(self, name):
super(Cell, self).__init__(name, traits=0, level='cell')
self.partitions = PartitionDict()
self.apps = dict()
self.identity_groups = collections.defaultdict(IdentityGroup)
self.next_event_at = np.inf
def add_app(self, allocation, app):
"""Adds application to the scheduled list.
"""
assert allocation is not None
if app.allocation:
app.allocation.remove(app.name)
allocation.add(app)
self.apps[app.name] = app
if app.identity_group:
app.identity_group_ref = self.identity_groups[app.identity_group]
def remove_app(self, appname):
"""Remove app from scheduled list.
"""
if appname not in self.apps:
return
app = self.apps[appname]
servers = self.members()
if app.server in servers:
servers[app.server].remove(app.name)
if app.allocation:
app.allocation.remove(app.name)
app.release_identity()
del self.apps[appname]
def configure_identity_group(self, name, count):
"""Add identity group to the cell.
"""
if name not in self.identity_groups:
self.identity_groups[name] = IdentityGroup(count)
else:
self.identity_groups[name].adjust(count)
def remove_identity_group(self, name):
"""Remove identity group.
"""
ident_group = self.identity_groups.get(name)
if ident_group:
in_use = False
for app in six.itervalues(self.apps):
if | |
not apps:
caller.msg("Application manager not found! Please inform the admins.")
return
char_ob = roster.get_character(char_name)
if not char_ob:
caller.msg("No such character on the roster.")
return
if char_ob.roster.roster.name != "Available":
caller.msg("That character is not marked as available for applications.")
return
apps.add_app(char_ob, email, app_string)
mess = "Successfully applied to play %s. " % char_name.capitalize()
mess += "You will receive a response by email once your application has been approved or declined."
caller.msg(mess)
message = "{wNew character application by [%s] for %s" % (caller.key.capitalize(), char_name.capitalize())
inform_staff(message)
return
if ('family' in args or 'fealty' in args or 'concept' in args) and not self.rhs:
caller.msg("The filters of 'family', 'fealty', 'social rank', " +
"or 'concept' require an argument after an '='.")
return
if not self.rhs:
filters = args.split(",")
if 'all' in switches:
match_list = roster.search_by_filters(filters)
list_characters(caller, match_list, "Active Characters", roster, False)
match_list = roster.search_by_filters(filters, "available")
list_characters(caller, match_list, "Available Characters", roster, False)
return
rhslist = self.rhslist
lhslist = self.lhslist
keynames = []
for attr_filter in lhslist:
if attr_filter in ['family', 'fealty', 'concept', 'social rank']:
keynames.append(attr_filter)
if len(keynames) != len(rhslist):
caller.msg("Not enough arguments provided for the given filters.")
return
filter_dict = dict(zip(keynames, rhslist))
family = filter_dict.get('family', "None")
fealty = filter_dict.get('fealty', "None")
concept = filter_dict.get('concept', "None")
social_rank = filter_dict.get('social rank', "None")
if 'all' in switches:
match_list = roster.search_by_filters(lhslist, "active", concept, fealty, social_rank, family)
list_characters(caller, match_list, "Active Characters", roster, False)
match_list = roster.search_by_filters(lhslist, "available", concept, fealty, social_rank, family)
list_characters(caller, match_list, "Available Characters", roster, False)
return
class CmdAdminRoster(ArxPlayerCommand):
"""
@chroster - Changes the roster. Admin commands.
Usage:
@chroster/move <entry>=<new roster area>
@chroster/note <entry>=<Added note>
@chroster/email <entry>=<new email>
@chroster/retire <entry>=<notes>
@chroster/gone <entry>=<notes>
@chroster/view <entry>
@chroster/markavailable <entry>
Admin for roster commands. Added characters go in the unavailable
and inactive sections until moved to the active section.
"""
key = "@chroster"
help_category = "Admin"
locks = "cmd:perm(chroster) or perm(Wizards)"
@staticmethod
def award_alt_xp(alt, xp, history, current):
if xp > current.total_xp:
xp = current.total_xp
altchar = alt.entry.character
if xp > history.xp_earned:
xp = history.xp_earned
if not altchar.db.xp:
altchar.db.xp = 0
altchar.db.xp += xp
def func(self):
caller = self.caller
args = self.args
switches = self.switches
if not args or not switches:
caller.msg("Usage: @chroster/switches <arguments>")
return
from web.character.models import RosterEntry, Roster, AccountHistory
if "markavailable" in switches:
try:
entry = RosterEntry.objects.get(character__db_key__iexact=self.lhs)
if entry.roster.name == "Active":
self.msg("They are currently played. Use /retire instead.")
return
roster = Roster.objects.available
entry.roster = roster
entry.save()
try:
bb = BBoard.objects.get(db_key__iexact="Roster Changes")
msg = "%s has been placed on the roster and is now available for applications." % entry.character
url = "http://play.arxmush.org" + entry.character.get_absolute_url()
msg += "\nCharacter page: %s" % url
subject = "%s now available" % entry.character
bb.bb_post(self.caller, msg, subject=subject, poster_name="Roster")
except BBoard.DoesNotExist:
self.msg("Board not found for posting announcement")
except RosterEntry.DoesNotExist:
self.msg("Could not find a character by that name.")
# try to delete any apps
from .jobs import get_apps_manager
apps = get_apps_manager()
if not apps:
return
apps_for_char = apps.view_all_apps_for_char(args)
if not apps_for_char:
caller.msg("No applications found.")
return
pend_list = [ob for ob in apps_for_char if ob[9]]
if not pend_list:
caller.msg("No pending applications found.")
return
for pending_app in pend_list:
app_num = pending_app[0]
apps.delete_app(caller, app_num)
return
if 'add' in switches:
from typeclasses.characters import Character
try:
character = Character.objects.get(db_key__iexact=self.lhs)
except Character.DoesNotExist:
self.msg("No character by that name.")
return
try:
RosterEntry.objects.get(character=character)
caller.msg("Character already is in the roster.")
return
except RosterEntry.DoesNotExist:
active = Roster.objects.active
targ = caller.search(self.lhs)
if not targ:
return
active.entries.create(player=targ, character=character)
caller.msg("Character added to active roster.")
return
if 'move' in switches:
lhs = self.lhs
rhs = self.rhs
try:
entry = RosterEntry.objects.get(character__db_key__iexact=lhs)
roster = Roster.objects.get(name__iexact=rhs)
entry.roster = roster
entry.save()
inform_staff("%s moved %s to %s roster." % (caller, lhs, rhs))
caller.msg("Moved %s to %s roster." % (lhs, rhs))
return
except Exception as err:
caller.msg("Move failed: %s" % err)
return
if 'retire' in switches or 'gone' in self.switches:
active = Roster.objects.active
if 'retire' in self.switches:
new_roster = Roster.objects.available
else: # character is dead/gone
new_roster = Roster.objects.gone
try:
entry = active.entries.get(character__db_key__iexact=self.lhs)
except RosterEntry.DoesNotExist:
caller.msg("Character not found in active roster.")
return
entry.roster = new_roster
current = entry.current_account
xp = entry.character.db.xp or 0
if current:
try:
history = AccountHistory.objects.get(account=current, entry=entry)
if xp < 0:
xp = 0
try:
alt = AccountHistory.objects.get(Q(account=current) & ~Q(entry=entry) &
Q(end_date__isnull=True))
self.award_alt_xp(alt, xp, history, current)
except AccountHistory.DoesNotExist:
if xp > current.total_xp:
xp = current.total_xp
# null check
if not current.gm_notes:
current.gm_notes = ""
current.gm_notes += "\n\nUnspent xp: %s" % xp
current.save()
except AccountHistory.MultipleObjectsReturned:
caller.msg("ERROR: Found more than one account. Using the first.")
alt = AccountHistory.objects.filter(Q(account=current) & ~Q(entry=entry)).exclude(
end_date__isnull=False).first()
self.award_alt_xp(alt, xp, history, current)
except Exception as err:
import traceback
print("{rEncountered this error when trying to transfer xp{n:\n%s" % err)
traceback.print_exc()
entry.character.db.xp = 0
entry.character.db.total_xp = 0
except AccountHistory.DoesNotExist:
history = AccountHistory.objects.create(account=current, entry=entry)
except AccountHistory.MultipleObjectsReturned:
history = AccountHistory.objects.filter(account=current, entry=entry).last()
entry.current_account = None
entry.action_points = 100
date = datetime.now()
history.end_date = date
if not history.gm_notes and self.rhs:
history.gm_notes = self.rhs
elif self.rhs:
history.gm_notes += self.rhs
history.save()
# set up password
# noinspection PyBroadException
try:
import string
import random
newpass = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
entry.player.set_password(newpass)
entry.player.save()
caller.msg("Random password generated for %s." % entry.player)
except Exception:
import traceback
traceback.print_exc()
caller.msg("Error when setting new password. Logged.")
inform_staff("%s has returned %s to the %s roster." % (caller, self.lhs, new_roster.name))
entry.save()
if "retire" in self.switches:
try:
bb = BBoard.objects.get(db_key__iexact="Roster Changes")
msg = "%s no longer has an active player and is now available for applications." % entry.character
url = "http://play.arxmush.org" + entry.character.get_absolute_url()
msg += "\nCharacter page: %s" % url
subject = "%s now available" % entry.character
bb.bb_post(self.caller, msg, subject=subject, poster_name="Roster")
except BBoard.DoesNotExist:
self.msg("Board not found for posting announcement")
from server.utils.arx_utils import post_roster_cleanup
post_roster_cleanup(entry)
return
if 'view' in switches:
try:
entry = RosterEntry.objects.get(character__db_key__iexact=self.args)
except RosterEntry.DoesNotExist:
caller.msg("No character found by that name.")
return
caller.msg("{w" + "-"*20 + "{n")
caller.msg("{wPlayer Object:{n %s {wID:{n %s" % (entry.player.key, entry.player.id))
caller.msg("{wEmail{n: %s" % entry.player.email)
line = "{wCharacter: {n"
line += entry.character.key
line += " {wID:{n %s" % entry.character.id
caller.msg(line)
line = "{wGM Notes:{n " + entry.gm_notes
caller.msg(line)
if entry.current_account:
caller.msg("{wCurrent Account:{n %s" % entry.current_account)
caller.msg("{wAlts:{n %s" % ", ".join(str(ob) for ob in entry.alts))
return
if 'email' in switches:
lhs, rhs = self.lhs, self.rhs
if not lhs or not rhs:
caller.msg("Usage: @chroster/email user=email")
return
change_email(lhs, rhs, caller)
inform_staff("%s changed email for %s in roster." % (caller, lhs))
caller.msg("Email for %s changed to %s." % (lhs, rhs))
return
if 'note' in switches:
lhs = self.lhs
if not lhs:
caller.msg("Usage: @chroster/note <character>=<note>")
return
if not self.rhs:
caller.msg("Cannot add an empty note.")
return
add_note(lhs, self.rhs, caller)
inform_staff("%s added a note to %s in roster." % (caller, lhs))
caller.msg("New note added.")
return
def display_header(caller, character, show_hidden=False):
"""
Header information. Name, Desc, etc.
"""
if not caller or not character:
return
longname = character.db.longname
if not longname:
longname = character.key
if not longname:
longname = "Unknown"
longname = longname.capitalize()
longname = longname.center(60)
quote = character.db.quote
if not quote:
quote = ""
else:
quote = '"' + quote + '"'
quote = quote.center(60)
srank = character.db.social_rank
if not srank:
srank = "Unknown"
concept = character.db.concept
if not concept:
concept = "Unknown"
fealty = character.db.fealty
if not fealty:
fealty = "Unknown"
fealty = fealty.capitalize()
family = character.db.family
if not family:
family = "Unknown"
family = family.capitalize()
gender = character.db.gender
if not gender:
gender = "Unknown"
gender = gender.capitalize()
age = character.db.age
if not age:
age = "Unknown"
else:
age = str(age)
birth = character.db.birthday
if not birth:
birth = "Unknown"
religion = character.db.religion
if not religion:
religion = "Unknown"
vocation = character.db.vocation
if not vocation:
vocation = "Unknown"
vocation = vocation.capitalize()
height = character.db.height or ""
eyecolor = character.db.eyecolor or ""
eyecolor = eyecolor.title()
haircolor = character.db.haircolor or ""
haircolor = haircolor.title()
skintone = character.db.skintone or ""
skintone = skintone.title()
marital_status = character.db.marital_status or "Single"
header = \
"""
{w%(longname)s{n
%(quote)s
{w==================================================================={n
{wSocial Rank:{n %(srank)-20s {wConcept:{n %(concept)-20s
{wFealty:{n %(fealty)-25s {wFamily:{n %(family)-20s
{wGender:{n %(gender)-25s {wAge:{n %(age)-20s
{wBirthday:{n %(birth)-23s {wReligion:{n %(religion)-20s
{wVocation:{n %(vocation)-23s {wHeight:{n %(height)-20s
{wEye Color:{n %(eyecolor)-22s {wHair Color:{n %(haircolor)-20s
{wSkin Tone:{n %(skintone)-22s {wMarital Status:{n %(marital_status)-20s
""" % {'longname': longname, 'quote': utils.fill(quote), 'srank': srank,
'concept': concept, 'fealty': | |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # has .1 after column names for some reason
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
cls.transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
cls.woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
cls.wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
cls.wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
cls.wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
cls.wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
cls.wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
cls.wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
cls.chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
cls.chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
cls.fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
def calculateTrans(cls,transVol):
# transVol [df] - item, volume (in Mg) by product, TransCode; indexed by fiberCode or other label
# transPct [df] - % traversed for transMode by transCode; indexed by transCode
# transKM [df] - distance traversed for transMode by transCode; indexed by transCode
# transUMI [s] - unit impact by mode (truck, train, boat); indexed by "transUMI"
transImpact = pd.Series(0, index = cls.fProd)
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # index non-zero/non-NaN elements only
transVol = transVol.loc[tC.index]
for t in cls.fProd:
for m in cls.transUMI.columns:
transImpact[t] += sum(transVol[t] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values * 1)
return transImpact
def calculateChem(cls,chemicals,prodDemand):
# chemicals [df] - nonfiber name, % use by product, transCode, impact factor; indexed by number
# prodDemand [df] - total demand; indexed by product
chemImpact = pd.Series(0, index = cls.fProd, name = 'chemImp')
chemVol = pd.DataFrame(0, index = chemicals.index, columns = cls.fProd)
for t in cls.fProd:
chemImpact[t] = sum(prodDemand[t].values * chemicals[t] * chemicals['Impact Factor'])
chemVol[t] = chemicals[t] * prodDemand[t].values
chemVol = chemVol.join(chemicals['TransCode'])
chemTrans = pd.Series(cls.calculateTrans(chemVol), name = 'chemTrans')
chemImpact = pd.DataFrame(chemImpact)
return pd.concat([chemImpact, chemTrans], axis=1)
def calculateEoL(cls,eolEmissions,consColl):
# eolEmissions [df] - biogenic and fossil CO2 emission factors & transportation code by product; indexed by bio/fosCO2
# consColl [df] - domestic consumption, collection, and recovery by product; indexed by name
prod2landfill = pd.Series(consColl.loc['Domestic Consumption'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'prod2landfill')
mrf2landfill = pd.Series(consColl.loc['Collection Volume'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'mrf2landfill')
bioEoL = pd.Series(prod2landfill * eolEmissions.loc['bioCO2'], index = cls.fProd, name = 'bioEoL')
mrf2landfill = pd.DataFrame(mrf2landfill) # works b/c all prods have same TransCode
transEoL = pd.Series(cls.calculateTrans(mrf2landfill.T.assign(TransCode=eolEmissions.loc['TransCode'].values[0])),
index = cls.fProd, name = 'eolTrans')
fesTransEoL = pd.Series(prod2landfill * eolEmissions.loc['fossilCO2'] + transEoL, index = cls.fProd,
name = 'fesTransEoL')
bftEoL = pd.Series(bioEoL + fesTransEoL, name = 'bftEoL')
return pd.concat([bioEoL, fesTransEoL, bftEoL, transEoL], axis=1)
def getEnergyYldCoeff(cls,f2pVol,pbpVol):
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# PYCoeff [s] - pulp yield coeffient; indexed by pulp
f2pByPulp = pd.Series(0, index = pbpVol.index, name = 'fiber2pulp')
for p in cls.rPulp:
f2pByPulp[p] = sum([f2pVol.loc[cls.rFiber,t].sum() for t in | |
auth_token
self.product_instance_id = product_instance_id
# Manufacturer name
self.corp_name = corp_name
# Data model ID
self.peripheral_data_model_id = peripheral_data_model_id
# Peripheral device ID
self.peripheral_id = peripheral_id
# Peripheral device name
self.peripheral_name = peripheral_name
# Scene code
self.scene = scene
# Device type code, required; corresponds to the device type in the
# asset management platform.
#
# Enum values:
#
# Vehicle 1000
# Vehicle / four-wheeler 1001
# Vehicle / four-wheeler / battery-electric four-wheeler 1002
# Vehicle / four-wheeler / hybrid four-wheeler 1003
# Vehicle / four-wheeler / fuel-powered four-wheeler 1004
# Vehicle / two-wheeler 1011
# Vehicle / two-wheeler / two-wheeled bicycle 1012
# Vehicle / two-wheeler / two-wheeled moped 1013
#
# Battery swap cabinet 2000
# Battery swap cabinet / two-wheeler battery swap cabinet 2001
#
# Battery 3000
# Battery / lithium iron phosphate battery 3001
# Battery / ternary lithium battery 3002
#
# Recycling equipment 4000
#
# Waste sorting and recycling 4001
#
# Car washing machine 5000
self.device_type_code = device_type_code
# Device unit price, in cents (fen)
self.initial_price = initial_price
# Time the device left the factory
self.factory_time = factory_time
# Time the device was deployed
self.release_time = release_time
def validate(self):
self.validate_required(self.peripheral_data_model_id, 'peripheral_data_model_id')
self.validate_required(self.peripheral_id, 'peripheral_id')
self.validate_required(self.scene, 'scene')
if self.factory_time is not None:
self.validate_pattern(self.factory_time, 'factory_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')
if self.release_time is not None:
self.validate_pattern(self.release_time, 'release_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.corp_name is not None:
result['corp_name'] = self.corp_name
if self.peripheral_data_model_id is not None:
result['peripheral_data_model_id'] = self.peripheral_data_model_id
if self.peripheral_id is not None:
result['peripheral_id'] = self.peripheral_id
if self.peripheral_name is not None:
result['peripheral_name'] = self.peripheral_name
if self.scene is not None:
result['scene'] = self.scene
if self.device_type_code is not None:
result['device_type_code'] = self.device_type_code
if self.initial_price is not None:
result['initial_price'] = self.initial_price
if self.factory_time is not None:
result['factory_time'] = self.factory_time
if self.release_time is not None:
result['release_time'] = self.release_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('corp_name') is not None:
self.corp_name = m.get('corp_name')
if m.get('peripheral_data_model_id') is not None:
self.peripheral_data_model_id = m.get('peripheral_data_model_id')
if m.get('peripheral_id') is not None:
self.peripheral_id = m.get('peripheral_id')
if m.get('peripheral_name') is not None:
self.peripheral_name = m.get('peripheral_name')
if m.get('scene') is not None:
self.scene = m.get('scene')
if m.get('device_type_code') is not None:
self.device_type_code = m.get('device_type_code')
if m.get('initial_price') is not None:
self.initial_price = m.get('initial_price')
if m.get('factory_time') is not None:
self.factory_time = m.get('factory_time')
if m.get('release_time') is not None:
self.release_time = m.get('release_time')
return self
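# A minimal usage sketch (not part of the generated SDK): these TeaModel
# classes are plain data holders that serialize to and from dicts via to_map()
# and from_map(). The field values below are illustrative placeholders only.
#
#   request = ImportPeripheralRequest(
#       peripheral_data_model_id='model-001',   # hypothetical data model ID
#       peripheral_id='device-001',             # hypothetical peripheral ID
#       scene='LEASE',                          # hypothetical scene code
#   )
#   request.validate()                 # checks required fields and time formats
#   payload = request.to_map()         # plain dict, ready to send on the wire
#   same = ImportPeripheralRequest().from_map(payload)   # lossless round trip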
class ImportPeripheralResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
chain_peripheral_id: str = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally means the call succeeded
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# On-chain peripheral device Id
self.chain_peripheral_id = chain_peripheral_id
def validate(self):
pass
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.chain_peripheral_id is not None:
result['chain_peripheral_id'] = self.chain_peripheral_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('chain_peripheral_id') is not None:
self.chain_peripheral_id = m.get('chain_peripheral_id')
return self
class GetPeripheralBychainperipheralidRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
chain_peripheral_id: str = None,
):
# Authorization token in OAuth mode
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# On-chain peripheral device Id
self.chain_peripheral_id = chain_peripheral_id
def validate(self):
self.validate_required(self.chain_peripheral_id, 'chain_peripheral_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.chain_peripheral_id is not None:
result['chain_peripheral_id'] = self.chain_peripheral_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('chain_peripheral_id') is not None:
self.chain_peripheral_id = m.get('chain_peripheral_id')
return self
class GetPeripheralBychainperipheralidResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
peripheral: Peripheral = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally means the call succeeded
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# Peripheral device information
self.peripheral = peripheral
def validate(self):
if self.peripheral:
self.peripheral.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.peripheral is not None:
result['peripheral'] = self.peripheral.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('peripheral') is not None:
temp_model = Peripheral()
self.peripheral = temp_model.from_map(m['peripheral'])
return self
class GetPeripheralByperipheralidRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
peripheral_id: str = None,
scene: str = None,
):
# Authorization token in OAuth mode
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# Peripheral device ID
self.peripheral_id = peripheral_id
# Scene code
self.scene = scene
def validate(self):
self.validate_required(self.peripheral_id, 'peripheral_id')
self.validate_required(self.scene, 'scene')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.peripheral_id is not None:
result['peripheral_id'] = self.peripheral_id
if self.scene is not None:
result['scene'] = self.scene
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('peripheral_id') is not None:
self.peripheral_id = m.get('peripheral_id')
if m.get('scene') is not None:
self.scene = m.get('scene')
return self
class GetPeripheralByperipheralidResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
peripheral: Peripheral = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally means the call succeeded
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# Peripheral device information
self.peripheral = peripheral
def validate(self):
if self.peripheral:
self.peripheral.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.peripheral is not None:
result['peripheral'] = self.peripheral.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('peripheral') is not None:
temp_model = Peripheral()
self.peripheral = temp_model.from_map(m['peripheral'])
return self
class ListPeripheralBysceneRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
scene: str = None,
):
# Authorization token in OAuth mode
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# Scene code
self.scene = scene
def validate(self):
self.validate_required(self.scene, 'scene')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.scene is not None:
result['scene'] = self.scene
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('scene') is not None:
self.scene = m.get('scene')
return self
class ListPeripheralBysceneResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
peripheral_list: List[Peripheral] = None,
):
# Unique request ID, used for tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally means the call succeeded
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# List of peripheral device information
self.peripheral_list = peripheral_list
def validate(self):
if self.peripheral_list:
for k in self.peripheral_list:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['peripheral_list'] = []
if self.peripheral_list is not None:
for k in self.peripheral_list:
result['peripheral_list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
self.peripheral_list = []
if m.get('peripheral_list') is not None:
for k in m.get('peripheral_list'):
temp_model = Peripheral()
self.peripheral_list.append(temp_model.from_map(k))
return self
class CreateDistributedeviceByperipheralidRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
corp_name: str = None,
peripheral_data_model_id: str = None,
peripheral_id: str = None,
peripheral_name: str = None,
scene: str = None,
device_type_code: int =
# Post processing tools for PyRFQ bunch data
# <NAME> July 2019
import numpy as np
from scipy import constants as const
from scipy import stats
import h5py
import matplotlib.pyplot as plt
from temp_particles import IonSpecies
from dans_pymodules import FileDialog, ParticleDistribution
from bunch_particle_distribution import BunchParticleDistribution
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time
import pickle
import os
import hdbscan
__author__ = "<NAME>"
__doc__ = """Post processing utilities for bunches made using PyRFQ module"""
# Some constants
clight = const.value("speed of light in vacuum") # (m/s)
amu_kg = const.value("atomic mass constant") # (kg)
echarge = const.value("elementary charge")
DIHYDROGENMASS = 3.34615e-27
# Class for handling bunch related post processing actions
class BunchPostProcessor(object):
def __init__(self, rfq_freq, bunch_filename=None, sim_data_filename=None):
# bunch_filename: filename of the pickle dump with the bunch data in it
# sim_data_filename: filename of the hdf5 dump with the simulation data in it
self._rfq_end = None # end of the rfq
self._rfq_freq = rfq_freq # rfq frequency
self._bunch_filename = bunch_filename # Pickle file with bunch data
self._sim_data_filename = sim_data_filename # hdf5 file with simulation data
# Internal variables
self._bunch_found = False
self._velocity_calculated = False
self._velocity_array = []
self._zclose = self._rfq_end
self._zfar = None
self._particledistribution = None
self._distribution_data = None
self._z_ekin = None
self._z_phase = None
def find_bunch(self, max_steps, start_step, rfq_end, velocity_min_sample=7500, bunch_species_name=None, filename=None, step_interval=1, plot_bunch_name=None):
# Finds and dumps a bunch given hdf5 simulation dump
# max_steps: max # steps code should try to find a bunch
# start_step: step at which bunch finding should begin
# rfq_end: (m) value of the end of the rfq
# velocity_min_sample: minimum number of velocity samples to calculate wavelength
# bunch_species_name: name (string) of the species that should be used to find the bunch
# filename: hdf5 dump filename
# step_interval: interval at which the original simulation saved data (how many steps between saved steps)
# plot_bunch_name: name of the species to plot after the bunch has been found. If None, will not plot
#
# if bunch_species_name is None, bunch will be found using all species.
#
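# A minimal usage sketch; the file name and numeric values are illustrative assumptions,
# not taken from the original simulations:
#   pp = BunchPostProcessor(rfq_freq=32.8e6, sim_data_filename="sim_dump.h5")
#   pp.find_bunch(max_steps=2000, start_step=4000, rfq_end=1.33,
#                 bunch_species_name="H2+", step_interval=10, plot_bunch_name="H2+")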
self._rfq_end = rfq_end
self._zclose = self._rfq_end
if (self._sim_data_filename == None):
if(filename == None):
fd = FileDialog()
filename = fd.get_filename()
else:
filename = self._sim_data_filename
f = h5py.File(filename, 'r')
max_steps_find_bunch = start_step + max_steps
# Creating the list of species to measure a bunch from
# If passed in species name is None, then assume measure all species.
species_list = f['SpeciesList']
species_dict = {}
if (bunch_species_name is None):
for key in species_list:
species_dict[key] = tuple(species_list[key])
else:
species_dict[bunch_species_name] = tuple(species_list[bunch_species_name])
steps_only_list = list(f.keys())
steps_only_list.remove("SpeciesList")
step_array_int = np.array([int(elem[5:]) for elem in np.array(steps_only_list)])
# ensure start_step has data
if (start_step not in step_array_int):
if (start_step < step_array_int.min()):
print("Data has not started to be collected at that starting step yet. Exiting.")
exit()
if (start_step > step_array_int.max()):
print("Requested start step is past last data collected step. Exiting")
exit()
else:
print("Requested step is within collected data steps but not congruent with step interval. Finding nearest step as starting point")
idx = (np.abs(step_array_int - start_step)).argmin()
start_step = step_array_int[idx]
print("New start step: {}".format(start_step))
if (np.max(f["Step#"+str(start_step)]['z']) < self._rfq_end):
print("Particles have not yet reached the end of the RFQ. Abandoning bunch finding.")
return None
for i in range(0, max_steps):
# Only calls measure_bunch for steps with data in them
print("Currently at: {}".format((start_step+(i*step_interval))))
self.measure_bunch(f["Step#"+str((start_step)+i*step_interval)], velocity_min_sample=velocity_min_sample, species_dict=species_dict, plot_bunch_name=plot_bunch_name)
if (self._bunch_found):
break
def plot_bunch(self, xdata, ydata, velocity_data=None, mass=None):
# Plots the particles down the rfq with the found bunch highlighted
# xdata: data on the x axis
# ydata: data on the y axis
# velocity_data: z velocity of particles
# mass: mass of particles
# if mass == None, energy will not be displayed
fig, ax = plt.subplots(figsize = (20, 8))
ax.axvline(x=self._zclose, color='green')
ax.axvline(x=self._zfar, color='green')
# particlesall, = plt.plot([], [], 'bo', ms=0.5, color='blue')
# particlesbunch, = plt.plot([], [], 'bo', ms=0.5, color='red')
ax.set_xlim((-0.014, 1.6))
ax.set_ylim((-0.025, 0.025))
ax.set_xlabel('Z (m)')
xscale, xunits = 1e-2, 'cm'
ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x/xscale))
ax.yaxis.set_major_formatter(ticks_x)
ax.set_ylabel('X (' + xunits + ')')
ax.set_title('X by Z position down beam')
# particlesall.set_data(step_data['z'], step_data['x'])
# particlesbunch.set_data(bunch_particles['H2+']["z"], bunch_particles['H2+']["x"])
# print(np.max(bunch_particles['H2+']['z']))
fig.tight_layout(pad=5)
if mass == None:
particlesall = ax.scatter(np.array(xdata), np.array(ydata), s=0.5)
else:
# Energy calculation
# NOT RELATIVISTIC RIGHT NOW
c = 0.5 * mass * (np.square(np.array(velocity_data))) * 6.242e15
particlesall = ax.scatter(np.array(xdata), np.array(ydata), s=0.5, c=c, cmap='plasma')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='1%', pad=0.05)
clb = fig.colorbar(particlesall, cax=cax)
clb.set_label('Energy (keV)', rotation=-90)
plt.show()
def measure_bunch(self, step_data, velocity_min_sample=7500, species_dict=None, plot_bunch_name=None):
# Measures the particles to find bunch. Should not be called by user
# step_data from the dump
# species_dict: dictionary of species names
# plot_bunch_name: name of species to plot. If none, will not plot
if self._bunch_found:
return
zvelocities = np.array(step_data['vz'])
if not self._velocity_calculated:
# Gather velocities at end of RFQ to calculate wavelength
idx = []
if (len(species_dict.keys()) == 1): #only one species
step_zdata = np.array(step_data['z'])
step_mdata = np.array(step_data['m'])
step_qdata = np.array(step_data['q'])
# Extract indices where particles are within range of the end of the rfq,
# and the mass and charge match the specified species
key = list(species_dict.keys())[0]
desired_mass = species_dict[key][0]
desired_charge = species_dict[key][1]
for i in range(0, len(step_zdata)):
if step_zdata[i] < (self._rfq_end-0.01):
continue
if step_zdata[i] > (self._rfq_end+0.01):
continue
if step_mdata[i] != desired_mass:
continue
if step_qdata[i] != desired_charge:
continue
idx.append(i)
self._velocity_array = np.concatenate((self._velocity_array, zvelocities[idx]))
else: #all species
step_zdata = np.array(step_data['z'])
idx = np.where(np.logical_and(step_zdata>(self._rfq_end-0.01), step_zdata<(self._rfq_end+0.01)))
self._velocity_array = np.concatenate((self._velocity_array, zvelocities[idx]))
if (len(self._velocity_array) > velocity_min_sample): # if number of samples collected > defined minimum
average_velocity = np.mean(self._velocity_array) # calculate wavelength
wavelength = average_velocity / self._rfq_freq
self._velocity_calculated = True
self._zfar = self._zclose + wavelength
return
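# Illustrative numbers only: for an average z velocity of ~4.2e6 m/s and an RFQ frequency
# of 32.8 MHz, the bunch wavelength is roughly 4.2e6 / 32.8e6 ~= 0.128 m, so the search
# window [zclose, zfar] spans about 13 cm past the RFQ exit.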
if self._velocity_calculated: # move on to looking for a bunch
z_positions = []
if (len(species_dict.keys()) == 1): # if only looking for bunch of one species
step_zdata = np.array(step_data['z'])
step_mdata = np.array(step_data['m'])
step_qdata = np.array(step_data['q'])
key = list(species_dict.keys())[0]
desired_mass = species_dict[key][0]
desired_charge = species_dict[key][1]
idx = []
for i in range(0, len(step_zdata)): # collect particles that are within wavelength, have the right mass
if step_zdata[i] < (self._zclose): # and the right charge for specified species
continue
if step_zdata[i] > (self._zfar):
continue
if step_mdata[i] != desired_mass:
continue
if step_qdata[i] != desired_charge:
continue
idx.append(i)
z_positions = step_zdata[idx]
else: # gather all particles
zpart = np.array(step_data['z'])
z_positions = [item for item in zpart if (self._zclose < item < self._zfar)]
print("Result: {}, Desired: {}".format(np.around(np.mean(z_positions), decimals=3), np.around((self._zfar + self._zclose) / 2, decimals=3)))
if (np.around(np.mean(z_positions), decimals=3) == (np.around(((self._zfar - self._zclose) / 2) + self._zclose, decimals=3))):
# If the mean z position of the particles within one wavelength is the same as the exact center (rounded to 3 decimals)
# a bunch has been found
self._bunch_found = True
step_zdata = np.array(step_data['z'])
bunchparticles_indices = np.where(np.logical_and(step_zdata>(self._zclose), step_zdata<(self._zfar)))
bunch_particles_x = np.array(step_data['x'])[bunchparticles_indices]
bunch_particles_y = np.array(step_data['y'])[bunchparticles_indices]
bunch_particles_z = np.array(step_data['z'])[bunchparticles_indices]
bunch_particles_vx = np.array(step_data['vx'])[bunchparticles_indices]
bunch_particles_vy = np.array(step_data['vy'])[bunchparticles_indices]
bunch_particles_vz = np.array(step_data['vz'])[bunchparticles_indices]
bunch_particles_xp = bunch_particles_vx / bunch_particles_vz
bunch_particles_yp = bunch_particles_vy / bunch_particles_vz
bunch_particles_m = np.array(step_data['m'])[bunchparticles_indices]
bunch_particles_q = np.array(step_data['q'])[bunchparticles_indices]
bunch_particles = {}
for key in species_dict.keys(): # label them by species
bunch_particles[key] = {}
idx = []
step_mdata = np.array(step_data['m'])
step_qdata = np.array(step_data['q'])
desired_mass = species_dict[key][0]
desired_charge = species_dict[key][1]
for i in range(0, len(bunchparticles_indices[0])):
if bunch_particles_m[i] == desired_mass:
if bunch_particles_q[i] == desired_charge:
idx.append(i)
bunch_particles[key]['x'] = bunch_particles_x[idx]
bunch_particles[key]['y'] = bunch_particles_y[idx]
bunch_particles[key]['z'] = bunch_particles_z[idx]
bunch_particles[key]['vx'] = bunch_particles_vx[idx]
bunch_particles[key]['vy'] = bunch_particles_vy[idx]
bunch_particles[key]['vz'] = bunch_particles_vz[idx]
bunch_particles[key]['xp'] = bunch_particles_xp[idx]
bunch_particles[key]['yp'] = bunch_particles_yp[idx]
i = 0
while os.path.exists("bunch_particles.%s.dump" % i):
i += 1
pickle.dump(bunch_particles, open("bunch_particles.%s.dump" % i, "wb"))
print("Bunch found.")
if plot_bunch_name != None: # plot the desired species
step_zdata = np.array(step_data['z'])
step_mdata = np.array(step_data['m'])
step_qdata = np.array(step_data['q'])
key = list(species_dict.keys())[0]
desired_mass = species_dict[plot_bunch_name][0]
desired_charge = species_dict[plot_bunch_name][1]
idx = []
for i in range(0, len(step_zdata)):
if step_mdata[i] != desired_mass:
continue
if step_qdata[i] != desired_charge:
continue
idx.append(i)
self.plot_bunch(step_zdata[idx], np.array(step_data['x'])[idx], velocity_data=np.array(step_data['vz'])[idx], mass=desired_mass)
def test_cluster_detection(self, min_cluster_size, ion, z, vz):
# only plots bunch data with clusters highlighted, and main bunch shown to test if the
# cluster detection algorithm is working and if the user parameter "min_cluster_size" is
# appropriate
z_beta_rel =
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FIXME:
#
# - command line method to discover installed capabilities; print
# each's __doc__
#
# - do not pass device--each function should gather it from target's
# tags
"""
.. _pos_multiroot:
Provisioning OS: partitioning schema for multiple root FSs per device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Provisioning OS multiroot methodology partitions a system with
multiple root filesystems; different OSes are installed in each root
so it is fast to switch from one to another to run things in
automated fashion.
The key to the operation is that the server maintains a list of OS
images available to be rsynced to the target's filesystem. rsync can
copy straight or transmit only the minimum set of needed changes.
This also speeds up deployment of an OS to the root filesystems, as by
picking a root filesystem that has already installed one similar to
the one to be deployed (eg: a workstation vs a server version), the
amount of data to be transfered is greatly reduced.
Accordingly, the following scenarios are sorted from more to less data
transfer (and thus from slowest to fastest operation):
- can install on an empty root filesystem: in this case a full
installation is done
- can refresh an existing root filesystem to the destination: some
things might be shared or the same and a partial transfer can be
done; this might be the case when:
- moving from a distro to another
- moving from one version to another of the same distro
- moving from one spin of one distro to another
- can update an existing root filesystem: in this case very little
change is done and we are just verifying nothing was unduly
modified.
.. _pos_multiroot_partsizes:
Partition Size specification
----------------------------
To simplify setup of targets, a string such as *"1:4:10:50"* is given
to denote the sizes of the different partitions:
- 1 GiB for /boot
- 4 GiB for swap
- 10 GiB for scratch (can be used for whatever the script wants, needs
to be formatted/initialized before use)
- 50 GiB for multiple root partitions (until the disk size is exhausted)
"""
import operator
import os
import pprint
import random
import re
import Levenshtein
import commonl
from . import pos
from . import tc
# FIXME: deprecate that _device
def _disk_partition(target):
# we assume we are going to work on the boot device
device_basename = target.kws['pos_boot_dev']
device = "/dev/" + device_basename
target.shell.run('swapoff -a || true') # in case we autoswapped
# find device size (FIXME: Linux specific)
dev_info = target.pos.fsinfo_get_block(device_basename)
size_gb = int(dev_info['size']) / 1024 / 1024 / 1024
target.report_info("POS: %s is %d GiB in size" % (device, size_gb),
dlevel = 2)
partsizes = target.kws.get('pos_partsizes', None)
if partsizes == None:
raise tc.blocked_e(
"Can't partition target, it doesn't "
"specify pos_partsizes tag",
{ 'target': target } )
partsize_l = partsizes.split(":")
partsize_l = [ int(_partsize) for _partsize in partsize_l ]
boot_size = partsize_l[0]
swap_size = partsize_l[1]
scratch_size = partsize_l[2]
root_size = partsize_l[3]
# note we set partition #0 as boot
# Note we set the name of the boot partition; we use that later to
# detect the disk has a partitioning scheme we support. See above.
cmdline = """parted -a optimal -ms %(device)s unit GiB \
mklabel gpt \
mkpart primary fat32 0%% %(boot_size)s \
set 1 boot on \
name 1 %(boot_label_name)s \
mkpart primary linux-swap %(boot_size)s %(swap_end)s \
name 2 TCF-swap \
mkpart primary ext4 %(swap_end)s %(scratch_end)s \
name 3 TCF-scratch \
""" % dict(
boot_label_name = target._boot_label_name,
device = device,
boot_size = boot_size,
swap_end = boot_size + swap_size,
scratch_end = boot_size + swap_size + scratch_size,
)
offset = boot_size + swap_size + scratch_size
root_devs = [] # collect the root devices
pid = 4
while offset + root_size < size_gb:
cmdline += ' mkpart primary ext4 %d %d' % (offset, offset + root_size)
offset += root_size
root_devs.append(device_basename + target.kws['p_prefix']
+ "%d" % pid)
pid += 1
for count in range(3):
try:
target.shell.run(cmdline)
break
except tc.error_e as e:
if 'we have been unable to inform the kernel of the change' not in e.console_output:
raise
if count == 2:
raise
target.report_info("trying to re-partition again, partitions busy?")
target.pos.fsinfo_read(target._boot_label_name)
# format quick
for root_dev in root_devs:
target.property_set('pos_root_' + root_dev, "EMPTY")
# now format filesystems
#
# note we only format the system boot partition (1), the linux
# swap(2) and the linux scratch space (3)
boot_dev = device + target.kws['p_prefix'] + "1"
swap_dev = device + target.kws['p_prefix'] + "2"
home_dev = device + target.kws['p_prefix'] + "3"
# Note: use FAT vs VFAT: vfat name translation creates issues when
# doing long file names; fat32 does not have that problem.
target.shell.run("mkfs.fat -F32 -n TCF-BOOT " + boot_dev)
target.shell.run("mkswap -L tcf-swap " + swap_dev)
target.shell.run("mkfs.ext4 -FqL tcf-scratch " + home_dev)
def _rootfs_guess_by_image(target, image, boot_dev):
# Gave a None partition, means pick our own based on a guess. We
# know what image we want to install, so we will scan the all the
# target's root partitions (defined in tags/properties
# pos_root_XYZ) to see who has installed the most similar thing to
# image and use that (so it is faster to rsync it).
partl = {}
empties = []
# refresh target information FIXME: need a better method
target.rt = target.rtb.rest_tb_target_update(target.id)
for tag, value in target.rt.items():
if not tag.startswith("pos_root_"):
continue
dev_basename = tag.replace("pos_root_", "")
dev_name = "/dev/" + dev_basename
if value == 'EMPTY':
empties.append(dev_name)
else:
partl[dev_name] = value
target.report_info("POS: %s: empty partitions: %s"
% (boot_dev, " ".join(empties)), dlevel = 2)
target.report_info("POS: %s: imaged partitions: %s"
% (boot_dev,
" ".join([ i[0] + "|" + i[1]
for i in list(partl.items()) ])),
dlevel = 2)
if not partl and not empties:
# there were no pos_root_XYZ entries, so that means we are not
# initialized properly, reinit
target.report_info("POS: %s: no root partitions known, uninitialized?"
% boot_dev, dlevel = 1)
return None
# We don't have empties to spare, so choose one that is the most
# similar, to improve the transfer rate
#
# This could probably be made more efficient, e.g. collect least-used
# partition data? to avoid the situation where two clients keep
# imaging over each other when they could have two separate images
root_part_dev, score, check_empties, seed = pos.image_seed_match(partl, image)
if score == 0:
# none is a good match, find an empty one... if there are
# no empty ones, just pick any
if empties:
root_part_dev = random.choice(empties)
target.report_info("POS: picked up empty root partition %s"
% root_part_dev, dlevel = 2)
else:
root_part_dev = random.choice(list(partl.keys()))
target.report_info(
"POS: picked up random partition %s, because none of the "
"existing installed ones was a good match and there "
"are no empty ones" % root_part_dev, dlevel = 2)
elif check_empties and empties:
# This is for the case where image and seed have the same distro
# but different spin. We want to check our luck if there is an empty
# partition. If there isn't, we will just take the given one from
# pos.image_seed_match.
root_part_dev = random.choice(empties)
target.report_info("POS: picked up empty root partition %s"
% root_part_dev, dlevel = 2)
else:
target.report_info("POS: picked up root partition %s for %s "
"due to a %.02f similarity with %s"
% (root_part_dev, seed, score, seed), dlevel = 2)
return root_part_dev
def _rootfs_guess(target, image, boot_dev):
reason = "unknown issue"
for tries in range(3):
tries += 1
try:
target.report_info("POS: guessing partition device [%d/3]" % tries)
root_part_dev = _rootfs_guess_by_image(target, image, boot_dev)
if root_part_dev != None:
return root_part_dev
# we couldn't find a root partition device, which means the
# thing is trashed
target.report_info("POS: repartitioning because couldn't find "
"root partitions")
_disk_partition(target)
target.pos.fsinfo_read(target._boot_label_name)
except Exception as e:
reason = str(e)
if tries < 3:
target.report_info("POS: failed to guess a root partition, "
"retrying: %s" % reason)
continue
else:
raise
raise tc.blocked_e(
"Tried too much to reinitialize the partition table to "
"pick up a root partition? is there enough space to "
"create root partitions?",
dict(target = target, reason = reason,
partsizes = target.kws.get('pos_partsizes', None)))
def mount_fs(target, image, boot_dev):
"""
Boots a root filesystem on /mnt
The partition used as a root filesystem is picked up based on
str(tokens[pos])
pos += 1
(pos, rhs) = parse_assignment_expression(tokens, pos, ctx)
if rhs:
return (pos, BinaryOp(op, lhs, rhs))
pos = save_pos.pop()
# conditional-expression
(pos, expr) = parse_conditional_expression(tokens, pos, ctx)
if expr:
return (pos, expr)
return (pos, None)
def parse_expression(tokens, pos, ctx):
# assignment-expression
# expression , assignment-expression
pos, exprs = parse_list(tokens, pos, ctx, parse_assignment_expression)
if exprs:
if len(exprs) >= 2:
return (pos, CommaOp(exprs))
return (pos, exprs[0])
return (pos, None)
def parse_direct_abstract_declarator_head(tokens, pos, ctx):
save_pos = []
save_pos.append(pos)
if tokens[pos].value == "(":
pos += 1
(pos, decl) = parse_abstract_declarator(tokens, pos, ctx)
if decl:
if tokens[pos].value == ")":
pos += 1
return (pos, decl)
pos = save_pos.pop()
return (pos, None)
def parse_direct_abstract_declarator_tail(tokens, pos, ctx):
save_pos = []
save_pos.append(pos)
if tokens[pos].value == "[":
raise NotImplementedError()
pos = save_pos.pop()
save_pos.append(pos)
if tokens[pos].value == "(":
pos += 1
(pos, (param_type_list, is_variadic)) = parse_parameter_type_list(
tokens, pos, ctx)
if tokens[pos].value == ")":
pos += 1
return (pos, param_type_list)
pos = save_pos.pop()
return (pos, None)
def parse_direct_abstract_declarator(tokens, pos, ctx, declarator):
save_pos = []
# ( abstract-declarator )
# direct-abstract-declarator_opt [ type-qualifier-list_opt assignment-expression_opt ]
# direct-abstract-declarator_opt [ static type-qualifier-list_opt assignment-expression ]
# direct-abstract-declarator_opt [ type-qualifier-list static assignment-expression ]
# direct-abstract-declarator_opt [ * ]
# direct-abstract-declarator_opt ( parameter-type-list_opt )
save_pos.append(pos)
(pos, decl) = parse_direct_abstract_declarator_head(tokens, pos, ctx)
declarator.ident_or_decl = decl
if decl:
tails = []
while True:
(pos, tail) = parse_direct_abstract_declarator_tail(tokens, pos, ctx)
if not tail:
break
declarator.add_chunk(tail)
tails.append(tail)
return (pos, (decl, tails))
pos = save_pos.pop()
return (pos, None)
def parse_abstract_declarator(tokens, pos, ctx):
save_pos = []
# pointer
# pointer_opt direct-abstract-declarator
save_pos.append(pos)
if tokens[pos].value == "__cdecl":
pos += 1
(pos, pointer) = parse_pointer(tokens, pos, ctx)
if tokens[pos].value == "__cdecl":
pos += 1
declarator = Declarator()
(pos, direct_decl) = parse_direct_abstract_declarator(
tokens, pos, ctx, declarator)
if direct_decl:
if pointer:
direct_decl.pointer = pointer
return (pos, declarator)
if pointer:
declarator.pointer = pointer
return (pos, declarator)
pos = save_pos.pop()
return (pos, None)
def parse_parameter_declaration(tokens, pos, ctx):
# declaration-specifiers declarator
# declaration-specifiers abstract-declarator_opt
(pos, specs) = parse_declaration_specifiers(tokens, pos, ctx)
if specs:
(pos, decl) = parse_declarator(tokens, pos, ctx)
if decl:
return (pos, (specs, decl))
(pos, decl) = parse_abstract_declarator(tokens, pos, ctx)
return (pos, (specs, decl))
return (pos, None)
def parse_identifier_list(tokens, pos, ctx):
ident_list = []
while True:
(pos, ident) = parse_identifier(tokens, pos, ctx)
if not ident:
break
ident_list.append(ident)
if len(ident_list) > 0:
return (pos, ident_list)
return (pos, None)
class TypeQualifier(Flag):
Unspecified = 0
Const = auto()
Restrict = auto()
Volatile = auto()
Atomic = auto()
def type_qualifier_from_name(name):
if name == "const":
return TypeQualifier.Const
elif name == "restrict":
return TypeQualifier.Restrict
elif name == "volatile":
return TypeQualifier.Volatile
elif name == "_Atomic":
return TypeQualifier.Atomic
raise ValueError('{} is not a valid name'.format(name))
def parse_type_qualifier(tokens, pos, ctx):
if str(tokens[pos]) in ["const", "restrict", "volatile", "_Atomic"]:
value = type_qualifier_from_name(str(tokens[pos]))
return (pos + 1, value)
return (pos, None)
def parse_constant_expression(tokens, pos, ctx):
return parse_conditional_expression(tokens, pos, ctx)
def parse_designator(tokens, pos, ctx):
save_pos = []
# [ constant-expression ]
save_pos.append(pos)
if tokens[pos].value == "[":
pos += 1
(pos, expr) = parse_constant_expression(tokens, pos, ctx)
if expr:
if tokens[pos].value == "]":
pos += 1
return (pos, expr)
pos = save_pos.pop()
# . identifier
save_pos.append(pos)
if tokens[pos].value == ".":
pos += 1
(pos, ident) = parse_identifier(tokens, pos, ctx)
if ident:
return (pos, ident)
pos = save_pos.pop()
return (pos, None)
def parse_designator_list(tokens, pos, ctx):
# designator
# designator-list designator
designators = []
while True:
(pos, designator) = parse_designator(tokens, pos, ctx)
if not designator:
break
designators.append(designator)
if len(designators) > 0:
return (pos, designators)
return (pos, None)
def parse_designation(tokens, pos, ctx):
save_pos = []
# designator-list =
save_pos.append(pos)
(pos, designator_list) = parse_designator_list(tokens, pos, ctx)
if designator_list:
if tokens[pos].value == "=":
pos += 1
return (pos, designator_list)
pos = save_pos.pop()
return (pos, None)
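# Example of the C designated-initializer syntax the designator/designation rules above
# are meant to cover (illustrative C snippets, not test cases from this file):
#   struct point p = { .x = 1, .y = 2 };
#   int arr[4] = { [0] = 10, [3] = 40 };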
def parse_designation_initializer(tokens, pos, ctx):
(pos, designation) = parse_designation(tokens, pos, ctx)
src = tokens[pos:]
(pos, init) = parse_initializer(tokens, pos, ctx)
if init:
return (pos, [designation, init])
return (pos, None)
def parse_initializer_list(tokens, pos, ctx):
# designation_opt initializer
# initializer-list , designation_opt initializer
(pos, lst) = parse_list(tokens, pos, ctx, parse_designation_initializer)
if lst:
return (pos, InitializerList(lst))
return (pos, None)
def parse_initializer(tokens, pos, ctx):
save_pos = []
# assignment-expression
(pos, expr) = parse_assignment_expression(tokens, pos, ctx)
if expr:
return (pos, expr)
# { initializer-list }
# { initializer-list , }
save_pos.append(pos)
if tokens[pos].value == "{":
pos += 1
(pos, expr) = parse_initializer_list(tokens, pos, ctx)
if expr:
if tokens[pos].value == ",":
pos += 1
if tokens[pos].value == "}":
pos += 1
return (pos, expr)
raise Exception()
pos = save_pos.pop()
return (pos, None)
def parse_init_declarator(tokens, pos, ctx):
save_pos = []
# declarator
# declarator = initializer
(pos, decl) = parse_declarator(tokens, pos, ctx)
if decl:
save_pos.append(pos)
if tokens[pos].value == "=":
pos += 1
(pos, init) = parse_initializer(tokens, pos, ctx)
return (pos, [decl, init])
pos = save_pos.pop()
return (pos, (decl, None))
return (pos, None)
def parse_init_declarator_list(tokens, pos, ctx):
# init-declarator
# init-declarator-list , init-declarator
return parse_list(tokens, pos, ctx, parse_init_declarator)
def parse_struct_or_union(tokens, pos, ctx):
if tokens[pos].value in ["struct", "union"]:
return (pos + 1, tokens[pos].value)
return (pos, None)
def parse_struct_or_union_specifier(tokens, pos, ctx):
save_pos = []
# struct-or-union identifier_opt { struct-declaration-list }
# struct-or-union identifier
save_pos.append(pos)
is_packed = False
if tokens[pos].value == "__packed":
is_packed = True
pos += 1
(pos, struct_or_union) = parse_struct_or_union(tokens, pos, ctx)
if struct_or_union:
is_union = struct_or_union == "union"
(pos, ident) = parse_identifier(tokens, pos, ctx)
save_pos.append(pos)
if tokens[pos].value == "{":
pos += 1
(pos, decls) = parse_struct_declaration_list(tokens, pos, ctx)
if decls:
if tokens[pos].value == "}":
pos += 1
return (pos, RecordDecl(ident, decls, is_union, is_packed))
pos = save_pos.pop()
if ident:
return (pos, RecordDecl(ident, None, is_union, is_packed))
pos = save_pos.pop()
return (pos, None)
def parse_struct_declaration_list(tokens, pos, ctx):
# struct-declaration
# struct-declaration-list struct-declaration
decls = []
while True:
(pos, decl) = parse_struct_declaration(tokens, pos, ctx)
if not decl:
break
decls.append(decl)
if len(decls) > 0:
return (pos, decls)
return (pos, None)
def parse_struct_declaration(tokens, pos, ctx):
save_pos = []
# specifier-qualifier-list struct-declarator-list_opt ;
save_pos.append(pos)
(pos, spec_quals) = parse_specifier_qualifier_list(tokens, pos, ctx)
if spec_quals:
(pos, decls) = parse_struct_declarator_list(tokens, pos, ctx)
if tokens[pos].value == ";":
pos += 1
return (pos, FieldDecl(spec_quals, decls))
pos = save_pos.pop()
# static_assert-declaration
(pos, decl) = parse_static_assert_declaration(tokens, pos, ctx)
if decl:
return (pos, decl)
return (pos, None)
def parse_struct_declarator_list(tokens, pos, ctx):
# struct-declarator
# struct-declarator-list , struct-declarator
return parse_list(tokens, pos, ctx, parse_struct_declarator)
def parse_struct_declarator(tokens, pos, ctx):
save_pos = []
# declarator
# declarator_opt : constant-expression
(pos, decl) = parse_declarator(tokens, pos, ctx)
save_pos.append(pos)
if tokens[pos].value == ':':
pos += 1
(pos, const) = parse_constant_expression(tokens, pos, ctx)
if const:
return (pos, StructDecl(decl, const))
pos = save_pos.pop()
if decl:
return (pos, StructDecl(decl, None))
return (pos, None)
def parse_static_assert_declaration(tokens, pos, ctx):
if tokens[pos].value == "_Static_assert":
raise NotImplementedError()
return (pos, None)
class VarDecl(Node):
def __init__(self, qual_spec, decls):
self.qual_spec = qual_spec
self.decls = decls
def __len__(self):
return 1
def __getitem__(self, idx):
if idx > 1:
raise IndexError("idx")
return self.decls
def __setitem__(self, idx, value):
if idx > 1:
raise IndexError("idx")
self.decls = value
def parse_declaration(tokens, pos, ctx):
save_pos = []
# declaration-specifiers init-declarator-list_opt ;
save_pos.append(pos)
(pos, qual_spec) = parse_declaration_specifiers(tokens, pos, ctx)
if qual_spec:
(pos, decls) = parse_init_declarator_list(tokens, pos, ctx)
if tokens[pos].value == ";":
pos += 1
return (pos, VarDecl(qual_spec, decls))
pos = save_pos.pop()
# static_assert-declaration
(pos, decl) = parse_static_assert_declaration(tokens, pos, ctx)
if decl:
return (pos, decl)
return (pos, None)
def parse_declaration_statement(tokens, pos, ctx):
return parse_declaration(tokens, pos, ctx)
def parse_iteration_statement(tokens, pos, ctx):
save_pos = []
# while ( expression ) statement
save_pos.append(pos)
if tokens[pos].value == "while":
pos += 1
if tokens[pos].value == "(":
pos += 1
(pos, cond) = parse_expression(tokens, pos, ctx)
if cond:
if tokens[pos].value == ")":
pos += 1
(pos, stmt) = parse_statement(tokens, pos, ctx)
if stmt:
return (pos, WhileStmt(cond, stmt))
pos = save_pos.pop()
# do statement while ( expression ) ;
save_pos.append(pos)
if tokens[pos].value == "do":
pos += 1
(pos, stmt) = parse_statement(tokens, pos, ctx)
if stmt:
if tokens[pos].value == "while":
pos += 1
if tokens[pos].value == "(":
pos += 1
(pos, cond) = parse_expression(tokens, pos, ctx)
if cond:
if tokens[pos].value == ")":
pos += 1
if tokens[pos].value == ";":
pos += 1
return (pos, DoWhileStmt(cond, stmt))
pos = save_pos.pop()
# for ( declaration expression_opt ; expression_opt ) statement
save_pos.append(pos)
if tokens[pos].value == "for":
| |
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# author: <NAME>
## @package convert_functions
#helper functions for creating, transforming, and converting among messages,
#scipy matrices, and lists
from __future__ import division
import roslib
roslib.load_manifest('myBot')
import rospy
from geometry_msgs.msg import Transform, Pose, PoseStamped, Point, Point32, PointStamped, Vector3, Vector3Stamped, Quaternion, QuaternionStamped
from sensor_msgs.msg import PointCloud
from std_msgs.msg import Header
import tf.transformations
import tf
import numpy as np
import scipy
from sensor_msgs.msg import PointCloud, PointCloud2
from point_cloud import *
##convert a PointCloud or PointCloud2 to a 4xn scipy matrix (x y z 1)
def point_cloud_to_mat(point_cloud):
if type(point_cloud) == type(PointCloud()):
points = [[p.x, p.y, p.z, 1] for p in point_cloud.points]
elif type(point_cloud) == type(PointCloud2()):
points = [[p[0], p[1], p[2], 1] for p in read_points(point_cloud, field_names = 'xyz', skip_nans=True)]
else:
print "type not recognized:", type(point_cloud)
return None
points = scipy.matrix(points).T
return points
##convert a 4xn scipy matrix (x y z 1) to a PointCloud
def mat_to_point_cloud(mat, frame_id):
pc = PointCloud()
pc.header.frame_id = frame_id
for n in range(mat.shape[1]):
column = mat[:,n]
point = Point32()
point.x, point.y, point.z = column[0,0], column[1,0], column[2,0]
pc.points.append(point)
return pc
##transform a PointCloud or PointCloud2 to be a 4xn scipy matrix (x y z 1) in a new frame
def transform_point_cloud(tf_listener, point_cloud, frame):
points = point_cloud_to_mat(point_cloud)
transform = get_transform(tf_listener, point_cloud.header.frame_id, frame)
if transform == None:
return (None, None)
points = transform * points
return (points, transform)
##get the 4x4 transformation matrix from frame1 to frame2 from TF
def get_transform(tf_listener, frame1, frame2):
temp_header = Header()
temp_header.frame_id = frame1
temp_header.stamp = rospy.Time(0)
try:
tf_listener.waitForTransform(frame1, frame2, rospy.Time(0), rospy.Duration(5))
except tf.Exception, e:
rethrow_tf_exception(e, "tf transform was not there between %s and %s"%(frame1, frame2))
frame1_to_frame2 = tf_listener.asMatrix(frame2, temp_header)
return scipy.matrix(frame1_to_frame2)
def pose_to_mat(pose):
'''Convert a pose message to a 4x4 numpy matrix.
Args:
pose (geometry_msgs.msg.Pose): Pose rospy message class.
Returns:
mat (numpy.matrix): 4x4 numpy matrix
'''
quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
pos = np.matrix([pose.position.x, pose.position.y, pose.position.z]).T
mat = np.matrix(tf.transformations.quaternion_matrix(quat))
mat[0:3, 3] = pos
return mat
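# Illustrative round trip (assumed values, not from the original file):
#   p = Pose(Point(1.0, 0.0, 0.5), Quaternion(0.0, 0.0, 0.0, 1.0))
#   m = pose_to_mat(p)    # 4x4 homogeneous matrix with the position in the last column
#   p2 = mat_to_pose(m)   # back to a Pose message (mat_to_pose is defined just below)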
def mat_to_pose(mat, transform = None):
'''Convert a homogeneous matrix to a Pose message, optionally premultiply by a transform.
Args:
mat (numpy.ndarray): 4x4 array (or matrix) representing a homogenous transform.
transform (numpy.ndarray): Optional 4x4 array representing additional transform
Returns:
pose (geometry_msgs.msg.Pose): Pose message representing transform.
'''
if transform != None:
mat = np.dot(transform, mat)
pose = Pose()
pose.position.x = mat[0,3]
pose.position.y = mat[1,3]
pose.position.z = mat[2,3]
quat = tf.transformations.quaternion_from_matrix(mat)
pose.orientation.x = quat[0]
pose.orientation.y = quat[1]
pose.orientation.z = quat[2]
pose.orientation.w = quat[3]
return pose
def transform_to_mat(transform):
'''Convert a tf transform to a 4x4 scipy mat.
Args:
transform (geometry_msgs.msg.Transform): Transform rospy message class.
Returns:
mat (numpy.matrix): 4x4 numpy matrix
'''
quat = [transform.rotation.x, transform.rotation.y, transform.rotation.z, transform.rotation.w]
pos = scipy.matrix([transform.translation.x, transform.translation.y, transform.translation.z]).T
mat = scipy.matrix(tf.transformations.quaternion_matrix(quat))
mat[0:3, 3] = pos
return mat
def mat_to_transform(mat, transform = None):
'''Convert a 4x4 scipy matrix to a transform message.
Args:
mat (numpy.matrix): 4x4 numpy matrix
transform (numpy.ndarray): Optional 4x4 array representing additional transform
Returns:
transform (geometry_msgs.msg.Transform): Transform rospy message class.
'''
if transform != None:
mat = transform * mat
trans = np.array(mat[:3,3] / mat[3,3]).flat
quat = tf.transformations.quaternion_from_matrix(mat)
pose = Transform(Vector3(*trans), Quaternion(*quat))
return pose
##convert a 4x4 scipy matrix to position and quaternion lists
def mat_to_pos_and_quat(mat):
quat = tf.transformations.quaternion_from_matrix(mat).tolist()
pos = mat[0:3,3].T.tolist()[0]
return (pos, quat)
##stamp a message by giving it a header with a timestamp of now
def stamp_msg(msg, frame_id):
msg.header.frame_id = frame_id
msg.header.stamp = rospy.Time.now()
##make a PoseStamped out of a Pose
def stamp_pose(pose, frame_id):
pose_stamped = PoseStamped()
stamp_msg(pose_stamped, frame_id)
pose_stamped.pose = pose
return pose_stamped
##make a Vector3Stamped out of a Vector3
def stamp_vector3(vector3, frame_id):
vector3_stamped = Vector3Stamped()
stamp_msg(vector3_stamped, frame_id)
vector3_stamped.vector = vector3
return vector3_stamped
##set x, y, and z fields with a list
def set_xyz(msg, xyz):
msg.x = xyz[0]
msg.y = xyz[1]
msg.z = xyz[2]
##set x, y, z, and w fields with a list
def set_xyzw(msg, xyzw):
set_xyz(msg, xyzw)
msg.w = xyzw[3]
##get x, y, and z fields in the form of a list
def get_xyz(msg):
return [msg.x, msg.y, msg.z]
##get x, y, z, and w fields in the form of a list
def get_xyzw(msg):
return [msg.x, msg.y, msg.z, msg.w]
##transform a poseStamped by a 4x4 scipy matrix
def transform_pose_stamped(pose_stamped, transform, pre_or_post = "post"):
#convert the Pose to a 4x4 scipy matrix
pose_mat = pose_to_mat(pose_stamped.pose)
#post-multiply by the transform
if pre_or_post == "post":
transformed_mat = pose_mat * transform
else:
transformed_mat = transform * pose_mat
(pos, quat) = mat_to_pos_and_quat(transformed_mat)
#create a new poseStamped
new_pose_stamped = PoseStamped()
new_pose_stamped.header = pose_stamped.header
new_pose_stamped.pose = Pose(Point(*pos), Quaternion(*quat))
return new_pose_stamped
##convert a pointStamped to a pos list in a desired frame
def point_stamped_to_list(tf_listener, point, frame):
#convert the pointStamped to the desired frame, if necessary
if point.header.frame_id != frame:
try:
tf_listener.waitForTransform(frame, point.header.frame_id, \
rospy.Time(0), rospy.Duration(5))
trans_point = tf_listener.transformPoint(frame, point)
except tf.Exception, e:
rethrow_tf_exception(e, "point_stamped_to_list: error in transforming point from " + point.header.frame_id + " to " + frame + " error msg: %s"%e)
else:
trans_point = point
#extract position as a list
pos = [trans_point.point.x, trans_point.point.y, trans_point.point.z]
return pos
##convert a Vector3Stamped to a rot list in a desired frame
def vector3_stamped_to_list(tf_listener, vector3, frame):
#convert the vector3Stamped to the desired frame, if necessary
if vector3.header.frame_id != frame:
try:
tf_listener.waitForTransform(frame, vector3.header.frame_id, \
rospy.Time(0), rospy.Duration(5))
trans_vector3 = tf_listener.transformVector3(frame, vector3)
except tf.Exception, e:
rethrow_tf_exception(e, "vector3_stamped_to_list: error in transforming point from " + vector3.header.frame_id + " to " + frame + " error msg: %s"%e)
else:
trans_vector3 = vector3
#extract vect as a list
vect = [trans_vector3.vector.x, trans_vector3.vector.y, trans_vector3.vector.z]
return vect
##convert a QuaternionStamped to a quat list in a desired frame
def quaternion_stamped_to_list(tf_listener, quaternion, frame):
#convert the QuaternionStamped to the desired frame, if necessary
if quaternion.header.frame_id != frame:
try:
tf_listener.waitForTransform(frame, quaternion.header.frame_id, \
rospy.Time(0), rospy.Duration(5))
trans_quat = tf_listener.transformQuaternion(frame, quaternion)
except tf.Exception, e:
rethrow_tf_exception(e, "quaternion_stamped_to_list: error in transforming point from " + quaternion.header.frame_id + " to " + frame + " error msg: %s"%e)
else:
trans_quat = quaternion
#extract quat as a list
quat = [trans_quat.quaternion.x, trans_quat.quaternion.y, trans_quat.quaternion.z, trans_quat.quaternion.w]
return quat
##change the frame of a Vector3Stamped
def change_vector3_stamped_frame(tf_listener, vector3_stamped, frame):
if vector3_stamped.header.frame_id != frame:
vector3_stamped.header.stamp = rospy.Time(0)
try:
tf_listener.waitForTransform(frame, vector3_stamped.header.frame_id,\
vector3_stamped.header.stamp, rospy.Duration(5))
trans_vector3 = tf_listener.transformVector3(frame, vector3_stamped)
except tf.Exception, e:
rethrow_tf_exception(e, "change_vector3_stamped: error in transforming vector3 from " + vector3_stamped.header.frame_id + " to " + frame + " error msg: %s"%e)
else:
trans_vector3 = vector3_stamped
return trans_vector3
##change the frame of a PoseStamped
def change_pose_stamped_frame(tf_listener, pose, frame):
#convert the PoseStamped to the desired frame, if necessary
if pose.header.frame_id != frame:
pose.header.stamp = rospy.Time(0)
try:
tf_listener.waitForTransform(frame, pose.header.frame_id, pose.header.stamp, rospy.Duration(5))
trans_pose = tf_listener.transformPose(frame, pose)
except tf.Exception, e:
rethrow_tf_exception(e, "change_pose_stamped_frame: error in transforming pose from " + pose.header.frame_id + " to " + frame + " error msg: %s"%e)
else:
trans_pose = pose
return trans_pose
##convert a PoseStamped to pos and rot (quaternion) lists in a desired frame
def pose_stamped_to_lists(tf_listener, pose, frame):
#change the frame, if necessary
trans_pose = change_pose_stamped_frame(tf_listener, pose,
# source repository: cogsys-tuebingen/uninas
from typing import Callable, Union
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from uninas.models.networks.abstract import AbstractNetwork
from uninas.models.networks.uninas.search import SearchUninasNetwork
from uninas.optimization.estimators.abstract import AbstractEstimator
from uninas.training.optimizers.abstract import WrappedOptimizer
from uninas.utils.shape import Shape
class BoardEntry:
def __init__(self, arc: [int], values: [float], input_data: torch.Tensor, sm_data: torch.Tensor):
self.arc = arc
self.values = None
self.input_data = None
self.sm_data = None
self.update(values, input_data, sm_data)
def update(self, values: [float], input_data: torch.Tensor, sm_data: torch.Tensor):
self.values = values
self.input_data = input_data
self.sm_data = sm_data
def __str__(self) -> str:
return '%s([%s], %s)' % (self.__class__.__name__, ', '.join(['%.3f' % v for v in self.values]), str(self.arc))
class Board:
_nds = NonDominatedSorting()
def __init__(self, max_size: int, track_tensors: bool, is_for_best=True):
self.max_size = max_size
self.track_tensors = track_tensors
self.is_for_best = is_for_best
self.entries = []
def get_entries(self) -> [BoardEntry]:
return self.entries
def get_entry_by_arc(self, arc: [int]) -> Union[BoardEntry, None]:
""" if an entry has the given architecture, return it """
for entry in self.entries:
if entry.arc == arc:
return entry
return None
def size(self) -> int:
""" number of current entries """
return len(self.entries)
def num_values(self) -> int:
""" num values that each entry has """
return len(self.entries[0].values)
def is_empty(self) -> bool:
return self.size() == 0
def is_full(self) -> bool:
return self.size() >= self.max_size
def is_overly_full(self) -> bool:
return self.size() > self.max_size
def remove_out_of_constraints(self, estimators: [AbstractEstimator]):
"""
remove all board entries that are out of constraints,
keep all those that violate the least constraints (preferably none)
"""
num = [sum([e.get_constraint_badness(e.evaluate_tuple(entry.arc)) for e in estimators])
for entry in self.entries]
if (len(num) > 0) and (len(estimators) > 0) and (min(num) != max(num)):
best = min(num)
to_remove = [i for i, n in enumerate(num) if n > best]
for n in sorted(to_remove, reverse=True):
del self.entries[n]
def _get_worst_entry(self) -> (int, int):
"""
get the index+rank of worst dominated candidate
(if the board tracks pareto best entries, otherwise give index+rank of a pareto-best candidate)
"""
all_values = np.zeros(shape=(self.size(), self.num_values()))
for i, e in enumerate(self.entries):
all_values[i] = e.values
_, rank = self._nds.do(all_values, return_rank=True)
idx = int(np.argmax(rank) if self.is_for_best else np.argmin(rank))
return idx, rank[idx]
def update_board(self, arc: [int], values: [float], match_batch_size: int,
inputs: torch.Tensor, outputs: [torch.Tensor], teacher_outputs: [torch.Tensor] = None):
"""
maybe add a path to the board
:param arc: architecture of the current candidate
:param values: metric values of the current candidate, smaller is always better
:param match_batch_size: batch size of saved input+output
:param inputs: inputs with which the metrics were measured
:param outputs: outputs with which the metrics were measured
:param teacher_outputs: outputs of the teacher of the current architecture
"""
saved_inputs, saved_outputs = None, None
if self.track_tensors:
saved_inputs = inputs[:match_batch_size].detach().clone()
o = outputs if (self.is_empty() or teacher_outputs is None) else teacher_outputs
saved_outputs = F.softmax(o[-1][:match_batch_size].detach().clone(), 1)
# if an entry with the given architecture already exists, just update its values
entry = self.get_entry_by_arc(arc)
if entry is None:
entry = BoardEntry(arc, values, saved_inputs, saved_outputs)
self.entries.append(entry)
else:
entry.update(values, saved_inputs, saved_outputs)
self.entries = sorted(self.entries, key=lambda e: e.values[0])
if self.is_overly_full():
# try to remove a (by now) dominated solution
# if none is found (all entries are pareto-best, rank == 0), remove the one with worst value1
to_delete, rank = self._get_worst_entry()
if (rank == 0) and self.is_for_best:
self.entries = sorted(self.entries, key=lambda e: e.values[0], reverse=False)
to_delete = -1
del self.entries[to_delete]
class PrioritizedMatchingBoard(nn.Module):
def __init__(self, board_size: int, grace_epochs: int, select_strategy: str, select_update_iter: int,
label_shape: Shape, match_batch_size: int, average_mmn_batches=False,
mmn_batch_size=-1, clip_grad_norm_value=1,
matching_net: Union[nn.Module, AbstractNetwork, None] = None):
"""
a board of prioritized paths for direct NAS,
also directly includes the meta weight updating
based on:
https://arxiv.org/pdf/2010.15821.pdf
https://github.com/microsoft/cream
:param board_size: number of entries (which are assumed pareto-optimal after some updates)
:param grace_epochs: epochs passed before filling the board
:param select_strategy: how to sample a teacher model
value1: always pick the one with the highest value1
random: always pick a random path
l1: pick the most similar architecture by number of identical paths
meta: pick the best match by output similarity
:param select_update_iter: if the update strategy is 'meta', update the matching every n batches
:param label_shape:
:param match_batch_size: mini-batch size for student-teacher matching
:param average_mmn_batches: whether to average the MMN results across the batches, or concat the inputs
:param mmn_batch_size: mini-batch size for the training of the matching network
:param clip_grad_norm_value: clip the probing step
:param matching_net: matching network, use a simple nn.Linear if required but not given
"""
super().__init__()
self.grace_epochs = grace_epochs
self.select_strategy = select_strategy
self.select_update_iter = select_update_iter
self.match_batch_size = match_batch_size
self.average_mmn_batches = average_mmn_batches
self.mmn_batch_size = mmn_batch_size
self.clip_grad_norm_value = clip_grad_norm_value
self.matching_net = None
if select_strategy == "meta":
assert match_batch_size * 2 <= mmn_batch_size,\
"the MMN batch size (%d) must be at least twice as big as the match batch size (%d)"\
% (mmn_batch_size, match_batch_size)
size = label_shape.numel() if average_mmn_batches else label_shape.numel() * match_batch_size
if matching_net is None:
self.matching_net = nn.Linear(size, 1)
elif isinstance(matching_net, AbstractNetwork):
self.matching_net = matching_net
self.matching_net.build(Shape([size]), Shape([1]))
elif isinstance(matching_net, nn.Module):
self.matching_net = matching_net
out = self.matching_net(torch.zeros(size=[1, size]))
assert out.shape == torch.Size([1, 1])
else:
raise NotImplementedError("can not handle matching net of type %s" % type(matching_net))
self.pareto_best = Board(board_size, track_tensors=(select_strategy == 'meta'), is_for_best=True)
self.pareto_worst = Board(board_size, track_tensors=(select_strategy == 'meta'), is_for_best=False)
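    # Hedged construction sketch (all values and the Shape below are illustrative, not from the paper):
    # a small board with the 'l1' teacher-selection strategy; the 'meta' strategy additionally
    # requires mmn_batch_size >= 2 * match_batch_size so the matching network has data to train on.
    #
    #   board = PrioritizedMatchingBoard(board_size=5, grace_epochs=10, select_strategy='l1',
    #                                    select_update_iter=200, label_shape=Shape([10]),
    #                                    match_batch_size=8)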
def forward(self, model_out: torch.Tensor, teacher_out: torch.Tensor) -> torch.Tensor:
x = model_out - teacher_out
y = self.matching_net(x if self.average_mmn_batches else x.view(1, -1))
# networks may have multiple heads/outputs, only care for the final one
if isinstance(y, list):
y = y[-1]
# possibly average over multiple
if self.average_mmn_batches:
y = y.mean(dim=0)
return y.squeeze()
def get_pareto_best(self) -> Board:
return self.pareto_best
def get_pareto_worst(self) -> Board:
return self.pareto_worst
def is_in_grace_time(self, epoch: int) -> bool:
return epoch < self.grace_epochs
def select_teacher(self, net: SearchUninasNetwork, arc: [int]) -> (float, [int]):
"""
select a teacher architecture
returns a matching_value in [0, 1] for the loss, and the teacher architecture
"""
entries = self.pareto_best.get_entries()
# the closest entry according to the meta matching network
if self.select_strategy == 'meta':
matching_value, teacher_arc = -1000000000, None
for entry in entries:
net.forward_strategy(fixed_arc=arc)
out = net(entry.input_data)
out = F.softmax(out[-1], dim=1)
weight = self(out, entry.sm_data)
if weight > matching_value:
matching_value = weight
teacher_arc = entry.arc
return torch.sigmoid(matching_value), teacher_arc
# any of the entries maximizing the number of same arc choices
if self.select_strategy == 'l1':
n_all = [sum([a0 == a1 for a0, a1 in zip(arc, entry.arc)]) for entry in entries]
n_best = max(n_all)
best = [entries[i] for i, n in enumerate(n_all) if n == n_best]
return 0.5, random.choice(best).arc
# the entry that maximizes value1
if self.select_strategy == 'value1':
return 0.5, entries[0].arc
# a random entry
if self.select_strategy == 'random':
return 0.5, random.choice(entries).arc
raise NotImplementedError
def update_board(self, epoch: int, arc: [int], values: [float],
inputs: torch.Tensor, outputs: [torch.Tensor], teacher_outputs: [torch.Tensor] = None):
"""
maybe add a path to the board
:param epoch: current epoch
:param arc: architecture of the current candidate
:param values: metric values of the current candidate, smaller is always better
:param inputs: inputs with which the metrics were measured
:param outputs: outputs with which the metrics were measured
:param teacher_outputs: outputs of the teacher of the current architecture
"""
self.pareto_best.update_board(arc, values, match_batch_size=self.match_batch_size,
inputs=inputs, outputs=outputs, teacher_outputs=teacher_outputs)
self.pareto_worst.update_board(arc, values, match_batch_size=self.match_batch_size,
inputs=inputs, outputs=outputs, teacher_outputs=teacher_outputs)
def remove_out_of_constraints(self, estimators: [AbstractEstimator]):
"""
remove all board entries that are out of constraints,
keep all those that violate the least constraints (preferably none)
"""
self.pareto_best.remove_out_of_constraints(estimators)
self.pareto_worst.remove_out_of_constraints(estimators)
@classmethod
def _get_student_loss(cls, inputs: torch.Tensor, net: SearchUninasNetwork,
arc: [int], teacher_arc: [int], meta_value: float, loss_fn: Callable) -> torch.Tensor:
net.forward_strategy(fixed_arc=arc)
logits = net(inputs)
with torch.no_grad():
net.forward_strategy(fixed_arc=teacher_arc)
teacher_logits = net(inputs)
soft_target = F.softmax(teacher_logits[-1], dim=1)
return meta_value * loss_fn(logits, soft_target)
def _get_valid_loss(self, inputs: torch.Tensor, targets: torch.Tensor, net: SearchUninasNetwork,
arc: [int], loss_fn: Callable) -> torch.Tensor:
x = inputs[self.match_batch_size:self.match_batch_size * 2].clone()
y = targets[self.match_batch_size:self.match_batch_size * 2]
assert x.shape[0] > 1, "too small MMN batch size for slice, %s, %s, %s" % (inputs.shape, x.shape, y.shape)
net.forward_strategy(fixed_arc=arc)
return loss_fn(net(x), y)
@classmethod
def _get_mmn_grads(cls, valid_loss: torch.Tensor, params_net: [nn.Parameter], params_mmn: [nn.Parameter],
one_student_weight: torch.Tensor) -> [torch.Tensor]:
""" compute the 2nd order loss for the meta matching network's parameters """
grads1_student = torch.autograd.grad(valid_loss, params_net, retain_graph=True, allow_unused=True)
return torch.autograd.grad(one_student_weight, params_mmn, grad_outputs=grads1_student)
def | |
<filename>geoscilabs/seismic/syntheticSeismogram.py
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from ipywidgets import interact, interactive, IntSlider, widget, FloatText, FloatSlider
def getPlotLog(d, log, dmax=200):
d = np.array(d, dtype=float)
log = np.array(log, dtype=float)
dplot = np.kron(d, np.ones(2))
logplot = np.kron(log, np.ones(2))
# dplot = dplot[1:]
dplot = np.append(dplot[1:], dmax)
return dplot, logplot
def getImpedance(rholog, vlog):
"""
Acoustic Impedance is the product of density and velocity
$$
Z = \\rho v
$$
"""
rholog, vlog = np.array(rholog, dtype=float), np.array(vlog, dtype=float)
return rholog * vlog
def getReflectivity(d, rho, v, usingT=True):
"""
The reflection coefficient of an interface is
$$
R_i = \\frac{Z_{i+1} - Z_{i}}{Z_{i+1}+Z_{i}}
$$
    The reflectivity can also include the effect of transmission through the layers above, in which case it is given by
    $$
    \\text{reflectivity} = R_i \\prod_{j = 1}^{i-1}(1-R_j^2)
$$
"""
Z = getImpedance(rho, v) # acoustic impedance
dZ = Z[1:] - Z[:-1]
sZ = Z[:-1] + Z[1:]
R = dZ / sZ # reflection coefficients
nlayer = len(v) # number of layers
rseries = R
if usingT:
for i in range(nlayer - 1):
rseries[i + 1 :] = rseries[i + 1 :] * (1.0 - R[i] ** 2)
rseries = np.array(rseries, dtype=float)
R = np.array(R, dtype=float)
return rseries, R
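# Hedged worked example for getReflectivity (an illustrative three-layer model, not from the notebook):
#   d   = [0., 50., 100.]           # layer tops (m)
#   rho = [2000., 2300., 2500.]     # densities (kg/m^3)
#   v   = [1500., 2000., 2500.]     # velocities (m/s)
# gives impedances Z = [3.0e6, 4.6e6, 6.25e6], so
#   R[0] = (4.6 - 3.0) / (4.6 + 3.0) ~= 0.211
#   R[1] = (6.25 - 4.6) / (6.25 + 4.6) ~= 0.152
# and with usingT=True the second entry of rseries becomes R[1] * (1 - R[0]**2) ~= 0.145.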
def getTimeDepth(d, v, dmax=200):
"""
The time depth conversion is computed by determining the two-way travel time for a reflection from a given depth.
"""
d = np.sort(d)
d = np.append(d, dmax)
twttop = 2.0 * np.diff(d) / v # 2-way travel time within each layer
twttop = np.append(0.0, twttop)
twttop = np.cumsum(twttop) # 2-way travel time from surface to top of each layer
return d, twttop
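# Hedged continuation of the example above: with dmax=200 the layer thicknesses are
# [50, 50, 100] m, so the two-way times within each layer are
#   [2*50/1500, 2*50/2000, 2*100/2500] = [0.067, 0.050, 0.080] s,
# and the cumulative two-way time to each layer top is [0., 0.067, 0.117, 0.197] s.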
def getLogs(d, rho, v, usingT=True):
"""
Function to make plotting convenient
"""
dpth, rholog = getPlotLog(d, rho)
_, vlog = getPlotLog(d, v)
zlog = getImpedance(rholog, vlog)
rseries, _ = getReflectivity(d, rho, v, usingT)
return dpth, rholog, vlog, zlog, rseries
def syntheticSeismogram(
d, rho, v, wavf, wavA=1.0, usingT=True, wavtyp="RICKER", dt=0.0001, dmax=200
):
"""
    function syntheticSeismogram(d, rho, v, wavf, wavA, usingT, wavtyp, dt, dmax)
    syntheticSeismogram generates a synthetic seismogram for
    a simple 1-D layered model.
    Inputs:
       d      : depth to the top of each layer (m)
       rho    : density of each layer (kg/m^3)
       v      : velocity of each layer (m/s)
                The last layer is assumed to be a half-space
       wavf   : wavelet frequency (a list of frequencies for Ormsby and Klauder wavelets)
       wavA   : wavelet amplitude
       usingT : use transmission coefficients?
       wavtyp : type of wavelet
                The wavelet options are:
                    Ricker:   takes one frequency
                    Gaussian: still in progress
                    Ormsby:   takes 4 frequencies
                    Klauder:  takes 2 frequencies
<NAME>
<EMAIL>
Created: November 30, 2013
Modified: October 3, 2014
"""
v, rho, d = (
np.array(v, dtype=float),
np.array(rho, dtype=float),
np.array(d, dtype=float),
)
usingT = np.array(usingT, dtype=bool)
_, t = getTimeDepth(d, v, dmax)
    rseries, R = getReflectivity(d, rho, v, usingT)  # honor the usingT flag passed to this function
# time for reflectivity series
tref = t[1:-1]
# create time vector
t = np.arange(t.min(), t.max(), dt)
# make wavelet
twav = np.arange(-2.0 / np.min(wavf), 2.0 / np.min(wavf), dt)
# Get source wavelet
wav = {"RICKER": getRicker, "ORMSBY": getOrmsby, "KLAUDER": getKlauder}[wavtyp](
wavf, twav
)
wav = wavA * wav
rseriesconv = np.zeros(len(t))
for i in range(len(tref)):
index = np.abs(t - tref[i]).argmin()
rseriesconv[index] = rseries[i]
# Do the convolution
seis = np.convolve(wav, rseriesconv)
tseis = np.min(twav) + dt * np.arange(len(seis))
index = np.logical_and(tseis >= 0, tseis <= np.max(t))
tseis = tseis[index]
seis = seis[index]
return tseis, seis, twav, wav, tref, rseries
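# Hedged usage sketch (same illustrative model as in the comments above, with a 50 Hz Ricker wavelet):
#   tseis, seis, twav, wav, tref, rseries = syntheticSeismogram(
#       [0., 50., 100.], [2000., 2300., 2500.], [1500., 2000., 2500.], wavf=50.)
#   # tseis/seis form the seismogram, twav/wav the wavelet, tref/rseries the reflectivity spikes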
## WAVELET DEFINITIONS
pi = np.pi
def getRicker(f, t):
"""
Retrieves a Ricker wavelet with center frequency f.
See: http://www.subsurfwiki.org/wiki/Ricker_wavelet
"""
# assert len(f) == 1, 'Ricker wavelet needs 1 frequency as input'
# f = f[0]
pift = pi * f * t
wav = (1 - 2 * pift ** 2) * np.exp(-(pift ** 2))
return wav
# def getGauss(f,t):
# assert len(f) == 1, 'Gauss wavelet needs 1 frequency as input'
# f = f[0]
def getOrmsby(f, t):
"""
Retrieves an Ormsby wavelet with low-cut frequency f[0], low-pass frequency f[1], high-pass frequency f[2] and high-cut frequency f[3]
See: http://www.subsurfwiki.org/wiki/Ormsby_filter
"""
assert len(f) == 4, "Ormsby wavelet needs 4 frequencies as input"
f = np.sort(f) # Ormsby wavelet frequencies must be in increasing order
pif = pi * f
den1 = pif[3] - pif[2]
den2 = pif[1] - pif[0]
    term1 = (pif[3] * np.sinc(pif[3] * t)) ** 2 - (pif[2] * np.sinc(pif[2] * t)) ** 2
    term2 = (pif[1] * np.sinc(pif[1] * t)) ** 2 - (pif[0] * np.sinc(pif[0] * t)) ** 2
wav = term1 / den1 - term2 / den2
return wav
def getKlauder(f, t, T=5.0):
"""
Retrieves a Klauder Wavelet with upper frequency f[0] and lower frequency f[1].
See: http://www.subsurfwiki.org/wiki/Ormsby_filter
"""
assert len(f) == 2, "Klauder wavelet needs 2 frequencies as input"
k = np.diff(f) / T
f0 = np.sum(f) / 2.0
wav = np.real(
np.sin(pi * k * t * (T - t)) / (pi * k * t) * np.exp(2 * pi * 1j * f0 * t)
)
return wav
## Plotting Functions
def plotLogFormat(log, dpth, xlim, col="blue"):
"""
Nice formatting for plotting logs as a function of depth
"""
ax = plt.plot(log, dpth, linewidth=2, color=col)
plt.xlim(xlim)
plt.ylim((dpth.min(), dpth.max()))
plt.grid()
plt.gca().invert_yaxis()
plt.setp(plt.xticks()[1], rotation="90", fontsize=9)
plt.setp(plt.yticks()[1], fontsize=9)
return ax
def plotLogs(d, rho, v, usingT=True):
"""
Plotting wrapper to plot density, velocity, acoustic impedance and reflectivity as a function of depth.
"""
d = np.sort(d)
dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)
nd = len(dpth)
xlimrho = (1.95, 5.05)
xlimv = (0.25, 4.05)
xlimz = (xlimrho[0] * xlimv[0], xlimrho[1] * xlimv[1])
# Plot Density
plt.figure(1, figsize=(10, 5))
plt.subplot(141)
plotLogFormat(rholog * 10 ** -3, dpth, xlimrho, "blue")
plt.title("$\\rho$")
plt.xlabel("Density \n $\\times 10^3$ (kg /m$^3$)", fontsize=9)
plt.ylabel("Depth (m)", fontsize=9)
plt.subplot(142)
plotLogFormat(vlog * 10 ** -3, dpth, xlimv, "red")
plt.title("$v$")
plt.xlabel("Velocity \n $\\times 10^3$ (m/s)", fontsize=9)
plt.setp(plt.yticks()[1], visible=False)
plt.subplot(143)
plotLogFormat(zlog * 10.0 ** -6.0, dpth, xlimz, "green")
plt.gca().set_title("$Z = \\rho v$")
plt.gca().set_xlabel(
"Impedance \n $\\times 10^{6}$ (kg m$^{-2}$ s$^{-1}$)", fontsize=9
)
plt.setp(plt.yticks()[1], visible=False)
plt.subplot(144)
plt.hlines(d[1:], np.zeros(len(d) - 1), rseries, linewidth=2)
plt.plot(np.zeros(nd), dpth, linewidth=2, color="black")
plt.xlim((-1.0, 1.0))
if usingT is True:
plt.title("Reflectivity", fontsize=8.0)
plt.gca().set_xlabel("Reflectivity", fontsize=8.0)
else:
plt.title("Reflection Coeff.", fontsize=8.0)
plt.gca().set_xlabel("Reflection Coeff.", fontsize=8.0)
plt.grid()
plt.gca().invert_yaxis()
plt.setp(plt.xticks()[1], rotation="90", fontsize=9)
plt.setp(plt.yticks()[1], visible=False)
plt.tight_layout()
plt.show()
def plotTimeDepth(d, rho, v):
"""
Wrapper to plot time-depth conversion based on the provided velocity model
"""
rseries, _ = getReflectivity(d, rho, v, usingT=True)
dpth, t = getTimeDepth(d, v)
nd = len(dpth)
plt.figure(num=0, figsize=(10, 5))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
ax1.hlines(d[1:], np.zeros(len(d) - 1), rseries, linewidth=2)
ax1.plot(np.zeros(nd), dpth, linewidth=2, color="black")
ax1.invert_yaxis()
ax1.set_xlim(-1, 1)
ax1.grid(True)
ax1.set_xlabel("Reflectivity")
ax1.set_ylabel("Depth (m)")
ax3.hlines(t[1:-1], np.zeros(len(d) - 1), rseries, linewidth=2)
ax3.plot(np.zeros(nd), t, linewidth=2, color="black")
# ax3.set_ylim(0., 0.28)
ax3.invert_yaxis()
ax3.set_xlim(-1, 1)
ax3.grid(True)
ax3.set_xlabel("Reflectivity")
ax3.set_ylabel("Two Way Time (s)")
ax2.plot(t, dpth, linewidth=2)
ax2.ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
ax1.set_title("Depth")
ax2.set_title("Depth to Time")
ax3.set_title("Time")
ax2.grid()
ax2.set_ylabel("Depth (m)", fontsize=9)
ax2.set_xlabel("Two Way Time (s)", fontsize=9)
ax1.set_ylabel("Depth (m)", fontsize=9)
ax3.set_ylabel("Two Way Time (s)", fontsize=9)
plt.tight_layout()
plt.show()
def plotSeismogram(d, rho, v, wavf, wavA=1.0, noise=0.0, usingT=True, wavtyp="RICKER"):
"""
Plotting function to plot the wavelet, reflectivity series and seismogram as functions of time provided the geologic model (depths, densities, and velocities)
"""
tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(
d, rho, v, wavf, wavA, usingT, wavtyp
)
noise = noise * np.max(np.abs(seis)) * np.random.randn(seis.size)
filt = np.arange(1.0, 15.0)
filtr = filt[::-1]
filt = np.append(filt, filtr[1:]) * 1.0 / 15.0
noise = np.convolve(noise, filt)
noise = noise[0 : seis.size]
seis = seis + noise
plt.figure(num=0, figsize=(10, 5))
plt.subplot(131)
plt.plot(wav, twav, linewidth=1, color="black")
posind = wav > 0.0
plt.fill_between(wav[posind], twav[posind], np.zeros_like(wav[posind]), color="k")
plt.title("Wavelet")
plt.xlim((-2.0, 2.0))
plt.ylim((-0.2, 0.2))
majorytick = np.arange(-0.2, 0.3, 0.1)
minorytick = np.arange(-0.2, 0.21, 0.01)
plt.gca().set_yticks(majorytick)
plt.gca().set_yticks(minorytick, minor=True)
plt.gca().grid(True, which="major", axis="both", linewidth=1.5)
plt.gca().grid(True, which="minor", axis="y")
plt.ylim((tseis.min() - tseis.mean(), tseis.max() - tseis.mean()))
plt.gca().invert_yaxis()
plt.setp(plt.xticks()[1], rotation="90", fontsize=9)
plt.setp(plt.yticks()[1], fontsize=9)
plt.gca().set_xlabel("Amplitude", fontsize=9)
plt.gca().set_ylabel("Time (s)", fontsize=9)
plt.subplot(132)
plt.plot(
np.zeros(tref.size), (tseis.max(), tseis.min()), linewidth=2, color="black"
)
plt.hlines(
tref, np.zeros(len(rseriesconv)), rseriesconv, linewidth=2
) # ,'marker','none'
if usingT is True:
plt.title("Reflectivity")
else:
plt.title("Reflection Coeff.")
plt.grid()
plt.ylim((0, tseis.max()))
plt.gca().invert_yaxis()
plt.xlim((-2.0, 2.0))
plt.setp(plt.xticks()[1], rotation="90", fontsize=9)
plt.setp(plt.yticks()[1], fontsize=9)
plt.gca().set_xlabel("Amplitude", fontsize=9)
plt.gca().set_ylabel("Time (s)", fontsize=9)
plt.subplot(133)
| |
save_dir
self.save_RGBD_client.send_goal(goal)
self.save_RGBD_client.wait_for_result()
        super_fast_speed = self.graspingParams["speed"]["super_fast"]
self.moveHome(speed=super_fast_speed)
def run_category_manipulation_pipeline(self):
self._clear_cache()
self.run_keypoint_detection()
self.run_category_manipulation_goal_estimation()
self.run_manipulate_object()
def visualize_poser_result(self):
"""
DEPRECATED (this code is best used from pdc_ros)
Visualize the poser output
"""
# debugging
if self.poser_result is None:
# use the default path for debugging purposes
path_to_poser_output = os.path.join(spartanUtils.get_sandbox_dir(), "poser")
else:
path_to_poser_output = os.path.join(spartanUtils.get_sandbox_dir(), self.poser_result.poser_output_folder)
self._poser_visualizer = PoserVisualizer(path_to_poser_output)
poser_response = self._poser_visualizer.load_poser_response()
self._poser_visualizer.visualize_result(poser_response)
def grasp_best_match(self):
assert self.best_match_result.match_found
best_match_location_msg = self.best_match_result.best_match_location
best_match_location = np.zeros(3)
best_match_location[0] = best_match_location_msg.x
best_match_location[1] = best_match_location_msg.y
best_match_location[2] = best_match_location_msg.z
# check that it is above table
min_pt = np.array([0.4, -0.357198029757, 0.0])
max_pt = np.array([0.822621226311, 0.3723, 0.5])
greater_than_min = (best_match_location > min_pt).all()
less_than_max = (best_match_location < max_pt).all()
if not (greater_than_min and less_than_max):
print "best match location is outside of workspace bounds"
print "best_match_location:", best_match_location
return False
print "requesting Grasp 3D location"
self.grasp_3D_location_request(best_match_location)
result = self.wait_for_grasp_3D_location_result()
print "received Grasp 3D Location Response"
print "result:\n", result
grasp_found = self.processGenerateGraspsResult(result)
if not grasp_found:
print "no grasp found, returning"
return False
print "attempting grasp"
return self.attemptGrasp(self.graspFrame)
def find_best_match_and_grasp_and_stow(self):
# find best match
result = self.findBestBatch()
if not result.match_found:
return False
# attempt grasp best match
grasp_successful = self.grasp_best_match()
if not grasp_successful:
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.moveHome()
print "grasp attempt failed, resetting"
return False
# stow
stow_pose = self.graspingParams["poses"]["hand_to_human_right"]
# stow_pose = self.graspingParams["poses"]["stow_in_bin"]
self.pickupObject(stow=True, stow_pose=stow_pose)
def request_best_match(self):
goal = pdc_ros_msgs.msg.FindBestMatchGoal()
goal.rgbd_with_pose_list = self.list_rgbd_with_pose_msg
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
self.find_best_match_client.send_goal(goal)
self.moveHome()
# From: https://www.programcreek.com/python/example/99841/sensor_msgs.msg.PointCloud2
def pointcloud2_to_array(self, cloud_msg):
'''
Converts a rospy PointCloud2 message to a numpy recordarray
Assumes all fields 32 bit floats, and there is no padding.
'''
dtype_list = [(f.name, np.float32) for f in cloud_msg.fields]
cloud_arr = np.fromstring(cloud_msg.data, dtype_list)
return cloud_arr
        # unreachable after the early return above; the original reshape is kept here for reference:
        # return np.reshape(cloud_arr, (cloud_msg.height, cloud_msg.width))
def processGenerateGraspsResult(self, result):
"""
Takes the result of spartan_grasp and parses it into a usable form
:param result:
:return:
"""
print "num antipodal grasps = ", len(result.antipodal_grasps)
print "num volume grasps = ", len(result.volume_grasps)
if (len(result.antipodal_grasps) == 0) and (len(result.volume_grasps) == 0):
self.topGrasp = None
self._grasp_found = False
rospy.loginfo("no valid grasps found")
return False
if len(result.antipodal_grasps) > 0:
self._grasp_found = True
grasp_msg = result.antipodal_grasps[0]
print "top grasp was ANTIPODAL"
elif len(result.volume_grasps) > 0:
self._grasp_found = True
grasp_msg = result.volume_grasps[0]
print "top grasp was VOLUME"
self.topGrasp = grasp_msg
rospy.loginfo("-------- top grasp score = %.3f", self.topGrasp.score)
self.graspFrame = spartanUtils.transformFromROSPoseMsg(self.topGrasp.pose.pose)
self.rotateGraspFrameToAlignWithNominal(self.graspFrame)
return True
def make_grasp_data_from_spartan_grasp_result(self, result):
"""
Takes the result of spartan_grasp and parses it into a usable form
:param result:
:return: bool, GraspData
"""
print "num antipodal grasps = ", len(result.antipodal_grasps)
print "num volume grasps = ", len(result.volume_grasps)
if (len(result.antipodal_grasps) == 0) and (len(result.volume_grasps) == 0):
rospy.loginfo("no valid grasps found")
return False, False
if len(result.antipodal_grasps) > 0:
grasp_msg = result.antipodal_grasps[0]
type = "antipodal"
print "top grasp was ANTIPODAL"
elif len(result.volume_grasps) > 0:
grasp_msg = result.volume_grasps[0]
type = "volume"
print "top grasp was VOLUME"
rospy.loginfo("-------- top grasp score = %.3f", grasp_msg.score)
grasp_data = GraspData.from_spartan_grasp(grasp_msg)
grasp_data.data['type'] = type
# rotate the grasp to align with nominal
params = self.getParamsForCurrentLocation()
grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])
grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)
return True, grasp_data
def getIiwaLinkEEFrameFromGraspFrame(self, graspFrame):
return transformUtils.concatenateTransforms([self.iiwaLinkEEToGraspFrame, graspFrame])
def get_iiwa_link_ee_from_gripper_fingertip_frame(self, T_W__gripper_fingertip):
"""
:param T_gripper_fingertip__W: gripper fingertip to world transform
:return:
"""
return transformUtils.concatenateTransforms([self.T_gripper_fingertip__iiwa_link_ee, T_W__gripper_fingertip])
def moveToFrame(self, graspFrame, speed=None):
if speed is None:
speed = self.config['grasp_speed']
poseStamped = self.makePoseStampedFromGraspFrame(graspFrame)
return self.robotService.moveToCartesianPosition(poseStamped, speed)
def makePoseStampedFromGraspFrame(self, graspFrame):
"""
Make PoseStamped message for the end effector frame from a given grasp frame
:param graspFrame: vtkTransform of the gripper frame
:return : pose of the end-effector for that grasp frame location
:rtype : geometry_msgs/PoseStamped
"""
iiwaLinkEEFrame = self.getIiwaLinkEEFrameFromGraspFrame(graspFrame)
poseDict = spartanUtils.poseFromTransform(iiwaLinkEEFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
return poseStamped
def make_ee_pose_stamped_from_grasp(self, T_W_gripper_fingertip):
"""
Make PoseStamped message for the end effector frame from a given grasp frame.
:param T_W_gripper_fingertip: The position of the tips of the fingers, move down 3 cm to get
:return : pose of the end-effector for that grasp frame location
:rtype : geometry_msgs/PoseStamped
"""
iiwaLinkEEFrame = self.get_iiwa_link_ee_from_gripper_fingertip_frame(T_W_gripper_fingertip)
poseDict = spartanUtils.poseFromTransform(iiwaLinkEEFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
return poseStamped
def execute_grasp(self, grasp_data=None, close_gripper=True, use_cartesian_plan=True, stop_at_pre_grasp=False, push_in_distance=None, use_debug_speed=False, force_threshold_magnitude=None, ee_speed_m_s=0.05):
"""
Moves to pre-grasp frame, then grasp frame
attemps to close gripper if `close_gripper=True` was passed in
:return: bool (whether or not grasp was successful)
"""
if grasp_data is None:
grasp_data = self.state.grasp_data
if push_in_distance is None:
push_in_distance = self.graspingParams['grasp_push_in_distance']
gripper_width = grasp_data.grasp_inner_diameter
if gripper_width is not None:
gripper_driver_width = gripper_width + self.graspingParams['gripper_width_offset']
self.gripperDriver.sendGripperCommand(gripper_driver_width, force=20.0)
else:
self.gripperDriver.send_open_gripper_set_distance_from_current()
rospy.sleep(0.5) # wait for 0.5 for gripper to move
# compute the pre-grasp frame
pre_grasp_distance = self.graspingParams['pre_grasp_distance']
pre_grasp_frame_gripper = grasp_data.compute_pre_grasp_frame(distance=pre_grasp_distance)
pre_grasp_ee_pose_stamped = self.makePoseStampedFromGraspFrame(pre_grasp_frame_gripper)
# safety check
is_safe = (GraspData.grasp_frame_safety_check(grasp_data.grasp_frame) and GraspData.grasp_frame_safety_check(pre_grasp_frame_gripper))
if not is_safe:
self.state.set_status("SAFETY_CHECK_FAILED")
return False
# run the ik for moving to pre-grasp location
graspLocationData = self.graspingParams[self.state.graspingLocation]
above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
pre_grasp_ik_response = self.robotService.runIK(pre_grasp_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
pre_grasp_pose = pre_grasp_ik_response.joint_state.position
if not pre_grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# run the ik for moving to grasp location
# for now just do IK, otherwise use cartesian space plan with force guards
grasp_frame_ee_pose_stamped = self.makePoseStampedFromGraspFrame(grasp_data.grasp_frame)
grasp_ik_response = self.robotService.runIK(grasp_frame_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
grasp_pose = grasp_ik_response.joint_state.position
if not grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# store for later use
self.state.cache['grasp_ik_response'] = grasp_ik_response
self.state.cache['pre_grasp_ik_response'] = pre_grasp_ik_response
# move to pre-grasp position
# we do this using a position trajectory
print "moving to pre-grasp"
pre_grasp_speed = self.graspingParams['speed']['pre_grasp']
#### debugging
speed = pre_grasp_speed
if use_debug_speed:
speed = DEBUG_SPEED
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
speed)
self.state.set_status("PRE_GRASP")
print "at pre-grasp pose"
if stop_at_pre_grasp:
return
if use_cartesian_plan:
# move to grasp position using compliant cartesian plan
move_forward_distance = pre_grasp_distance + push_in_distance
print "move_forward_distance", move_forward_distance
xyz_goal = move_forward_distance * np.array([1, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
            # the ee_speed_m_s argument overrides the configured speed['cartesian_grasp'] value
            cartesian_grasp_speed = ee_speed_m_s
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
# add force guards
# -z (gripper) direction in frame iiwa_link_ee,
if force_threshold_magnitude is None:
force_threshold_magnitude = self.graspingParams['force_threshold_magnitude']
force_vector = force_threshold_magnitude * np.array([-1, 0, 0])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
grasp_data.data['cartesian_trajectory_result'] = result
print "Cartesian Trajectory Result\n", result
else:
# move to grasp pose using standard IK
speed = self.graspingParams['speed']['grasp']
if use_debug_speed:
speed = DEBUG_SPEED
self.robotService.moveToJointPosition(grasp_pose,
maxJointDegreesPerSecond=
speed)
# record current location of gripper (in world frame)
# before closing the gripper
pos, quat = self.get_transform("iiwa_link_ee", "base")
T_world_ee = transformUtils.transformFromPose(pos, quat)
T_world_grasp = transformUtils.concatenateTransforms([self.graspToIiwaLinkEE, T_world_ee])
self.state.cache['gripper_frame_at_grasp'] = T_world_grasp
has_object = False
if close_gripper:
print "closing gripper"
has_object = self.gripperDriver.closeGripper()
if has_object:
self.state.set_status("OBJECT_IN_GRIPPER")
print "object in gripper"
else:
self.state.set_status("GRASP_FAILED")
print "grasp failed"
return has_object
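    # Hedged call sketch: the keyword values below are illustrative overrides of the configured
    # defaults, and self.state.grasp_data is assumed to have been filled in by the planning step above.
    #
    #   has_object = self.execute_grasp(use_cartesian_plan=True, push_in_distance=0.01,
    #                                   force_threshold_magnitude=20.0, ee_speed_m_s=0.05)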
def execute_place(self, grasp_data=None, use_cartesian_plan=True):
if grasp_data is None:
grasp_data = self.state.grasp_data
# compute the pre-grasp frame
pre_grasp_distance = self.graspingParams['pre_grasp_distance']
pre_grasp_frame_gripper = grasp_data.compute_pre_grasp_frame(distance=pre_grasp_distance)
pre_grasp_ee_pose_stamped = self.makePoseStampedFromGraspFrame(pre_grasp_frame_gripper)
# run the ik for moving to pre-grasp location
graspLocationData = self.graspingParams[self.state.graspingLocation]
above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
pre_grasp_ik_response = self.robotService.runIK(pre_grasp_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
pre_grasp_pose = pre_grasp_ik_response.joint_state.position
if not pre_grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# run the ik for moving to grasp location
# for now just do IK, otherwise use cartesian space plan with force guards
grasp_frame_ee_pose_stamped = self.makePoseStampedFromGraspFrame(grasp_data.grasp_frame)
grasp_ik_response = self.robotService.runIK(grasp_frame_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
grasp_pose = grasp_ik_response.joint_state.position
if not grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# store for later use
self.state.cache['grasp_ik_response'] = grasp_ik_response
self.state.cache['pre_grasp_ik_response'] = pre_grasp_ik_response
# move to pre-grasp position
# we do this using a position trajectory
print "moving to pre-grasp"
pre_grasp_speed = self.graspingParams['speed']['pre_grasp']
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pre_grasp_speed)
self.state.set_status("PRE_GRASP")
print "at pre-grasp pose"
if use_cartesian_plan:
# move to grasp position using compliant cartesian plan
push_distance = self.graspingParams['grasp_push_in_distance']
move_forward_distance = pre_grasp_distance + push_distance
print "move_forward_distance", move_forward_distance
xyz_goal = move_forward_distance * np.array([1, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
# add force guards
# -z (gripper) direction in frame iiwa_link_ee,
force_magnitude = self.graspingParams['force_threshold_magnitude']
force_vector = force_magnitude * np.array([-1, 0, 0])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
| |
inputs = inputs[:, :max_length, :]
# (num_layers * num_directions, batch, hidden_size):
hs = []
for rnn in self.rnns:
outputs, h1, h2 = rnn(inputs, mask=mask)
h = tf.stack([h1, h2])
hs.append(h)
inputs = outputs
_, B, H = get_shape_as_list(h)
h = tf.reshape(tf.stack(hs), [-1, B, H * 2])
return self.output_fn(outputs, h)
@property
def requires_length(self) -> bool:
return self._requires_length
class Reduction(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs: List[tf.Tensor]) -> tf.Tensor:
pass
class ConcatReduction(Reduction):
def __init__(self, output_dims: List[int], axis=-1):
super().__init__()
self.axis = axis
self.output_dim = sum(output_dims)
def call(self, inputs: List[tf.Tensor]) -> tf.Tensor:
return tf.concat(values=inputs, axis=-1)
class ConcatSubtractReduction(Reduction):
"""This reduction assumes paired input and subtracts the two to get a distance
It is useful for training sentence encoders and is used, for example, in SentenceBERT
For this to work we assume that the inputs are paired, and subtract them
"""
def __init__(self, output_dims: List[int], axis=-1):
super().__init__()
self.axis = axis
self.output_dim = 3 * output_dims[0]
def call(self, inputs: List[tf.Tensor]) -> tf.Tensor:
sub = tf.abs(inputs[0] - inputs[1])
return tf.concat(values=[inputs[0], inputs[1], sub], axis=-1)
class SumReduction(Reduction):
def __init__(self, output_dims: List[int]):
super().__init__()
# We could actually project if we needed, or at least should validate
self.output_dim = output_dims[0]
def call(self, inputs: List[tf.Tensor]) -> tf.Tensor:
return tf.add_n(inputs)
class SumLayerNormReduction(Reduction):
def __init__(self, output_dims: List[int], layer_norm_eps: float = 1.0e-12):
super().__init__()
self.ln = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.output_dim = output_dims[0]
def call(self, inputs: List[tf.Tensor]) -> tf.Tensor:
outputs = tf.add_n(inputs)
return self.ln(outputs)
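# Quick illustration of the reductions above (shapes only; the dimensions are made up):
#   ConcatReduction([100, 30]).output_dim        == 130  # features concatenated along the last axis
#   SumReduction([100, 100]).output_dim          == 100  # element-wise sum, so all dims must match
#   ConcatSubtractReduction([100, 100]).output_dim == 300  # [u, v, |u - v|] for paired inputs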
class EmbeddingsStack(tf.keras.layers.Layer):
def __init__(
self,
embeddings_dict: Dict[str, tf.keras.layers.Layer],
dropout_rate: float = 0.0,
requires_length: bool = False,
reduction: Optional[Union[str, tf.keras.layers.Layer]] = 'concat',
name: Optional[str] = None,
**kwargs,
):
"""Takes in a dictionary where the keys are the input tensor names, and the values are the embeddings
:param embeddings_dict: (``dict``) dictionary of each feature embedding
"""
super().__init__(name=name)
self.embeddings = embeddings_dict
output_dims = []
for embedding in embeddings_dict.values():
output_dims += [embedding.get_dsz()]
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self._requires_length = requires_length
if isinstance(reduction, str):
if reduction == 'sum':
self.reduction = SumReduction(output_dims)
elif reduction == 'sum-layer-norm':
self.reduction = SumLayerNormReduction(output_dims, layer_norm_eps=kwargs.get('layer_norm_eps', 1.0e-12))
else:
self.reduction = ConcatReduction(output_dims)
else:
self.reduction = reduction
self.dsz = self.reduction.output_dim
def keys(self):
return self.embeddings.keys()
def items(self):
return self.embeddings.items()
def __getitem__(self, item: str):
return self.embeddings[item]
def call(self, inputs: Dict[str, tf.Tensor]) -> tf.Tensor:
"""This method performs "embedding" of the inputs. The base method here then concatenates along depth
dimension to form word embeddings
:return: A 3-d vector where the last dimension is the concatenated dimensions of all embeddings
"""
all_embeddings_out = []
i = 0
for k, embedding in self.embeddings.items():
x = inputs[k]
# Its a hair faster to do this than using isinstance
if x.__class__ == tuple:
embeddings_out = embedding(*x)
else:
embeddings_out = embedding(x)
all_embeddings_out.append(embeddings_out)
i += 1
word_embeddings = self.reduction(all_embeddings_out)
return self.dropout(word_embeddings, TRAIN_FLAG())
#@property
#def dsz(self) -> int:
# total_dsz = 0
# for embeddings in self.embeddings.values():
# total_dsz += embeddings.get_dsz()
# return total_dsz
@property
def requires_length(self) -> bool:
return self._requires_length
@property
def output_dim(self) -> bool:
return self.dsz
class WeightTieDense(tf.keras.layers.Layer):
def __init__(self, tied, name="weight-tied", use_bias=False):
super().__init__(name=name)
self.tied = tied
self.use_bias = use_bias
def _add_bias(self, W):
if self.use_bias:
self.bias = self.add_weight('bias', shape=[tf.shape(W)[0], ], initializer='zeros', regularizer=None,
constraint=None, dtype=self.W.dtype, trainable=True)
else:
self.bias = None
def build(self, input_shape):
emb = getattr(self.tied, "embedding_layer", None)
if emb is not None:
self._add_bias(getattr(emb, "W"))
super().build(input_shape)
return
W = getattr(self.tied, "W", None)
if W is not None:
self._add_bias(W)
super().build(input_shape)
return
self._add_bias(self.W)
        super().build(input_shape)
@property
def W(self):
w = getattr(self.tied, "W", None)
return w if w is not None else getattr(self.tied, "kernel")
def call(self, inputs):
shape = tf.shape(inputs)
inputs = tf.reshape(inputs, [-1, shape[-1]])
outs = tf.matmul(inputs, self.W, transpose_b=True)
if self.use_bias:
outs = tf.nn.bias_add(outs, self.bias)
new_shape = tf.concat([shape[:-1], tf.constant([-1])], axis=0)
return tf.reshape(outs, new_shape)
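# Hedged usage sketch for WeightTieDense: tie an output projection to an embedding table.
# `word_embed` is assumed to be one of this library's embedding layers exposing a weight
# `W` of shape [vocab_size, d_model] (that is the attribute the lookup above expects).
#
#   output_layer = WeightTieDense(word_embed)
#   logits = output_layer(hidden)   # [..., d_model] @ W^T -> [..., vocab_size], no new weights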
class DenseStack(tf.keras.layers.Layer):
def __init__(
self,
insz: Optional[int],
hsz: Union[int, List[int]],
activation: Union[tf.keras.layers.Activation, str, List[str]] = "relu",
pdrop_value: float = 0.5,
init: Optional[Any] = None,
name: Optional[str] = None,
skip_connect = False,
layer_norm = False,
**kwargs,
):
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param hsz: The number of hidden units
:param activation: The name of the activation function to use
:param pdrop_value: The dropout probability
:param init: The tensorflow initializer
"""
super().__init__(name=name)
hszs = listify(hsz)
self.output_dim = hszs[-1]
activations = listify(activation)
if len(activations) == 1:
activations = activations * len(hszs)
if len(activations) != len(hszs):
raise ValueError("Number of activations must match number of hidden sizes in a stack!")
if layer_norm:
layer_norm_eps = kwargs.get('layer_norm_eps', 1e-6)
if skip_connect:
if not insz:
raise ValueError("In order to use skip connection, insz must be provided in DenseStack!")
current = insz
layer_stack = []
for hsz, activation in zip(hszs, activations):
if skip_connect and current == hsz:
layer_stack.append(SkipConnection(hsz, activation))
current = hsz
else:
layer_stack.append(tf.keras.layers.Dense(hsz, activation))
if layer_norm:
layer_stack.append(tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps))
self.layer_stack = layer_stack
self.dropout = tf.keras.layers.Dropout(pdrop_value)
def call(self, inputs):
"""Stack 1 or more hidden layers, optionally (forming an MLP)
:param inputs: The fixed representation of the model
:param training: (``bool``) A boolean specifying if we are training or not
:param init: The tensorflow initializer
:param kwargs: See below
:Keyword Arguments:
* *hsz* -- (``int``) The number of hidden units (defaults to `100`)
:return: The final layer
"""
x = inputs
for layer in self.layer_stack:
x = layer(x)
x = self.dropout(x, TRAIN_FLAG())
return x
@property
def requires_length(self) -> bool:
return False
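# Hedged usage sketch: a two-layer MLP head with dropout (sizes are illustrative):
#   mlp = DenseStack(None, [200, 100], activation="relu", pdrop_value=0.1)
#   y = mlp(x)   # x: [B, D] -> y: [B, 100]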
class WithDropout(tf.keras.layers.Layer):
"""This is a utility wrapper that applies dropout after executing the layer it wraps
For variational dropout, we use `SpatialDropout1D` as described in:
https://github.com/keras-team/keras/issues/7290
"""
def __init__(self, layer: tf.keras.layers.Layer, pdrop: float = 0.5, variational: bool = False):
super().__init__()
self.layer = layer
self.dropout = tf.keras.layers.SpatialDropout1D(pdrop) if variational else tf.keras.layers.Dropout(pdrop)
def call(self, inputs):
return self.dropout(self.layer(inputs), TRAIN_FLAG())
@property
def output_dim(self) -> int:
        try:
            return self.layer.output_dim
        except AttributeError:
            return self.layer.units
class WithDropoutOnFirst(tf.keras.layers.Layer):
"""Wrapper for any layer that surrounds it with dropout
This exists primarily for the LSTMEncoderWithState to allow dropout on the output while
passing back the hidden state
For variational dropout, we use `SpatialDropout1D` as described in:
https://github.com/keras-team/keras/issues/7290
"""
def __init__(self, layer: tf.keras.layers.Layer, pdrop: float = 0.5, variational: bool = False):
super().__init__()
self.layer = layer
self.dropout = tf.keras.layers.SpatialDropout1D(pdrop) if variational else tf.keras.layers.Dropout(pdrop)
def call(self, inputs):
outputs = self.layer(inputs)
return self.dropout(outputs[0], TRAIN_FLAG()), outputs[1]
@property
def output_dim(self) -> int:
return self.layer.output_dim
class Highway(tf.keras.layers.Layer):
def __init__(self, input_size: int, name: Optional[str] = None, **kwargs):
super().__init__(name=name)
self.proj = tf.keras.layers.Dense(input_size, activation="relu")
self.transform = tf.keras.layers.Dense(
input_size, bias_initializer=tf.keras.initializers.Constant(value=-2.0), activation="sigmoid"
)
self.output_dim = input_size
def call(self, inputs):
proj_result = self.proj(inputs)
proj_gate = self.transform(inputs)
gated = (proj_gate * proj_result) + ((1 - proj_gate) * inputs)
return gated
@property
def requires_length(self):
return False
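# For reference, the Highway layer above computes y = g * relu(W_p x + b_p) + (1 - g) * x,
# where the gate g = sigmoid(W_t x + b_t) starts near zero (bias initialized to -2), so the
# layer initially behaves close to an identity mapping.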
class ResidualBlock(tf.keras.layers.Layer):
def __init__(self, layer: Optional[tf.keras.layers.Layer] = None, name: Optional[str] = None, **kwargs):
super().__init__(name=name)
self.layer = layer
def call(self, inputs):
return inputs + self.layer(inputs)
@property
def requires_length(self) -> bool:
return False
class SkipConnection(ResidualBlock):
def __init__(self, input_size: int, activation: str = "relu"):
super().__init__(tf.keras.layers.Dense(input_size, activation=activation))
class TimeDistributedProjection(tf.keras.layers.Layer):
def __init__(self, num_outputs, name=None):
"""Set up a low-order projection (embedding) by flattening the batch and time dims and matmul
TODO: Avoid where possible, Dense should work in most cases
:param name: The name for this scope
:param num_outputs: The number of feature maps out
"""
super().__init__(True, name)
self.output_dim = num_outputs
self.W = None
self.b = None
def build(self, input_shape):
nx = int(input_shape[-1])
self.W = self.add_weight("W", [nx, self.output_dim])
self.b = self.add_weight("b", [self.output_dim], initializer=tf.constant_initializer(0.0))
super().build(input_shape)
def call(self, inputs):
"""Low-order projection (embedding) by flattening the batch and time dims and matmul
:param inputs: The input tensor
:return: An output tensor having the same dims as the input, except the last which is `output_dim`
"""
input_shape = get_shape_as_list(inputs)
collapse = tf.reshape(inputs, [-1, input_shape[-1]])
c = tf.matmul(collapse, self.W) + self.b
c = tf.reshape(c, input_shape[:-1] + [self.output_dim])
return c
def compute_output_shape(self, input_shape):
return input_shape[0], self.output_dim
@property
def requires_length(self) -> bool:
return False
class SequenceSequenceAttention(tf.keras.layers.Layer):
def __init__(self, hsz: Optional[int] = None, pdrop: float = 0.1, name: str = None):
super().__init__(name=name)
self.hsz = hsz
self.dropout = tf.keras.layers.Dropout(pdrop)
self.attn = None
def call(self, qkvm):
query, key, value, mask = qkvm
a = self._attention(query, key, mask)
self.attn = a
a = self.dropout(a, training=TRAIN_FLAG())
return self._update(a, value)
def _attention(self, queries, keys, mask=None):
pass
def _update(self, a, value):
"""Attention weights are applied for each value, but in | |
+ m.x835 >= -1)
m.c2396 = Constraint(expr= - m.b16 - m.b375 + m.x836 >= -1)
m.c2397 = Constraint(expr= - m.b16 - m.b376 + m.x837 >= -1)
m.c2398 = Constraint(expr= - m.b16 - m.b377 + m.x838 >= -1)
m.c2399 = Constraint(expr= - m.b16 - m.b378 + m.x839 >= -1)
m.c2400 = Constraint(expr= - m.b16 - m.b379 + m.x840 >= -1)
m.c2401 = Constraint(expr= - m.b16 - m.b380 + m.x841 >= -1)
m.c2402 = Constraint(expr= - m.b17 - m.b381 + m.x842 >= -1)
m.c2403 = Constraint(expr= - m.b17 - m.b382 + m.x843 >= -1)
m.c2404 = Constraint(expr= - m.b17 - m.b383 + m.x844 >= -1)
m.c2405 = Constraint(expr= - m.b17 - m.b384 + m.x845 >= -1)
m.c2406 = Constraint(expr= - m.b17 - m.b385 + m.x846 >= -1)
m.c2407 = Constraint(expr= - m.b17 - m.b386 + m.x847 >= -1)
m.c2408 = Constraint(expr= - m.b17 - m.b387 + m.x848 >= -1)
m.c2409 = Constraint(expr= - m.b17 - m.b388 + m.x849 >= -1)
m.c2410 = Constraint(expr= - m.b17 - m.b389 + m.x850 >= -1)
m.c2411 = Constraint(expr= - m.b17 - m.b390 + m.x851 >= -1)
m.c2412 = Constraint(expr= - m.b17 - m.b391 + m.x852 >= -1)
m.c2413 = Constraint(expr= - m.b17 - m.b392 + m.x853 >= -1)
m.c2414 = Constraint(expr= - m.b17 - m.b393 + m.x854 >= -1)
m.c2415 = Constraint(expr= - m.b17 - m.b394 + m.x855 >= -1)
m.c2416 = Constraint(expr= - m.b17 - m.b395 + m.x856 >= -1)
m.c2417 = Constraint(expr= - m.b17 - m.b396 + m.x857 >= -1)
m.c2418 = Constraint(expr= - m.b17 - m.b397 + m.x858 >= -1)
m.c2419 = Constraint(expr= - m.b17 - m.b398 + m.x859 >= -1)
m.c2420 = Constraint(expr= - m.b17 - m.b399 + m.x860 >= -1)
m.c2421 = Constraint(expr= - m.b17 - m.b400 + m.x861 >= -1)
m.c2422 = Constraint(expr= - m.b18 - m.b401 + m.x862 >= -1)
m.c2423 = Constraint(expr= - m.b18 - m.b402 + m.x863 >= -1)
m.c2424 = Constraint(expr= - m.b18 - m.b403 + m.x864 >= -1)
m.c2425 = Constraint(expr= - m.b18 - m.b404 + m.x865 >= -1)
m.c2426 = Constraint(expr= - m.b18 - m.b405 + m.x866 >= -1)
m.c2427 = Constraint(expr= - m.b18 - m.b406 + m.x867 >= -1)
m.c2428 = Constraint(expr= - m.b18 - m.b407 + m.x868 >= -1)
m.c2429 = Constraint(expr= - m.b18 - m.b408 + m.x869 >= -1)
m.c2430 = Constraint(expr= - m.b18 - m.b409 + m.x870 >= -1)
m.c2431 = Constraint(expr= - m.b18 - m.b410 + m.x871 >= -1)
m.c2432 = Constraint(expr= - m.b18 - m.b411 + m.x872 >= -1)
m.c2433 = Constraint(expr= - m.b18 - m.b412 + m.x873 >= -1)
m.c2434 = Constraint(expr= - m.b18 - m.b413 + m.x874 >= -1)
m.c2435 = Constraint(expr= - m.b18 - m.b414 + m.x875 >= -1)
m.c2436 = Constraint(expr= - m.b18 - m.b415 + m.x876 >= -1)
m.c2437 = Constraint(expr= - m.b18 - m.b416 + m.x877 >= -1)
m.c2438 = Constraint(expr= - m.b18 - m.b417 + m.x878 >= -1)
m.c2439 = Constraint(expr= - m.b18 - m.b418 + m.x879 >= -1)
m.c2440 = Constraint(expr= - m.b18 - m.b419 + m.x880 >= -1)
m.c2441 = Constraint(expr= - m.b18 - m.b420 + m.x881 >= -1)
m.c2442 = Constraint(expr= - m.b19 - m.b421 + m.x882 >= -1)
m.c2443 = Constraint(expr= - m.b19 - m.b422 + m.x883 >= -1)
m.c2444 = Constraint(expr= - m.b19 - m.b423 + m.x884 >= -1)
m.c2445 = Constraint(expr= - m.b19 - m.b424 + m.x885 >= -1)
m.c2446 = Constraint(expr= - m.b19 - m.b425 + m.x886 >= -1)
m.c2447 = Constraint(expr= - m.b19 - m.b426 + m.x887 >= -1)
m.c2448 = Constraint(expr= - m.b19 - m.b427 + m.x888 >= -1)
m.c2449 = Constraint(expr= - m.b19 - m.b428 + m.x889 >= -1)
m.c2450 = Constraint(expr= - m.b19 - m.b429 + m.x890 >= -1)
m.c2451 = Constraint(expr= - m.b19 - m.b430 + m.x891 >= -1)
m.c2452 = Constraint(expr= - m.b19 - m.b431 + m.x892 >= -1)
m.c2453 = Constraint(expr= - m.b19 - m.b432 + m.x893 >= -1)
m.c2454 = Constraint(expr= - m.b19 - m.b433 + m.x894 >= -1)
m.c2455 = Constraint(expr= - m.b19 - m.b434 + m.x895 >= -1)
m.c2456 = Constraint(expr= - m.b19 - m.b435 + m.x896 >= -1)
m.c2457 = Constraint(expr= - m.b19 - m.b436 + m.x897 >= -1)
m.c2458 = Constraint(expr= - m.b19 - m.b437 + m.x898 >= -1)
m.c2459 = Constraint(expr= - m.b19 - m.b438 + m.x899 >= -1)
m.c2460 = Constraint(expr= - m.b19 - m.b439 + m.x900 >= -1)
m.c2461 = Constraint(expr= - m.b19 - m.b440 + m.x901 >= -1)
m.c2462 = Constraint(expr= - m.b20 - m.b441 + m.x902 >= -1)
m.c2463 = Constraint(expr= - m.b20 - m.b442 + m.x903 >= -1)
m.c2464 = Constraint(expr= - m.b20 - m.b443 + m.x904 >= -1)
m.c2465 = Constraint(expr= - m.b20 - m.b444 + m.x905 >= -1)
m.c2466 = Constraint(expr= - m.b20 - m.b445 + m.x906 >= -1)
m.c2467 = Constraint(expr= - m.b20 - m.b446 + m.x907 >= -1)
m.c2468 = Constraint(expr= - m.b20 - m.b447 + m.x908 >= -1)
m.c2469 = Constraint(expr= - m.b20 - m.b448 + m.x909 >= -1)
m.c2470 = Constraint(expr= - m.b20 - m.b449 + m.x910 >= -1)
m.c2471 = Constraint(expr= - m.b20 - m.b450 + m.x911 >= -1)
m.c2472 = Constraint(expr= - m.b20 - m.b451 + m.x912 >= -1)
m.c2473 = Constraint(expr= - m.b20 - m.b452 + m.x913 >= -1)
m.c2474 = Constraint(expr= - m.b20 - m.b453 + m.x914 >= -1)
m.c2475 = Constraint(expr= - m.b20 - m.b454 + m.x915 >= -1)
m.c2476 = Constraint(expr= - m.b20 - m.b455 + m.x916 >= -1)
m.c2477 = Constraint(expr= - m.b20 - m.b456 + m.x917 >= -1)
m.c2478 = Constraint(expr= - m.b20 - m.b457 + m.x918 >= -1)
m.c2479 = Constraint(expr= - m.b20 - m.b458 + m.x919 >= -1)
m.c2480 = Constraint(expr= - m.b20 - m.b459 + m.x920 >= -1)
m.c2481 = Constraint(expr= - m.b20 - m.b460 + m.x921 >= -1)
m.c2482 = Constraint(expr= - m.b21 - m.b61 + m.x922 >= -1)
m.c2483 = Constraint(expr= - m.b21 - m.b62 + m.x923 >= -1)
m.c2484 = Constraint(expr= - m.b21 - m.b63 + m.x924 >= -1)
m.c2485 = Constraint(expr= - m.b21 - m.b64 + m.x925 >= -1)
m.c2486 = Constraint(expr= - m.b21 - m.b65 + m.x926 >= -1)
m.c2487 = Constraint(expr= - m.b21 - m.b66 + m.x927 >= -1)
m.c2488 = Constraint(expr= - m.b21 - m.b67 + m.x928 >= -1)
m.c2489 = Constraint(expr= - m.b21 - m.b68 + m.x929 >= -1)
m.c2490 = Constraint(expr= - m.b21 - m.b69 + m.x930 >= -1)
m.c2491 = Constraint(expr= - m.b21 - m.b70 + m.x931 >= -1)
m.c2492 = Constraint(expr= - m.b21 - m.b71 + m.x932 >= -1)
m.c2493 = Constraint(expr= - m.b21 - m.b72 + m.x933 >= -1)
m.c2494 = Constraint(expr= - m.b21 - m.b73 + m.x934 >= -1)
m.c2495 = Constraint(expr= - m.b21 - m.b74 + m.x935 >= -1)
m.c2496 = Constraint(expr= - m.b21 - m.b75 + m.x936 >= -1)
m.c2497 = Constraint(expr= - m.b21 - m.b76 + m.x937 >= -1)
m.c2498 = Constraint(expr= - m.b21 - m.b77 + m.x938 >= -1)
m.c2499 = Constraint(expr= - m.b21 - m.b78 + m.x939 >= -1)
m.c2500 = Constraint(expr= - m.b21 - m.b79 + m.x940 >= -1)
m.c2501 = Constraint(expr= - m.b21 - m.b80 + m.x941 >= -1)
m.c2502 = Constraint(expr= - m.b22 - m.b81 + m.x942 >= -1)
m.c2503 = Constraint(expr= - m.b22 - m.b82 + m.x943 >= -1)
m.c2504 = Constraint(expr= - m.b22 - m.b83 + m.x944 >= -1)
m.c2505 = Constraint(expr= - m.b22 - m.b84 + m.x945 >= -1)
m.c2506 = Constraint(expr= - m.b22 - m.b85 + m.x946 >= -1)
m.c2507 = Constraint(expr= - m.b22 - m.b86 + m.x947 >= -1)
m.c2508 = Constraint(expr= - m.b22 - m.b87 + m.x948 >= -1)
m.c2509 = Constraint(expr= - m.b22 - m.b88 + m.x949 >= -1)
m.c2510 = Constraint(expr= - m.b22 - m.b89 + m.x950 >= -1)
m.c2511 = Constraint(expr= - m.b22 - m.b90 + m.x951 >= -1)
m.c2512 = Constraint(expr= - m.b22 - m.b91 + m.x952 >= -1)
m.c2513 = Constraint(expr= - m.b22 - m.b92 + m.x953 >= -1)
m.c2514 = Constraint(expr= - m.b22 - m.b93 + m.x954 >= -1)
m.c2515 = Constraint(expr= - m.b22 - m.b94 + m.x955 >= -1)
m.c2516 = Constraint(expr= - m.b22 - m.b95 + m.x956 >= -1)
m.c2517 = Constraint(expr= - m.b22 - m.b96 + m.x957 >= -1)
m.c2518 = Constraint(expr= - m.b22 - m.b97 + m.x958 >= -1)
m.c2519 = Constraint(expr= - m.b22 - m.b98 + m.x959 >= -1)
m.c2520 = Constraint(expr= - m.b22 - m.b99 + m.x960 >= -1)
m.c2521 = Constraint(expr= - m.b22 - m.b100 + m.x961 >= -1)
m.c2522 = Constraint(expr= - m.b23 - m.b101 + m.x962 >= -1)
m.c2523 = Constraint(expr= - m.b23 - m.b102 | |
return {}
# determine how many samples, starting from self._cursor, will fit into the requested minibatch size of num_samples
begin = self._cursor
end = self._cursor
assert begin < self._num_samples
actual_num_samples = { name: 0 for name in self._data.keys() }
while end < self._num_samples:
new_num_samples = { name: actual_num_samples[name] + (MinibatchSourceFromData._get_len(value[end]) if self._is_sequence[name] else 1)
for name, value in self._data.items() }
            # return up to the requested number of samples, but always at least one sequence even if it is longer than requested
# also stop if we hit the maximum requested number of samples
max_num_samples = max(new_num_samples.values())
if actual_num_samples and (max_num_samples > num_samples or self._total_num_samples + max_num_samples > self._max_samples):
break
actual_num_samples = new_num_samples
end += 1
self._total_num_samples += max(actual_num_samples.values())
# the minibatch data to return
result = {} # [stream_info] -> MinibatchData
at_end = (end == self._num_samples)
for si in self.streams.values():
arg = self._data[si.name]
if isinstance(arg, Value): # if entire corpus is one big Value, then slice NDArrayView directly
data = arg.data
sub_shape = data.shape[1:]
extent = (end - begin,) + sub_shape
start_offset = (begin,) + tuple(0 for _ in sub_shape)
if number_of_workers != 1: # slice_view presently does not support strides
raise ValueError('distributed reading from Value objects is not supported')
mb_data = data.slice_view(start_offset, extent, data.is_read_only)
else:
# in case of distributed reading, we sub-slice the minibatch
#print('rank/worker', worker_rank, number_of_workers, 'reading', slice(begin+worker_rank, end+worker_rank, number_of_workers))
mb_data = arg[begin+worker_rank:end+worker_rank:number_of_workers]
if number_of_workers != 1:
mb_data = mb_data.copy() # un-stride it, to avoid performance warning
if isinstance(mb_data, list): # create a Value object
if si.name not in self._vars: # this case is more complex, we need a CNTK Variable
from cntk import input_variable, device
self._vars[si.name] = input_variable(**self._types[si.name])
value = Value.create(self._vars[si.name], mb_data)
else:
value = Value(mb_data)
result[si] = MinibatchData(value, num_sequences=end - begin, num_samples=actual_num_samples[si.name],
sweep_end=at_end or (self._total_num_samples >= self._max_samples))
# wrap around the cursor
self._cursor = 0 if at_end else end
return result
def get_checkpoint_state(self):
'''
Gets the checkpoint state of the MinibatchSource.
Returns:
cntk.cntk_py.Dictionary:
A :class:`~cntk.cntk_py.Dictionary` that has the checkpoint state
of the MinibatchSource
'''
return dict(cursor=self._cursor, total_num_samples=self._total_num_samples)
def restore_from_checkpoint(self, checkpoint):
'''
Restores the MinibatchSource state from the specified checkpoint.
Args:
checkpoint (:class:`~cntk.cntk_py.Dictionary`): checkpoint to restore from
'''
self._cursor = checkpoint['cursor']
self._total_num_samples = checkpoint['total_num_samples']
def HTKFeatureDeserializer(streams):
'''
Configures the HTK feature reader that reads speech data from scp files.
Args:
streams: any dictionary-like object that contains a mapping from stream
names to :class:`StreamDef` objects. Each StreamDef object configures
a feature stream.
'''
feat = []
for stream_name, stream in streams.items():
if stream.stream_alias is not None:
raise ValueError("HTKFeatureDeserializer does not support stream names")
if 'scp' not in stream:
raise ValueError("No scp files specified for HTKFeatureDeserializer")
dimension = stream.dim
scp_file = stream['scp']
broadcast = stream['broadcast'] if 'broadcast' in stream else False
defines_mb_size = stream.get('defines_mb_size', False)
left_context, right_context = stream.context if 'context' in stream\
else (0, 0)
htk_config = cntk_py.HTKFeatureConfiguration(stream_name, scp_file,
dimension, left_context,
right_context, broadcast,
defines_mb_size)
feat.append(htk_config)
if len(feat) == 0:
raise ValueError("no feature streams found")
return cntk_py.htk_feature_deserializer(feat)
def HTKMLFDeserializer(label_mapping_file, streams, phoneBoundaries = False):
'''
Configures an HTK label reader that reads speech HTK format MLF (Master
Label File)
Args:
label_mapping_file (str): path to the label mapping file
streams: any dictionary-like object that contains a mapping from stream
names to :class:`StreamDef` objects. Each StreamDef object configures
a label stream.
phoneBoundaries (bool): if phone boundaries should be considered (should be set to True for CTC training, False otherwise)
'''
if len(streams) != 1:
raise ValueError("HTKMLFDeserializer only accepts a single stream")
for stream_name, stream in streams.items():
if stream.stream_alias is not None:
raise ValueError("HTKMLFDeserializer does not support stream names")
dimension = stream.dim
if 'mlf' not in stream:
raise ValueError(
"No master label files specified for HTKMLFDeserializer")
master_label_files = stream['mlf']
if not isinstance(master_label_files, list):
master_label_files = [master_label_files]
return cntk_py.htk_mlf_deserializer(stream_name, label_mapping_file, dimension, master_label_files, phoneBoundaries)
def _process_image_deserializer_args(filename, streams, deserializer):
image_stream_name = None
# Streams with the same name are not allowed, make sure the default is
# unique.
label_stream_name = '_ignore_labels_' + str(uuid.uuid1())
num_labels = 2
transforms = []
for key in streams:
s = streams[key]
alias = s.stream_alias
if alias == "image":
image_stream_name = key
transforms = s.transforms
elif alias == "label":
label_stream_name = key
num_labels = s.dim
else:
raise ValueError(
"{}: invalid field name '{}', allowed are "
"'image' and 'label'".format(deserializer, alias))
if image_stream_name is None:
raise ValueError("{}: stream name ('image' or 'label') must be "
"specified".format(deserializer))
return (filename, label_stream_name, num_labels,
image_stream_name, transforms)
def ImageDeserializer(filename, streams):
'''
Configures the image reader that reads images and corresponding
labels from a file of the form::
<full path to image> <tab> <numerical label (0-based class id)>
or::
sequenceId <tab> path <tab> label
Args:
filename (str): file name of the map file that associates images to
classes
See also:
:cntkwiki:`Image reader definition <BrainScript-Image-reader>`
'''
args = _process_image_deserializer_args(filename, streams,
'ImageDeserializer')
return cntk_py.image_deserializer(*args)
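# Hedged usage sketch (the file name and dimensions below are illustrative):
#   import cntk.io.transforms as xforms
#   image_source = ImageDeserializer('train_map.txt', StreamDefs(
#       features=StreamDef(field='image', transforms=[
#           xforms.scale(width=224, height=224, channels=3)]),
#       labels=StreamDef(field='label', shape=1000)))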
def Base64ImageDeserializer(filename, streams):
'''
Configures the image reader that reads base64 encoded images and corresponding
labels from a file of the form::
[sequenceId <tab>] <numerical label (0-based class id)> <tab> <base64 encoded image>
Similarly to the ImageDeserializer, the sequenceId prefix is optional and can be omitted.
Args:
filename (str): file name of the input file dataset that contains images
and corresponding labels
streams: any dictionary-like object that contains a mapping from stream
names to :class:`StreamDef` objects, configuring the 'image' and
'label' streams.
See also:
:cntkwiki:`Base64ImageDeserializer options <BrainScript-and-Python---Understanding-and-Extending-Readers#base64imagedeserializer-options>`
'''
args = _process_image_deserializer_args(filename, streams,
'Base64ImageDeserializer')
return cntk_py.base64_image_deserializer(*args)
def CTFDeserializer(filename, streams):
'''
Configures the CNTK text-format reader that reads text-based files with
lines of the form::
[Sequence_Id] (Sample)+
where::
Sample=|Input_Name (Value )*
Args:
filename (str): file name containing the text input
streams: any dictionary-like object that contains a mapping from stream
names to :class:`StreamDef` objects. Each StreamDef object configures
an input stream.
See also:
:cntkwiki:`CNTKTextReader format <BrainScript-CNTKTextFormat-Reader>`
'''
for k, s in streams.items():
if s.stream_alias is None:
raise ValueError("CTFDeserializer: stream name for key %s must be "
"specified" % k)
sc = [cntk_py.StreamConfiguration(
k, s.dim, s.is_sparse, s.stream_alias, s['defines_mb_size']) for k, s in streams.items()]
return cntk_py.ctf_deserializer(filename, sc)
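As an illustration of the CTF wiring, the sketch below builds a reader for a hypothetical data.ctf file whose lines carry |x and |y streams; the file name and dimensions are placeholders, not values taken from this module.
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs

# 'data.ctf' is hypothetical; the field values must match the |x and |y
# stream names used inside the CTF file itself.
reader = MinibatchSource(CTFDeserializer('data.ctf', StreamDefs(
    features=StreamDef(field='x', shape=784, is_sparse=False),
    labels=StreamDef(field='y', shape=10, is_sparse=True))))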
# TODO: this should be a private class; use StreamDef instead
class StreamConfiguration(cntk_py.StreamConfiguration):
'''
Configuration of a stream in a text format reader.
Args:
name (str): name of this stream
dim (int): dimensions of this stream. A text format reader reads data
as flat arrays. If you need different shapes you can
:func:`~cntk.ops.reshape` it later.
is_sparse (bool, defaults to `False`): whether the provided data is
sparse (`False` by default)
stream_alias (str, defaults to ''): name of the stream in the file
defines_mb_size (`bool`, defaults to False): whether this stream defines
the minibatch size.
'''
def __init__(self, name, dim, is_sparse=False, stream_alias='', defines_mb_size = False):
return super(StreamConfiguration, self).__init__(name, dim, is_sparse,
stream_alias, defines_mb_size)
# stream definition for use in StreamDefs
# returns a record { stream_alias, is_sparse, optional shape, optional transforms, optional context, optional scp, optional mlf }
def StreamDef(field=None, shape=None, is_sparse=False, transforms=None,
context=None, scp=None, mlf=None, broadcast=None, defines_mb_size=False):
'''
Configuration of a stream for use with the builtin Deserializers.
The meanings of some configuration keys have a mild dependency on the
exact deserializer, and certain keys are meaningless for certain
deserializers.
Args:
field (`str`, defaults to `None`): this is the name of the stream
* for CTFDeserializer the name is inside the CTF file
* for ImageDeserializer the acceptable names are `image` or `label`
* for HTKFeatureDeserializer and HTKMLFDeserializer only the default
value of None is acceptable
shape (`int` or `tuple`, defaults to `None`): dimensions of this
stream. HTKFeatureDeserializer, HTKMLFDeserializer, and
CTFDeserializer read data as flat arrays. If you need different
shapes you can :func:`~cntk.ops.reshape` it later.
is_sparse (`bool`, defaults to `False`): whether the provided data is
sparse. `False` by default, unless mlf is provided.
transforms (`list`, defaults to `None`): list of transforms to be
applied by the Deserializer. Currently only ImageDeserializer
supports transforms.
context (`tuple`, defaults to `None`): left and right context to
consider when reading in HTK data. Only supported by
HTKFeatureDeserializer.
scp (`str` or `list`, defaults to `None`): scp files for HTK data
mlf (`str` or `list`, defaults to `None`): mlf files for HTK data
broadcast (`bool`, defaults to `None`): whether the features in this
stream should be | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2021, kevin-dot-g-dot-stewart-at-gmail-dot-com
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Version: 1.0.1
#### Updates:
#### 1.0.1 - added 9.0 support
# - changed max version
# - added "Client VLANs" traffic condition
# - added "Server Certificate (Issuer DN)" traffic condition
# - added "Server Certificate (SANs)" traffic condition
# - added "Server Certificate (Subject DN)" traffic condition
# - added "Server Name (TLS ClientHello)" traffic condition
# - added "http2" as option in "TCP L7 Protocol Lookup" traffic condition
# - updated version and previousVersion keys to match target SSLO version
# - modified code in ssloGS_global_exists() to ensure ssloGS_global lookup does not trigger an error (20210917)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_sslo_config_policy
short_description: Manage an SSL Orchestrator security policy
description:
- Manage an SSL Orchestrator security policy
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the security policy. The configuration auto-prepends "ssloP_" to the policy name. The policy name should be less than 14 characters and must not contain dashes "-".
type: str
required: True
policyType:
description:
- Specifies the type of policy, either "outbound" or "inbound".
type: str
required: True
defaultRule:
description:
- Specifies the settings for the default "All Traffic" security policy rule
suboptions:
allowBlock:
description:
- Defines the allow/block behavior for the default All Traffic rule.
type: str
choices:
- allow
- block
default: allow
tlsIntercept:
description:
- Defines the TLS intercept/bypass behavior for the default All Traffic rule.
type: str
choices:
- bypass
- intercept
default: bypass
serviceChain:
description:
- Defines the service chain to attach to the default All Traffic rule.
type: str
default: None
proxyConnect:
description:
- Specifies the proxy-connect settings, as required, to establish egress through an upstream proxy chain
suboptions:
enabled:
description:
- Enables or disables the proxy-connect function for upstream explicit proxy chaining.
type: bool
default: False
pool:
description:
- Defines the upstream explicit proxy pool. This must be a pre-defined pool.
type: str
default: None
serverCertValidation:
description:
- Enables or disables server certificate validation. When enabled, and the SSL configuration also sets blockUntrusted and blockExpired to ignore (False), this will generate a blocking page to the user, using a valid "masked" forged server certificate, for any expired or untrusted remote server certificates.
type: bool
default: False
trafficRules:
description:
- Defines the traffic rules to apply to the security policy, in defined order.
type: list
elements: dict
suboptions:
name:
description:
- Defines the name of the rule.
type: str
matchType:
description:
- Defines the match type when multiple conditions are applied to a single rule.
type: str
choices:
- or
- and
default: or
allowBlock:
description:
- Defines the allow/block behavior for this rule.
type: str
choices:
- allow
- block
default: allow
tlsIntercept:
description:
- Defines the TLS intercept/bypass behavior for this rule.
type: str
choices:
- bypass
- intercept
default: bypass
serviceChain:
description:
- Defines the service chain to attach to this rule.
type: str
default: None
conditions:
description:
- Defines the list of conditions within this rule.
type: list
elements: dict
suboptions:
pinnersRule:
description: enables the default certificate pinners condition. This condition is used alone in a rule.
categoryLookupAll:
description: enables the Category Lookup All condition.
suboptions:
values:
description: a list of URL categories (ex. "Financial and Data Services")
type: str
categoryLookupConnect:
description: enables the Category Lookup HTTP Connect condition.
suboptions:
values:
description: a list of URL categories (ex. "Financial and Data Services")
type: str
categoryLookupSNI:
description: enables the Category Lookup SNI condition.
suboptions:
values:
description: a list of URL categories (ex. "Financial and Data Services")
type: str
clientIpGeoLocation:
description: enables the Client IP Geolocation condition.
suboptions:
values:
description: a list of 'type' and 'value' keys, where type can be 'countryCode', 'countryName', 'continent', or 'state'
type: str
serverIpGeolocation:
description: enables the Server IP Geolocation condition.
suboptions:
values:
description: a list of 'type' and 'value' keys, where type can be 'countryCode', 'countryName', 'continent', or 'state'
type: str
clientIpReputation:
description: enables the Client IP Reputation condition.
suboptions:
value:
description: defines the values type as one of 'good', 'bad', or 'category'. If 'good' or 'bad' is entered here, the 'values' key is not needed. If 'category' is entered here, the 'values' key must exist and contain a list of IP reputation categories (ex. "Web Attacks"). Note that IP reputation categories require BIG-IP 16.0 and higher.
type: str
values:
description: when above 'value' is 'category', this key contains the list of IP reputation categories (ex. "Spam Sources")
type: str
serverIpReputation:
description: enables the Server IP Reputation condition.
suboptions:
value:
description: defines the values type as one of 'good', 'bad', or 'category'. If 'good' or 'bad' is entered here, the 'values' key is not needed. If 'category' is entered here, the 'values' key must exist and contain a list of IP reputation categories (ex. "Web Attacks"). Note that IP reputation categories require BIG-IP 16.0 and higher.
type: str
values:
description: when above 'value' is 'category', this key contains the list of IP reputation categories (ex. "Spam Sources")
type: str
clientIpSubnet:
description: enables the Client IP Subnet Match condition.
suboptions:
values:
description: a list of IP subnets, or a datagroup of IP subnets. Note that IP subnet datagroups require BIG-IP 16.0 and higher.
type: str
serverIpSubnet:
description: enables the Server IP Subnet Match condition.
suboptions:
values:
description: a list of IP subnets, or a datagroup of IP subnets. Note that IP subnet datagroups require BIG-IP 16.0 and higher.
type: str
clientPort:
description: enables the Client Port Match condition.
suboptions:
type:
description: defines the data as a set of static 'values' (including datagroups), or a port 'range'. When the type is 'value', the 'values' key must exist and contain a list of ports or datagroups. When the type is 'range', the 'fromPort' and 'toPort' keys must exist and contain integer port numbers. Note that port datagroups and port ranges require BIG-IP 16.0 and higher.
type: str
choices:
- value
- range
default: value
values:
description: a list of ports, or a datagroup of ports. Note that port datagroups require BIG-IP 16.0 and higher.
type: str
fromPort:
description: the starting integer port number in a range of ports.
type: int
toPort:
description: the ending integer port number in a range of ports.
type: int
serverPort:
description: enables the Server Port Match condition.
suboptions:
type:
description: defines the data as a set of static 'values' (including datagroups), or a port 'range'. When the type is 'value', the 'values' key must exist and contain a list of ports or datagroups. When the type is 'range', the 'fromPort' and 'toPort' keys must exist and contain integer port numbers. Note that port datagroups and port ranges require BIG-IP 16.0 and higher.
type: str
choices:
- value
- range
default: value
values:
description: a list of ports, or a datagroup of ports. Note that port datagroups require BIG-IP 16.0 and higher.
type: str
fromPort:
description: the starting integer port number in a range of ports.
type: int
toPort:
description: the ending integer port number in a range of ports.
type: int
sslCheck:
description: enables the SSL Check condition.
suboptions:
value:
description: enables or disables SSL check
type: bool
choices:
- True
- False
L7ProtocolCheckTcp:
description: enables the TCP L7 Protocol Check condition.
suboptions:
values:
description: a list of TCP protocols, where the options are 'dns', 'ftp', 'ftps', 'http', 'httpConnect', 'https', 'imap', 'imaps', 'smtp', 'smtps', 'pop3', 'pop3s', 'telnet', or 'http2' (9.0+)
type: str
L7ProtocolCheckUdp:
description: enables the UDP L7 Protocol Check condition.
suboptions:
values:
description: a list of UDP protocols, where the options are 'dns', or 'quic'.
type: str
urlMatch:
description: enables the URL Match condition.
suboptions:
values:
description: a list of 'type' and 'value' keys. The 'type' key can be one of 'equals', 'substring', 'prefix', 'suffix', or 'glob'. The | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.layers import *
from model.build import *
import cv2
from model.utils import *
def get_test_input():
img = cv2.imread("images/dog-cycle-car.png")
img = cv2.resize(img, (416, 416)) # Resize to the input dimension
# BGR -> RGB | H x W x C -> C x H x W
img_ = img[:, :, ::-1].transpose((2, 0, 1))
# Add a channel at 0 (for batch) | Normalise
img_ = img_[np.newaxis, :, :, :]/255.0
img_ = torch.from_numpy(img_).float() # Convert to float
img_ = Variable(img_) # Convert to Variable
return img_
class Darknet(nn.Module):
"""
Main Darknet class. It is a subclass of nn.Module
"""
def __init__(self, cfgfile):
super(Darknet, self).__init__()
# Translate our YOLOv3 CFG file to blocks
self.blocks = parse_cfg(cfgfile)
# Convert those blocks to a module list for Pytorch
self.net_info, self.module_list = create_modules(self.blocks)
# These are for loading the weights below
self.header = torch.IntTensor([0, 0, 0, 0])
self.seen = 0
def get_blocks(self):
"""
Getter function for blocks
Returns:
blocks
"""
return self.blocks
def get_module_list(self):
"""
Getter function for module_list
Returns:
module_list
"""
return self.module_list
# Main forward pass
def forward(self, x, CUDA):
"""
Does the forward pass
Params:
x: The input
CUDA: Use GPU to accelerate task
"""
detections = []
# We don't want the first block, that contains the network info
modules = self.blocks[1:]
# We cache the output feature maps of every layer in a dict called outputs.
# The keys are the indices of the layers, and the values are
# the feature maps. We can then search through the keys to look up
# a layer's feature maps for route or shortcut layers.
outputs = {}
write = 0
# Go through every module (layer)
for i in range(len(modules)):
# Get the module type value from the current index
module_type = (modules[i]["type"])
if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
# The module list is a PyTorch nn.ModuleList(), so indexing it at i returns
# the layer block built for this index; calling that block with the current
# input (x) runs its forward pass (the convolution, upsample or maxpool)
# and produces this layer's output.
x = self.module_list[i](x)
# Set the key to the index, and set the value to the computed
# calculation of the block and the input
outputs[i] = x
elif module_type == "route":
layers = modules[i]["layers"]
# The two layers designated in the layer get turned into a list with indexes
# of 0 and 1
layers = [int(a) for a in layers]
# Route layers[0] is never greater than 0 here, so this branch is a candidate for removal
if (layers[0]) > 0:
layers[0] = layers[0] - i
# This happens only on the 2 smaller detection layers, i.e. on a 416x416 image,
# the 13x13 and 26x26 detection region levels
if len(layers) == 1:
# Grab the output from the current index plus the first value, usually
# a -4 in this situation. This is what allows a kind of independent route
# for the detection region layers. It goes back to the layer
# where the split happened, pulls those feature maps forward past the detection
# layer, and prepares them as input for the next convolution.
x = outputs[i + (layers[0])]
else:
# These are the two large skip connections, from layers 37 -> 99 and 62 -> 87
if (layers[1]) > 0:
# Reset layer 1 to the difference between the desired layer index
# and the current layer. So, from 37 - 99 = (-62). We then add
# it to the current layer below in map2
layers[1] = layers[1] - i
# map1 is the output of the previous layer (layers[0] is always a
# negative number), here an upsample layer in the YOLO Cfg
map1 = outputs[i + layers[0]]
# map2 is the previous convolution to pull the data from
map2 = outputs[i + layers[1]]
# We're adding together the values of the outputs from the routed layers
# along the depth of the tensor since the param of 1 corresponds to
# the depth dimension. `Cat` method stands for concatenate.
x = torch.cat((map1, map2), 1)
# Set the key to the current module index, and set the dict value to the computed
# calculation of the block x variable
outputs[i] = x
elif module_type == "shortcut":
from_ = int(modules[i]["from"])
# Grab the output from the previous layer, as well as the `from` layer (which
# is always -3) before. This is either a downsampling, upsampling or shortcut
# connection. This simply adds the feature maps together without the tensor
# concatenation you find in the routings. This is what creates the residual
# blocks throughout the YOLO network
# x = outputs[i-1] + outputs[i+from_]
x = outputs[i-1] + outputs[i+from_]
# Set the key to the current module index, and value to x variable calculation
outputs[i] = x
elif module_type == 'yolo':
# Get the anchor list
anchors = self.module_list[i][0].anchors
# Get the input dimensions
inp_dim = int(self.net_info["height"])
# Get the number of classes
num_classes = int(modules[i]["classes"])
# Output the result
x = x.data
# Run a prediction on a particular region size
x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)
if type(x) == int:
continue
# If write = 0, that means this is the first detection
if not write:
detections = x
write = 1
# Otherwise, concatenate the different predictions together along the
# depth of the tensor
else:
detections = torch.cat((detections, x), 1)
# Since this is a detection layer, we still need to carry the previous layer's
# output forward, so that we can use it as input to the next layer
outputs[i] = outputs[i-1]
try:
# After all the modules have been gone through, return the detections tensor, which is a
# combined tensor for all three region size
return detections
except:
return 0
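As a rough end-to-end sketch of the forward pass above (not part of the original file): the cfg and weight paths are placeholders, CUDA is disabled for simplicity, and the output shape shown assumes the standard 80-class YOLOv3 configuration at 416x416. It reuses torch and get_test_input as defined earlier in this file.
# Hypothetical paths; illustrative only.
model = Darknet("cfg/yolov3.cfg")
model.load_weights("yolov3.weights")
inp = get_test_input()                    # 1 x 3 x 416 x 416 float tensor
with torch.no_grad():
    detections = model(inp, CUDA=False)   # concatenated predictions from all 3 scales
print(detections.shape)                   # e.g. torch.Size([1, 10647, 85]) for 80 classes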
def load_weights(self, weightfile):
"""
Loads the weightfile. It is all 32-bit floats with 5 bytes as headers. There
are only weights for convolution and batch_normalization layers.
Params:
weightfile: link to weightfile
Return:
loads weights
"""
# Open the weights file
fp = open(weightfile, "rb")
# The first 5 values are header information
# 1. Major version number
# 2. Minor version number
# 3. Subversion number
# 4,5. Images seen by the network (during training)
header = np.fromfile(fp, dtype=np.int32, count=5)
# Turn the numpy header file into a tensor
self.header = torch.from_numpy(header)
# The total number of images seen
self.seen = self.header[3]
# The rest of the values are the weights, let's load them up
# into a numpy
weights = np.fromfile(fp, dtype=np.float32)
# This variable keeps track of where we are in the weight list
# which is different than the module list
ptr = 0
# Let's go through every item in the module list of this
# instantiated class
for i in range(len(self.module_list)):
# We have to add one to this index because the first block
# is the netinfo block. This is different from the module list,
# which had the netinfo block removed
module_type = self.blocks[i + 1]["type"]
if module_type == "convolutional":
# Grab the current module
model = self.module_list[i]
try:
# If there is batch normalize on this convolutional layer
# let's grab that
batch_normalize = int(self.blocks[i+1]["batch_normalize"])
except:
batch_normalize = 0
# The first value in the model is the Conv2D module, so, for example
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
conv = model[0]
if (batch_normalize):
# The second value in the model is a BatchNorm2d module, so, for example
# BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
bn = model[1]
# Get the number of weights of Batch Norm Layer
# This is the first value in the module, so | |
0, 0, 0, 0],
[1196, 101.213103, 0, 9999, -9999, 1.0, 100, 1, 160.697956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1197, 62.33039, 0, 9999, -9999, 1.0, 100, 1, 90.592266, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1198, 5.408584, 0, 9999, -9999, 1.0, 100, 1, 39.819157, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1199, 23.212065, 0, 9999, -9999, 1.0, 100, 1, 201.421956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1200, 6.07511, 0, 9999, -9999, 1.0, 100, 1, 56.012408, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1201, 7.352618, 0, 9999, -9999, 1.0, 100, 1, 25.166667, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1202, 16.567385, 0, 9999, -9999, 1.0, 100, 1, 49.89238, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1203, 108.33802, 0, 9999, -9999, 1.0, 100, 1, 182.623256, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1204, 14.093463, 0, 9999, -9999, 1.0, 100, 1, 47.541821, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1205, 0.080769, 0, 9999, -9999, 1.0, 100, 1, 0.548843, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1206, 1.272824, 0, 9999, -9999, 1.0, 100, 1, 3.806894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1207, 1.109715, 0, 9999, -9999, 1.0, 100, 1, 3.575453, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1208, 0.511668, 0, 9999, -9999, 1.0, 100, 1, 2.242031, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1209, 0.018747, 0, 9999, -9999, 1.0, 100, 1, 1.268261, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1210, 1.024823, 0, 9999, -9999, 1.0, 100, 1, 9.02599, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1211, 11.536565, 0, 9999, -9999, 1.0, 100, 1, 18.005229, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1212, 59.277726, 0, 9999, -9999, 1.0, 100, 1, 91.171888, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1213, 42.144244, 0, 9999, -9999, 1.0, 100, 1, 57.342704, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1214, 1.660534, 0, 9999, -9999, 1.0, 100, 1, 4.505907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1215, 0.563216, 0, 9999, -9999, 1.0, 100, 1, 2.252965, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1216, 18.093364, 0, 9999, -9999, 1.0, 100, 1, 67.754469, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1217, 20.076386, 0, 9999, -9999, 1.0, 100, 1, 35.871617, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1218, 0.440761, 0, 9999, -9999, 1.0, 100, 1, 0.980482, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1219, 3.701181, 0, 9999, -9999, 1.0, 100, 1, 12.33953, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1220, 8.153208, 0, 9999, -9999, 1.0, 100, 1, 30.597849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1221, 274.044802, 0, 9999, -9999, 1.0, 100, 1, 593.230436, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1222, 177.900019, 0, 9999, -9999, 1.0, 100, 1, 211.057769, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1223, 3.000807, 0, 9999, -9999, 1.0, 100, 1, 3.806101, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1224, 56.060278, 0, 9999, -9999, 1.0, 100, 1, 160.523778, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1225, 12.590114, 0, 9999, -9999, 1.0, 100, 1, 34.931481, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1226, 1.070936, 0, 9999, -9999, 1.0, 100, 1, 3.982858, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1227, 8.83993, 0, 9999, -9999, 1.0, 100, 1, 17.482807, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1228, 0.578779, 0, 9999, -9999, 1.0, 100, 1, 3.021367, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1229, 6.688221, 0, 9999, -9999, 1.0, 100, 1, 51.244222, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1230, 0.008365, 0, 9999, -9999, 1.0, 100, 1, 1.681276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1231, 9.577207, 0, 9999, -9999, 1.0, 100, 1, 33.55478, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1232, 24.257954, 0, 9999, -9999, 1.0, 100, 1, 75.075088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1233, 102.794107, 0, 9999, -9999, 1.0, 100, 1, 575.36828, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1235, 4.244503, 0, 9999, -9999, 1.0, 100, 1, 9.03734, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1236, 38.326427, 0, 9999, -9999, 1.0, 100, 1, 82.225035, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1237, 10.80595, 0, 9999, -9999, 1.0, 100, 1, 14.605409, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1238, 85.329663, 0, 9999, -9999, 1.0, 100, 1, 188.691049, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1239, 0.410744, 0, 9999, -9999, 1.0, 100, 1, 2.267706, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1240, 157.828852, 0, 9999, -9999, 1.0, 100, 1, 339.51051, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1241, 281.101359, 0, 9999, -9999, 1.0, 100, 1, 385.361595, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1242, 8.889558, 0, 9999, -9999, 1.0, 100, 1, 27.074038, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1243, 46.620497, 0, 9999, -9999, 1.0, 100, 1, 83.079842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1244, 38.473433, 0, 9999, -9999, 1.0, 100, 1, 323.472536, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1245, 2.619557, 0, 9999, -9999, 1.0, 100, 1, 8.080896, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1246, 6.54627, 0, 9999, -9999, 1.0, 100, 1, 57.127825, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1247, 7.490614, 0, 9999, -9999, 1.0, 100, 1, 21.833396, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1248, 9.526373, 0, 9999, -9999, 1.0, 100, 1, 91.958275, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1249, 74.059905, 0, 9999, -9999, 1.0, 100, 1, 76.135177, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1250, 30.032746, 0, 9999, -9999, 1.0, 100, 1, 30.830519, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1251, 21.468642, 0, 9999, -9999, 1.0, 100, 1, 23.404345, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1252, 12.061855, 0, 9999, -9999, 1.0, 100, 1, 14.887727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1253, 27.34799, 0, 9999, -9999, 1.0, 100, 1, 64.502694, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1254, 13.630516, 0, 9999, -9999, 1.0, 100, 1, 82.278695, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1255, 1.779472, 0, 9999, -9999, 1.0, 100, 1, 3.818419, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1256, 7.252506, 0, 9999, -9999, 1.0, 100, 1, 15.091842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1257, 39.129466, 0, 9999, -9999, 1.0, 100, 1, 88.95288, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1258, 32.795784, 0, 9999, -9999, 1.0, 100, 1, 235.487329, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1259, 49.777722, 0, 9999, -9999, 1.0, 100, 1, 109.288719, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1260, 6.743172, 0, 9999, -9999, 1.0, 100, 1, 20.168717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1261, 115.61025, 0, 9999, -9999, 1.0, 100, 1, 201.699555, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1262, 0.140205, 0, 9999, -9999, 1.0, 100, 1, 0.524108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1263, 0.104037, 0, 9999, -9999, 1.0, 100, 1, 0.352421, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1264, 41.638272, 0, 9999, -9999, 1.0, 100, 1, 82.035361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1265, 2.523416, 0, 9999, -9999, 1.0, 100, 1, 6.654727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1266, 63.767622, 0, 9999, -9999, 1.0, 100, 1, 119.710849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1267, 30.533562, 0, 9999, -9999, 1.0, 100, 1, 39.469006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1268, 0.247148, 0, 9999, -9999, 1.0, 100, 1, 3.4295, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1269, 0.400988, 0, 9999, -9999, 1.0, 100, 1, 5.105829, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1270, 7.164013, 0, 9999, -9999, 1.0, 100, 1, 38.950511, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1271, 12.743744, 0, 9999, -9999, 1.0, 100, 1, 47.371792, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1272, 0.353864, 0, 9999, -9999, 1.0, 100, 1, 1.23166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1273, 0.47203, 0, 9999, -9999, 1.0, 100, 1, 2.169201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1274, 44.76307, 0, 9999, -9999, 1.0, 100, 1, 53.095629, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1275, 96.382348, 0, 9999, -9999, 1.0, 100, 1, 99.0753, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1276, 25.072764, 0, 9999, -9999, 1.0, 100, 1, 25.655641, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1277, 57.305334, 0, 9999, -9999, 1.0, 100, 1, 65.611252, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1278, 140.076908, 0, 9999, -9999, 1.0, 100, 1, 170.437781, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1280, 0.009292, 0, 9999, -9999, 1.0, 100, 1, 0.626494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1281, 0.215819, 0, 9999, -9999, 1.0, 100, 1, 2.51246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1282, 0.035455, 0, 9999, -9999, 1.0, 100, 1, 4.363037, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1283, 566.886433, 0, 9999, -9999, 1.0, 100, 1, 1297.764428, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1284, 11.147042, 0, 9999, -9999, 1.0, 100, 1, 28.426322, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1285, 0.176128, 0, 9999, -9999, 1.0, 100, 1, 2.937048, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1286, 7.298765, 0, 9999, -9999, 1.0, 100, 1, 17.872201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1287, 54.686984, 0, 9999, -9999, 1.0, 100, 1, 93.199628, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1288, 115.570154, 0, 9999, -9999, 1.0, 100, 1, 148.402692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1289, 109.233162, 0, 9999, -9999, 1.0, 100, 1, 184.149235, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1290, 2.82219, 0, 9999, -9999, 1.0, 100, 1, 4.901974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1291, 79.750992, 0, 9999, -9999, 1.0, 100, 1, 98.293351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1292, 34.540423, 0, 9999, -9999, 1.0, 100, 1, 41.682074, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1293, 1.393372, 0, 9999, -9999, 1.0, 100, 1, 2.402107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1294, 3.305487, 0, 9999, -9999, 1.0, 100, 1, 5.39743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1295, 3.568033, 0, 9999, -9999, 1.0, 100, 1, 5.873666, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1296, 5.595408, 0, 9999, -9999, 1.0, 100, 1, 27.356489, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1297, 51.707692, 0, 9999, -9999, 1.0, 100, 1, 177.778742, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1298, 0.338037, 0, 9999, -9999, 1.0, 100, 1, 4.014603, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1299, 0.122192, 0, 9999, -9999, 1.0, 100, 1, 2.158207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1300, 14.580961, 0, 9999, -9999, 1.0, 100, 1, 23.74405, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1301, 37.34829, 0, 9999, -9999, 1.0, 100, 1, 60.863304, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1302, 3.108793, 0, 9999, -9999, 1.0, 100, 1, 4.877299, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1303, 2.783283, 0, 9999, -9999, 1.0, 100, 1, 4.335516, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1304, 6.167381, 0, 9999, -9999, 1.0, 100, 1, 9.594319, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1305, 0.002999, 0, 9999, -9999, 1.0, 100, 1, 0.004567, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1306, 0.464409, 0, 9999, -9999, 1.0, 100, 1, 1.827014, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1307, 0.114555, 0, 9999, -9999, 1.0, 100, 1, 0.29894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1308, 1.890417, 0, 9999, -9999, 1.0, 100, 1, 3.278321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1309, 0.7771, 0, 9999, -9999, 1.0, 100, 1, 3.34909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1310, 0.382267, 0, 9999, -9999, 1.0, 100, 1, 1.64589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1311, 1.687945, 0, 9999, -9999, 1.0, 100, 1, 11.854004, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1312, 68.410581, 0, 9999, -9999, 1.0, 100, 1, 262.264924, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1313, 19.984456, 0, 9999, -9999, 1.0, 100, 1, 30.836748, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1314, 8.217651, 0, 9999, -9999, 1.0, 100, 1, 12.003987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1315, 6.211984, 0, 9999, -9999, 1.0, 100, 1, 7.879027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1316, 0.382385, 0, 9999, -9999, 1.0, 100, 1, 2.757497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1317, 3.330057, 0, 9999, -9999, 1.0, 100, 1, 23.958574, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1318, 0.464025, 0, 9999, -9999, 1.0, 100, 1, 1.956332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1319, 3.049645, 0, 9999, -9999, 1.0, 100, 1, 17.708276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1320, 6.381865, 0, 9999, -9999, 1.0, 100, 1, 20.75859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1321, 0.048851, 0, 9999, -9999, 1.0, 100, 1, 0.161123, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1322, 0.268439, 0, 9999, -9999, 1.0, 100, 1, 0.929763, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1323, 58.940361, 0, 9999, -9999, 1.0, 100, 1, 199.111909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1324, 5.203827, 0, 9999, -9999, 1.0, 100, 1, 13.063258, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1325, 33.770731, 0, 9999, -9999, 1.0, 100, 1, 90.497559, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
"loss became nan"
status["training duration"] = formatDuration(
time() - training_time_start)
status["last epoch duration"] = formatDuration(
time() - epoch_time_start)
return status, run_id
print()
epoch_duration = time() - epoch_time_start
print("Epoch {} ({}):".format(epoch + 1,
formatDuration(epoch_duration)))
# With warmup or not
if warm_up_weight < 1:
print(' Warm-up weight: {:.2g}'.format(warm_up_weight))
# Export parameter summaries
parameter_summary_string = session.run(
self.parameter_summary,
feed_dict = {self.warm_up_weight: warm_up_weight}
)
parameter_summary_writer.add_summary(
parameter_summary_string, global_step = epoch + 1)
parameter_summary_writer.flush()
# Evaluation
print(' Evaluating model.')
## Centroids
p_z_probabilities, p_z_means, p_z_variances = \
session.run(
[self.p_z_probabilities, self.p_z_means,
self.p_z_variances]
)
## Training
evaluating_time_start = time()
ELBO_train = 0
KL_train = 0
ENRE_train = 0
q_z_mean_train = numpy.empty([M_train, self.latent_size],
numpy.float32)
if "mixture" in self.latent_distribution_name:
z_KL = numpy.zeros(1)
else:
z_KL = numpy.zeros(self.latent_size)
for i in range(0, M_train, batch_size):
subset = slice(i, min(i + batch_size, M_train))
x_batch = x_train[subset].toarray()
t_batch = t_train[subset].toarray()
feed_dict_batch = {
self.x: x_batch,
self.t: t_batch,
self.is_training: False,
self.use_deterministic_z: False,
self.warm_up_weight: 1.0,
self.number_of_iw_samples:
self.number_of_importance_samples["training"],
self.number_of_mc_samples:
self.number_of_monte_carlo_samples["training"]
}
if self.count_sum:
feed_dict_batch[self.n] = n_train[subset]
if self.count_sum_feature:
feed_dict_batch[self.n_feature] = \
n_feature_train[subset]
ELBO_i, KL_i, ENRE_i, q_z_mean_i, z_KL_i = session.run(
[self.ELBO, self.KL, self.ENRE, self.q_z_mean,
self.KL_all],
feed_dict = feed_dict_batch
)
ELBO_train += ELBO_i
KL_train += KL_i
ENRE_train += ENRE_i
q_z_mean_train[subset] = q_z_mean_i
z_KL += z_KL_i
ELBO_train /= M_train / batch_size
KL_train /= M_train / batch_size
ENRE_train /= M_train / batch_size
z_KL /= M_train / batch_size
learning_curves["training"]["lower_bound"].append(ELBO_train)
learning_curves["training"]["reconstruction_error"].append(
ENRE_train)
learning_curves["training"]["kl_divergence"].append(KL_train)
evaluating_duration = time() - evaluating_time_start
### Summaries
training_summary = tf.Summary()
#### Losses
training_summary.value.add(tag="losses/lower_bound",
simple_value = ELBO_train)
training_summary.value.add(tag="losses/reconstruction_error",
simple_value = ENRE_train)
training_summary.value.add(tag="losses/kl_divergence",
simple_value = KL_train)
#### KL divergence
for i in range(z_KL.size):
training_summary.value.add(
tag="kl_divergence_neurons/{}".format(i),
simple_value = z_KL[i]
)
#### Centroids
if not validation_set:
for k in range(len(p_z_probabilities)):
training_summary.value.add(
tag="prior/cluster_{}/probability".format(k),
simple_value = p_z_probabilities[k]
)
for l in range(self.latent_size):
# The same Gaussian for all
if not p_z_means[k].shape:
p_z_mean_k_l = p_z_means[k]
p_z_variances_k_l = p_z_variances[k]
# Different Gaussians for all
else:
p_z_mean_k_l = p_z_means[k][l]
p_z_variances_k_l = p_z_variances[k][l]
training_summary.value.add(
tag="prior/cluster_{}/mean/dimension_{}".format(
k, l),
simple_value = p_z_mean_k_l
)
training_summary.value.add(
tag="prior/cluster_{}/variance/dimension_{}"\
.format(k, l),
simple_value = p_z_variances_k_l
)
#### Writing
training_summary_writer.add_summary(training_summary,
global_step = epoch + 1)
training_summary_writer.flush()
### Printing
print(
" {} set ({}):".format(
training_set.kind.capitalize(),
formatDuration(evaluating_duration)
),
"ELBO: {:.5g}, ENRE: {:.5g}, KL: {:.5g}.".format(
ELBO_train, ENRE_train, KL_train
)
)
## Validation
if validation_set:
evaluating_time_start = time()
ELBO_valid = 0
KL_valid = 0
ENRE_valid = 0
q_z_mean_valid = numpy.empty([M_valid, self.latent_size],
numpy.float32)
for i in range(0, M_valid, batch_size):
subset = slice(i, min(i + batch_size, M_valid))
x_batch = x_valid[subset].toarray()
t_batch = t_valid[subset].toarray()
feed_dict_batch = {
self.x: x_batch,
self.t: t_batch,
self.is_training: False,
self.use_deterministic_z: False,
self.warm_up_weight: 1.0,
self.number_of_iw_samples:
self.number_of_importance_samples["training"],
self.number_of_mc_samples:
self.number_of_monte_carlo_samples["training"]
}
if self.count_sum:
feed_dict_batch[self.n] = n_valid[subset]
if self.count_sum_feature:
feed_dict_batch[self.n_feature] = \
n_feature_valid[subset]
ELBO_i, KL_i, ENRE_i, q_z_mean_i = session.run(
[self.ELBO, self.KL, self.ENRE, self.q_z_mean],
feed_dict = feed_dict_batch
)
ELBO_valid += ELBO_i
KL_valid += KL_i
ENRE_valid += ENRE_i
q_z_mean_valid[subset] = q_z_mean_i
ELBO_valid /= M_valid / batch_size
KL_valid /= M_valid / batch_size
ENRE_valid /= M_valid / batch_size
learning_curves["validation"]["lower_bound"]\
.append(ELBO_valid)
learning_curves["validation"]["reconstruction_error"]\
.append(ENRE_valid)
learning_curves["validation"]["kl_divergence"]\
.append(KL_valid)
evaluating_duration = time() - evaluating_time_start
### Summaries
summary = tf.Summary()
#### Losses
summary.value.add(tag="losses/lower_bound",
simple_value = ELBO_valid)
summary.value.add(tag="losses/reconstruction_error",
simple_value = ENRE_valid)
summary.value.add(tag="losses/kl_divergence",
simple_value = KL_valid)
#### Centroids
for k in range(len(p_z_probabilities)):
summary.value.add(
tag="prior/cluster_{}/probability".format(k),
simple_value = p_z_probabilities[k]
)
for l in range(self.latent_size):
# The same Gaussian for all
if not p_z_means[k].shape:
p_z_mean_k_l = p_z_means[k]
p_z_variances_k_l = p_z_variances[k]
# Different Gaussians for all
else:
p_z_mean_k_l = p_z_means[k][l]
p_z_variances_k_l = p_z_variances[k][l]
summary.value.add(
tag="prior/cluster_{}/mean/dimension_{}".format(
k, l),
simple_value = p_z_mean_k_l
)
summary.value.add(
tag="prior/cluster_{}/variance/dimension_{}"\
.format(k, l),
simple_value = p_z_variances_k_l
)
#### Writing
validation_summary_writer.add_summary(summary,
global_step = epoch + 1)
validation_summary_writer.flush()
### Printing
print(
" {} set ({}):".format(
validation_set.kind.capitalize(),
formatDuration(evaluating_duration)
),
"ELBO: {:.5g}, ENRE: {:.5g}, KL: {:.5g}.".format(
ELBO_valid, ENRE_valid, KL_valid
)
)
# Early stopping
if validation_set and not self.stopped_early:
if ELBO_valid < ELBO_valid_early_stopping:
if epochs_with_no_improvement == 0:
print(" Early stopping:",
"Validation loss did not improve",
"for this epoch.")
print(" " + \
"Saving model parameters for previous epoch.")
saving_time_start = time()
ELBO_valid_early_stopping = ELBO_valid
current_checkpoint = tf.train.get_checkpoint_state(
log_directory)
if current_checkpoint:
copyModelDirectory(current_checkpoint,
early_stopping_log_directory)
saving_duration = time() - saving_time_start
print(" " +
"Previous model parameters saved ({})."\
.format(formatDuration(saving_duration)))
else:
print(" Early stopping:",
"Validation loss has not improved",
"for {} epochs.".format(
epochs_with_no_improvement + 1))
epochs_with_no_improvement += 1
else:
if epochs_with_no_improvement > 0:
print(" Early stopping cancelled:",
"Validation loss improved.")
epochs_with_no_improvement = 0
ELBO_valid_early_stopping = ELBO_valid
if os.path.exists(early_stopping_log_directory):
shutil.rmtree(early_stopping_log_directory)
if epochs_with_no_improvement >= \
self.early_stopping_rounds:
print(" Early stopping in effect:",
"Previously saved model parameters is available.")
self.stopped_early = True
epochs_with_no_improvement = numpy.nan
# Saving model parameters (update checkpoint)
print(' Saving model parameters.')
saving_time_start = time()
self.saver.save(session, checkpoint_file,
global_step = epoch + 1)
saving_duration = time() - saving_time_start
print(' Model parameters saved ({}).'.format(
formatDuration(saving_duration)))
# Saving best model parameters yet
if validation_set and ELBO_valid > ELBO_valid_maximum:
print(" Best validation ELBO yet.",
"Saving model parameters as best model parameters.")
saving_time_start = time()
ELBO_valid_maximum = ELBO_valid
current_checkpoint = \
tf.train.get_checkpoint_state(log_directory)
if current_checkpoint:
copyModelDirectory(current_checkpoint,
best_model_log_directory)
removeOldCheckpoints(best_model_log_directory)
saving_duration = time() - saving_time_start
print(' Best model parameters saved ({}).'.format(
formatDuration(saving_duration)))
print()
# Plot latent validation values
if plotting_interval is None:
under_10 = epoch < 10
under_100 = epoch < 100 and (epoch + 1) % 10 == 0
under_1000 = epoch < 1000 and (epoch + 1) % 50 == 0
above_1000 = epoch > 1000 and (epoch + 1) % 100 == 0
last_one = epoch == number_of_epochs - 1
plot_intermediate_results = under_10 \
or under_100 \
or under_1000 \
or above_1000 \
or last_one
else:
plot_intermediate_results = \
epoch % plotting_interval == 0
if plot_intermediate_results:
if "mixture" in self.latent_distribution_name:
K = len(p_z_probabilities)
L = self.latent_size
p_z_covariance_matrices = numpy.empty([K, L, L])
for k in range(K):
p_z_covariance_matrices[k] = numpy.diag(
p_z_variances[k])
centroids = {
"prior": {
"probabilities": numpy.array(
p_z_probabilities),
"means": numpy.stack(p_z_means),
"covariance_matrices": p_z_covariance_matrices
}
}
else:
centroids = None
if validation_set:
intermediate_latent_values = q_z_mean_valid
intermediate_data_set = validation_set
else:
intermediate_latent_values = q_z_mean_train
intermediate_data_set = training_set
analyseIntermediateResults(
learning_curves = learning_curves,
epoch_start = epoch_start,
epoch = epoch,
latent_values = intermediate_latent_values,
data_set = intermediate_data_set,
centroids = centroids,
model_name = self.name,
run_id = run_id,
model_type = self.type,
results_directory = self.base_results_directory
)
print()
else:
analyseIntermediateResults(
learning_curves = learning_curves,
epoch_start = epoch_start,
model_name = self.name,
run_id = run_id,
model_type = self.type,
results_directory = self.base_results_directory
)
print()
# Update variables for previous iteration
if validation_set:
ELBO_valid_prev = ELBO_valid
training_duration = time() - training_time_start
print("{} trained for {} epochs ({}).".format(
capitaliseString(model_string),
number_of_epochs,
formatDuration(training_duration))
)
print()
# Clean up
removeOldCheckpoints(log_directory)
if temporary_log_directory:
print("Moving log directory to permanent directory.")
copying_time_start = time()
if os.path.exists(permanent_log_directory):
shutil.rmtree(permanent_log_directory)
shutil.move(log_directory, permanent_log_directory)
copying_duration = time() - copying_time_start
print("Log directory moved ({}).".format(formatDuration(
copying_duration)))
print()
status["completed"] = True
status["training duration"] = formatDuration(training_duration)
status["last epoch duration"] = formatDuration(epoch_duration)
return status, run_id
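The early-stopping bookkeeping in the training loop above reduces to a small counter pattern. The standalone sketch below restates that pattern with hypothetical names and without the TensorFlow session or checkpoint handling; it is illustrative only, not the class's actual implementation.
def stops_early(validation_elbos, early_stopping_rounds):
    # Returns True once the validation ELBO has failed to improve for
    # early_stopping_rounds consecutive epochs (higher ELBO is better).
    best_elbo = float("-inf")
    epochs_with_no_improvement = 0
    for elbo in validation_elbos:
        if elbo > best_elbo:
            best_elbo = elbo
            epochs_with_no_improvement = 0
        else:
            epochs_with_no_improvement += 1
        if epochs_with_no_improvement >= early_stopping_rounds:
            return True
    return False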
def evaluate(self, evaluation_set, evaluation_subset_indices = set(),
batch_size = 100, predict_labels = False, run_id = None,
use_early_stopping_model = False, use_best_model = False,
use_deterministic_z = False, output_versions = "all",
log_results = True):
if run_id:
run_id = checkRunID(run_id)
model_string = "model for run {}".format(run_id)
else:
model_string = "model"
if output_versions == "all":
output_versions = ["transformed", "reconstructed", "latent"]
elif not isinstance(output_versions, list):
output_versions = [output_versions]
else:
number_of_output_versions = len(output_versions)
if number_of_output_versions > 3:
raise ValueError("Can only output at most 3 sets, "
+ "{} requested".format(number_of_output_versions))
elif number_of_output_versions != len(set(output_versions)):
raise ValueError("Cannot output duplicate sets, "
+ "{} requested.".format(output_versions))
evaluation_set_transformed = False
batch_size /= self.number_of_importance_samples["evaluation"] \
* self.number_of_monte_carlo_samples["evaluation"]
batch_size = int(numpy.ceil(batch_size))
if self.count_sum:
n_eval = evaluation_set.count_sum
if self.count_sum_feature:
n_feature_eval = evaluation_set.normalised_count_sum
M_eval = evaluation_set.number_of_examples
F_eval = evaluation_set.number_of_features
noisy_preprocess = evaluation_set.noisy_preprocess
if not noisy_preprocess:
if evaluation_set.has_preprocessed_values:
x_eval = evaluation_set.preprocessed_values
else:
x_eval = evaluation_set.values
| |
import errno
import logging
import os
import uuid
import biom
import pandas as pd
from Bio import SeqIO
import shutil
from installed_clients.DataFileUtilClient import DataFileUtil
from GenericsAPI.Utils.AttributeUtils import AttributesUtil
from GenericsAPI.Utils.SampleServiceUtil import SampleServiceUtil
from GenericsAPI.Utils.DataUtil import DataUtil
from GenericsAPI.Utils.MatrixUtil import MatrixUtil
from GenericsAPI.Utils.TaxonUtil import TaxonUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.KBaseSearchEngineClient import KBaseSearchEngine
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
TYPE_ATTRIBUTES = {'description', 'scale', 'row_normalization', 'col_normalization'}
SCALE_TYPES = {'raw', 'ln', 'log2', 'log10'}
DEFAULT_META_KEYS = ["lineage", "score", "taxonomy_source", "species_name",
"consensus_sequence"]
TARGET_GENE_SUBFRAGMENT_MAP = {'16S': ['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9'],
'18S': ['V1', 'V2', 'V3', 'V4', 'V9'],
'ITS': ['ITS1', 'ITS2']}
SEQ_INSTRUMENTS_MAP = {'Applied Biosystems': ['AB 310 Genetic Analyzer',
'AB 3130 Genetic Analyzer',
'AB 3130xL Genetic Analyzer',
'AB 3500 Genetic Analyzer',
'AB 3500xL Genetic Analyzer',
'AB 3730 Genetic Analyzer',
'AB 3730xL Genetic Analyzer',
'AB 5500xl Genetic Analyzer',
'AB 5500x-Wl Genetic Analyzer',
'AB SOLiD System',
'AB SOLiD System 2.0',
'AB SOLiD System 3.0',
'AB SOLiD 3 Plus System',
'AB SOLiD 4 System',
'AB SOLiD 4hq System',
'AB SOLiD PI System'],
'Roche 454': ['454 GS', '454 GS 20', '454 GS FLX', '454 GS FLX+',
'454 GS FLX Titanium'],
'Life Sciences': ['454 GS Junior'],
'Illumina': ['Illumina Genome Analyzer',
'Illumina Genome Analyzer II',
'Illumina Genome Analyzer IIx',
'Illumina HiScanSQ',
'Illumina HiSeq 1000',
'Illumina HiSeq 1500',
'Illumina HiSeq 2000',
'Illumina HiSeq 2500',
'Illumina HiSeq 3000',
'Illumina HiSeq 4000',
'Illumina HiSeq X',
'HiSeq X Five',
'HiSeq X Ten',
'Illumina iSeq 100',
'Illumina MiSeq',
'Illumina MiniSeq',
'NextSeq 500',
'NextSeq 550',
'NextSeq 1000',
'NextSeq 2000',
'Illumina NovaSeq 6000'],
'ThermoFisher': ['Ion Torrent PGM', 'Ion Torrent Proton',
'Ion Torrent S5 XL', 'Ion Torrent S5'],
'Pacific Biosciences': ['PacBio RS', 'PacBio RS II', 'PacBio Sequel',
'PacBio Sequel II'],
'Oxford Nanopore': ['MinION', 'GridION', 'PromethION'],
'BGI Group': ['BGISEQ-500', 'DNBSEQ-G400', 'DNBSEQ-T7', 'DNBSEQ-G50',
'MGISEQ-2000RS']}
class BiomUtil:
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
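For comparison only (not a proposed change to the class): on Python 3 the race-tolerant helper above is essentially equivalent to the standard-library one-liner below.
import os

def mkdir_p(path):
    # exist_ok=True suppresses the error when the directory already exists,
    # matching the EEXIST handling in _mkdir_p above.
    if path:
        os.makedirs(path, exist_ok=True)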
def _process_params(self, params):
logging.info('start validating import_matrix_from_biom params')
# check for required parameters
for p in ['obj_type', 'matrix_name', 'workspace_id', 'scale', 'amplicon_type',
'sequencing_technology', 'sequencing_instrument',
'target_gene', 'target_subfragment', 'taxon_calling']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
# check sequencing_technology and sequencing_instrument matching
sequencing_technology = params.get('sequencing_technology')
sequencing_instrument = params.get('sequencing_instrument')
if sequencing_technology not in SEQ_INSTRUMENTS_MAP:
raise ValueError('Unexpected sequencing technology: {}'.format(sequencing_technology))
expected_instruments = SEQ_INSTRUMENTS_MAP.get(sequencing_technology)
if sequencing_instrument not in expected_instruments:
raise ValueError('Please select sequencing instrument among {} for {}'.format(
expected_instruments, sequencing_technology))
# check target_gene and target_subfragment matching
target_gene = params.get('target_gene')
target_subfragment = list(set(params.get('target_subfragment')))
params['target_subfragment'] = target_subfragment
if target_gene not in TARGET_GENE_SUBFRAGMENT_MAP:
raise ValueError('Unexpected target gene: {}'.format(target_gene))
expected_subfragments = TARGET_GENE_SUBFRAGMENT_MAP.get(target_gene)
if not set(target_subfragment) <= set(expected_subfragments):
raise ValueError('Please select target subfragments among {} for {}'.format(
expected_subfragments, target_gene))
# check taxon_calling
taxon_calling = params.get('taxon_calling')
taxon_calling_method = list(set(taxon_calling.get('taxon_calling_method')))
params['taxon_calling_method'] = taxon_calling_method
if 'denoising' in taxon_calling_method:
denoise_method = taxon_calling.get('denoise_method')
sequence_error_cutoff = taxon_calling.get('sequence_error_cutoff')
if not (denoise_method and sequence_error_cutoff):
raise ValueError('Please provide denoise_method and sequence_error_cutoff')
params['denoise_method'] = denoise_method
params['sequence_error_cutoff'] = sequence_error_cutoff
if 'clustering' in taxon_calling_method:
clustering_method = taxon_calling.get('clustering_method')
clustering_cutoff = taxon_calling.get('clustering_cutoff')
if not (clustering_method and clustering_cutoff):
raise ValueError('Please provide clustering_method and clustering_cutoff')
params['clustering_method'] = clustering_method
params['clustering_cutoff'] = clustering_cutoff
obj_type = params.get('obj_type')
if obj_type not in self.matrix_types:
raise ValueError('Unknown matrix object type: {}'.format(obj_type))
scale = params.get('scale')
if scale not in SCALE_TYPES:
raise ValueError('Unknown scale type: {}'.format(scale))
biom_file = None
tsv_file = None
fasta_file = None
metadata_keys = DEFAULT_META_KEYS
input_local_file = params.get('input_local_file', False)
if params.get('taxonomic_abundance_tsv') and params.get('taxonomic_fasta'):
tsv_file = params.get('taxonomic_abundance_tsv')
fasta_file = params.get('taxonomic_fasta')
if not (tsv_file and fasta_file):
raise ValueError('missing TSV or FASTA file')
if not input_local_file:
tsv_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': tsv_file}).get('copy_file_path')
fasta_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': fasta_file}).get('copy_file_path')
metadata_keys_str = params.get('metadata_keys')
if metadata_keys_str:
metadata_keys += [x.strip() for x in metadata_keys_str.split(',')]
mode = 'tsv_fasta'
elif params.get('biom_fasta'):
biom_fasta = params.get('biom_fasta')
biom_file = biom_fasta.get('biom_file_biom_fasta')
fasta_file = biom_fasta.get('fasta_file_biom_fasta')
if not (biom_file and fasta_file):
raise ValueError('missing BIOM or FASTA file')
if not input_local_file:
biom_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': biom_file}).get('copy_file_path')
fasta_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': fasta_file}).get('copy_file_path')
mode = 'biom_fasta'
elif params.get('tsv_fasta'):
tsv_fasta = params.get('tsv_fasta')
tsv_file = tsv_fasta.get('tsv_file_tsv_fasta')
fasta_file = tsv_fasta.get('fasta_file_tsv_fasta')
if not (tsv_file and fasta_file):
raise ValueError('missing TSV or FASTA file')
if not input_local_file:
tsv_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': tsv_file}).get('copy_file_path')
fasta_file = self.dfu.download_staging_file(
{'staging_file_subdir_path': fasta_file}).get('copy_file_path')
metadata_keys_str = tsv_fasta.get('metadata_keys_tsv_fasta')
if metadata_keys_str:
metadata_keys += [x.strip() for x in metadata_keys_str.split(',')]
mode = 'tsv_fasta'
else:
raise ValueError('missing valid file group type in parameters')
return (biom_file, tsv_file, fasta_file, mode, list(set(metadata_keys)))
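To make the cross-field validation above concrete, here is a hypothetical params dictionary that should satisfy the checks; every value is illustrative (in particular obj_type and amplicon_type must match whatever this module actually accepts), not a documented default.
example_params = {
    'obj_type': 'AmpliconMatrix',              # placeholder; must be in self.matrix_types
    'matrix_name': 'my_amplicon_matrix',
    'workspace_id': 12345,
    'scale': 'raw',                            # one of SCALE_TYPES
    'amplicon_type': 'ASV',                    # placeholder value
    'sequencing_technology': 'Illumina',
    'sequencing_instrument': 'Illumina MiSeq', # must be listed under 'Illumina'
    'target_gene': '16S',
    'target_subfragment': ['V3', 'V4'],        # subset of the 16S subfragments
    'taxon_calling': {
        'taxon_calling_method': ['clustering'],
        'clustering_method': 'uclust',         # placeholder method name
        'clustering_cutoff': 0.97,
    },
    'tsv_fasta': {
        'tsv_file_tsv_fasta': 'amplicon_matrix.tsv',
        'fasta_file_tsv_fasta': 'amplicons.fasta',
    },
}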
def _validate_fasta_file(self, df, fasta_file):
logging.info('start validating FASTA file')
try:
fastq_dict = SeqIO.index(fasta_file, "fasta")
except Exception:
raise ValueError('Cannot parse file. Please provide a valid FASTA file')
matrix_ids = df.index
file_ids = fastq_dict.keys()
unmatched_ids = set(matrix_ids) - set(file_ids)
if unmatched_ids:
raise ValueError('FASTA file is missing OTU id(s): {}'.format(unmatched_ids))
def _file_to_amplicon_data(self, biom_file, tsv_file, fasta_file, mode, refs, matrix_name,
workspace_id, scale, description, metadata_keys=None):
amplicon_data = refs
if mode.startswith('biom'):
logging.info('start parsing BIOM file for matrix data')
table = biom.load_table(biom_file)
observation_metadata = table._observation_metadata
sample_metadata = table._sample_metadata
matrix_data = {'row_ids': table._observation_ids.tolist(),
'col_ids': table._sample_ids.tolist(),
'values': table.matrix_data.toarray().tolist()}
logging.info('start building attribute mapping object')
amplicon_data.update(self.get_attribute_mapping("row", observation_metadata,
matrix_data, matrix_name, refs,
workspace_id))
amplicon_data.update(self.get_attribute_mapping("col", sample_metadata,
matrix_data, matrix_name, refs,
workspace_id))
amplicon_data['attributes'] = {}
for k in ('create_date', 'generated_by'):
val = getattr(table, k)
if not val:
continue
if isinstance(val, bytes):
amplicon_data['attributes'][k] = val.decode('utf-8')
else:
amplicon_data['attributes'][k] = str(val)
elif mode.startswith('tsv'):
observation_metadata = None
sample_metadata = None
try:
logging.info('start parsing TSV file for matrix data')
reader = pd.read_csv(tsv_file, sep=None, iterator=True)
inferred_sep = reader._engine.data.dialect.delimiter
df = pd.read_csv(tsv_file, sep=inferred_sep, index_col=0)
except Exception:
raise ValueError('Cannot parse file. Please provide a valid TSV file')
else:
self._validate_fasta_file(df, fasta_file)
metadata_df = None
if metadata_keys:
shared_metadata_keys = list(set(metadata_keys) & set(df.columns))
if mode == 'tsv' and 'consensus_sequence' not in shared_metadata_keys:
raise ValueError('TSV file does not include consensus_sequence')
if shared_metadata_keys:
metadata_df = df[shared_metadata_keys]
df.drop(columns=shared_metadata_keys, inplace=True)
try:
df = df.astype(float)
except ValueError:
err_msg = 'Found some non-float values. The matrix may contain only numeric values\n'
err_msg += 'Please list any non-numeric column names in the Metadata Keys field'
raise ValueError(err_msg)
df.fillna(0, inplace=True)
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
logging.info('start building attribute mapping object')
amplicon_data.update(self.get_attribute_mapping("row", observation_metadata,
matrix_data, matrix_name, refs,
workspace_id, metadata_df=metadata_df))
amplicon_data.update(self.get_attribute_mapping("col", sample_metadata,
matrix_data, matrix_name, refs,
workspace_id))
amplicon_data['attributes'] = {}
else:
raise ValueError('error parsing _file_to_amplicon_data, mode: {}'.format(mode))
amplicon_data.update({'data': matrix_data})
amplicon_data['search_attributes'] = [f'{k}|{v}' for k, v in amplicon_data['attributes'].items()]
amplicon_data['scale'] = scale
if description:
amplicon_data['description'] = description
return amplicon_data
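For reference, the 'data' entry assembled above has the following shape; the identifiers and counts are toy values, purely illustrative.
matrix_data = {
    'row_ids': ['OTU_1', 'OTU_2'],           # amplicon/OTU identifiers (matrix rows)
    'col_ids': ['sample_A', 'sample_B'],     # sample identifiers (matrix columns)
    'values': [[10.0, 0.0],
               [3.0, 7.0]],                  # one list of counts per row_id
}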
def get_attribute_mapping(self, axis, metadata, matrix_data, matrix_name, refs, workspace_id,
metadata_df=None):
mapping_data = {}
axis_ids = matrix_data[f'{axis}_ids']
if refs.get('sample_set_ref') and axis == 'col':
name = matrix_name + "_{}_attributes".format(axis)
mapping_data[f'{axis}_attributemapping_ref'] = self._sample_set_to_attribute_mapping(
axis_ids, refs.get('sample_set_ref'), name, workspace_id)
mapping_data[f'{axis}_mapping'] = {x: x for x in axis_ids}
elif refs.get(f'{axis}_attributemapping_ref'):
am_data = self.dfu.get_objects(
{'object_refs': [refs[f'{axis}_attributemapping_ref']]}
)['data'][0]['data']
unmatched_ids = set(axis_ids) - set(am_data['instances'].keys())
if unmatched_ids:
name = "Column" if axis == 'col' else "Row"
                raise ValueError(f"The following {name} IDs from the uploaded matrix do not match "
                                 f"the supplied {name} attribute mapping: {', '.join(unmatched_ids)}"
                                 f"\nPlease verify the input data or upload an Excel file with a "
                                 f"{name} mapping tab.")
else:
mapping_data[f'{axis}_mapping'] = {x: x for x in axis_ids}
elif metadata:
            name = f"{matrix_name}_{axis}_attributes"
mapping_data[f'{axis}_attributemapping_ref'] = self._metadata_to_attribute_mapping(
axis_ids, metadata, name, workspace_id)
# if coming from biom file, metadata and axis IDs are guaranteed to match
mapping_data[f'{axis}_mapping'] = {x: x for x in axis_ids}
elif metadata_df is not None:
            name = f"{matrix_name}_{axis}_attributes"
mapping_data[f'{axis}_attributemapping_ref'] = self._meta_df_to_attribute_mapping(
axis_ids, metadata_df, name, workspace_id)
mapping_data[f'{axis}_mapping'] = {x: x for x in axis_ids}
return mapping_data
def _meta_df_to_attribute_mapping(self, axis_ids, metadata_df, obj_name, ws_id):
data = {'ontology_mapping_method': "TSV file", 'instances': {}}
metadata_df = metadata_df.astype(str)
attribute_keys = metadata_df.columns.tolist()
data['attributes'] = [{'attribute': key, 'source': 'upload'} for key in attribute_keys]
if 'taxonomy' in attribute_keys:
data['attributes'].append({'attribute': 'parsed_user_taxonomy', 'source': 'upload'})
for axis_id in axis_ids:
data['instances'][axis_id] = metadata_df.loc[axis_id].tolist()
if 'taxonomy' in attribute_keys:
parsed_user_taxonomy = None
taxonomy_index = attribute_keys.index('taxonomy')
taxonomy_str = metadata_df.loc[axis_id].tolist()[taxonomy_index]
parsed_user_taxonomy = self.taxon_util.process_taxonomic_str(taxonomy_str)
data['instances'][axis_id].append(parsed_user_taxonomy)
logging.info('start saving AttributeMapping object: {}'.format(obj_name))
info = self.dfu.save_objects({
"id": ws_id,
"objects": [{
"type": "KBaseExperiments.AttributeMapping",
"data": data,
"name": obj_name
}]
})[0]
return f'{info[6]}/{info[0]}/{info[4]}'
def _sample_set_to_attribute_mapping(self, axis_ids, sample_set_ref, obj_name, ws_id):
am_data = self.sampleservice_util.sample_set_to_attribute_mapping(sample_set_ref)
unmatched_ids = set(axis_ids) - set(am_data['instances'].keys())
if unmatched_ids:
name = "Column"
            raise ValueError(f"The following {name} IDs from the uploaded matrix do not match "
                             f"the supplied {name} attribute mapping: {', '.join(unmatched_ids)}"
                             f"\nPlease verify the input data or upload an Excel file with a "
                             f"{name} mapping tab.")
from collections import defaultdict
from dataclasses import dataclass, replace
from enum import Enum
from functools import lru_cache
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
List,
Mapping,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Type,
TypeVar,
overload,
)
from apischema.aliases import Aliaser
from apischema.cache import cache
from apischema.conversions.conversions import AnyConversion, DefaultConversion
from apischema.conversions.visitor import (
Deserialization,
DeserializationVisitor,
sub_conversion,
)
from apischema.dependencies import get_dependent_required
from apischema.deserialization.coercion import Coerce, Coercer
from apischema.deserialization.flattened import get_deserialization_flattened_aliases
from apischema.json_schema.patterns import infer_pattern
from apischema.json_schema.types import bad_type
from apischema.metadata.implem import ValidatorsMetadata
from apischema.metadata.keys import SCHEMA_METADATA, VALIDATORS_METADATA
from apischema.objects import ObjectField
from apischema.objects.fields import FieldKind
from apischema.objects.visitor import DeserializationObjectVisitor
from apischema.recursion import RecursiveConversionsVisitor
from apischema.schemas import Schema, get_schema
from apischema.schemas.constraints import Check, Constraints, merge_constraints
from apischema.types import AnyType, NoneType
from apischema.typing import get_args, get_origin
from apischema.utils import (
Lazy,
PREFIX,
deprecate_kwargs,
get_origin_or_type,
literal_values,
opt_or,
)
from apischema.validation import get_validators
from apischema.validation.errors import ErrorKey, ValidationError, merge_errors
from apischema.validation.mock import ValidatorMock
from apischema.validation.validators import Validator, validate
from apischema.visitor import Unsupported
MISSING_PROPERTY = "missing property"
UNEXPECTED_PROPERTY = "unexpected property"
NOT_NONE = object()
INIT_VARS_ATTR = f"{PREFIX}_init_vars"
T = TypeVar("T")
DeserializationMethod = Callable[[Any], T]
@dataclass(frozen=True)
class DeserializationMethodFactory:
factory: Callable[
[Optional[Constraints], Sequence[Validator]], DeserializationMethod
]
constraints: Optional[Constraints] = None
validators: Sequence[Validator] = ()
def merge(
self, constraints: Optional[Constraints], validators: Sequence[Validator]
) -> "DeserializationMethodFactory":
if constraints is None and not validators:
return self
return replace(
self,
constraints=merge_constraints(self.constraints, constraints),
validators=(*validators, *self.validators),
)
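    # The lru_cache below memoizes the built deserialization method for each
    # factory instance, so a given constraints/validators combination is only
    # compiled once.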
@property # type: ignore
@lru_cache()
def method(self) -> DeserializationMethod:
return self.factory(self.constraints, self.validators) # type: ignore
def get_constraints(schema: Optional[Schema]) -> Optional[Constraints]:
return schema.constraints if schema is not None else None
def get_constraint_checks(
constraints: Optional[Constraints], cls: type
) -> Collection[Tuple[Check, Any, str]]:
return () if constraints is None else constraints.checks_by_type[cls]
class DeserializationMethodVisitor(
RecursiveConversionsVisitor[Deserialization, DeserializationMethodFactory],
DeserializationVisitor[DeserializationMethodFactory],
DeserializationObjectVisitor[DeserializationMethodFactory],
):
def __init__(
self,
additional_properties: bool,
aliaser: Aliaser,
coercer: Optional[Coercer],
default_conversion: DefaultConversion,
fall_back_on_default: bool,
):
super().__init__(default_conversion)
self.additional_properties = additional_properties
self.aliaser = aliaser
self.coercer = coercer
self.fall_back_on_default = fall_back_on_default
self._first_visit = True
def _recursive_result(
self, lazy: Lazy[DeserializationMethodFactory]
) -> DeserializationMethodFactory:
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
rec_method = None
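            # Build the real method lazily on the first call: recursive types
            # would otherwise require the nested method before it exists.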
def method(data: Any) -> Any:
nonlocal rec_method
if rec_method is None:
rec_method = lazy().merge(constraints, validators).method
return rec_method(data)
return method
return DeserializationMethodFactory(factory)
def visit_not_recursive(self, tp: AnyType) -> DeserializationMethodFactory:
if self._first_visit:
self._first_visit = False
return super().visit_not_recursive(tp)
return deserialization_method_factory(
tp,
self.additional_properties,
self.aliaser,
self.coercer,
self._conversion,
self.default_conversion,
self.fall_back_on_default,
)
def annotated(
self, tp: AnyType, annotations: Sequence[Any]
) -> DeserializationMethodFactory:
factory = super().annotated(tp, annotations)
for annotation in reversed(annotations):
if isinstance(annotation, Mapping):
factory = factory.merge(
get_constraints(annotation.get(SCHEMA_METADATA)),
annotation.get(
VALIDATORS_METADATA, ValidatorsMetadata(())
).validators,
)
return factory
def _wrap(
self,
method: DeserializationMethod,
validators: Sequence[Validator],
cls: Optional[type] = None,
) -> DeserializationMethod:
if self.coercer is not None and cls is not None:
wrapped_for_coercer, coercer = method, self.coercer
def method(data: Any) -> Any:
assert cls is not None
return wrapped_for_coercer(coercer(cls, data))
if validators:
wrapped_for_validators, aliaser = method, self.aliaser
def method(data: Any) -> Any:
result = wrapped_for_validators(data)
validate(result, validators, aliaser=aliaser)
return result
return method
def any(self) -> DeserializationMethodFactory:
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
checks = None if constraints is None else constraints.checks_by_type
def method(data: Any) -> Any:
if checks is not None:
if data.__class__ in checks:
errors = [
err
for check, attr, err in checks[data.__class__]
if check(data, attr)
]
if errors:
raise ValidationError(errors)
return data
return self._wrap(method, validators)
return DeserializationMethodFactory(factory)
def collection(
self, cls: Type[Collection], value_type: AnyType
) -> DeserializationMethodFactory:
value_factory = self.visit(value_type)
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
deserialize_value = value_factory.method
checks = get_constraint_checks(constraints, list)
constructor: Optional[Callable[[list], Collection]] = None
if issubclass(cls, AbstractSet):
constructor = set
elif issubclass(cls, tuple):
constructor = tuple
def method(data: Any) -> Any:
if not isinstance(data, list):
raise bad_type(data, list)
elt_errors: Dict[ErrorKey, ValidationError] = {}
values: list = [None] * len(data)
index = 0 # don't use `enumerate` for performance
for elt in data:
try:
values[index] = deserialize_value(elt)
except ValidationError as err:
elt_errors[index] = err
index += 1
if checks:
errors = [err for check, attr, err in checks if check(data, attr)]
if errors or elt_errors:
raise ValidationError(errors, elt_errors)
elif elt_errors:
raise ValidationError([], elt_errors)
return constructor(values) if constructor else values
return self._wrap(method, validators, list)
return DeserializationMethodFactory(factory)
def enum(self, cls: Type[Enum]) -> DeserializationMethodFactory:
return self.literal(list(cls))
def literal(self, values: Sequence[Any]) -> DeserializationMethodFactory:
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
value_map = dict(zip(literal_values(values), values))
types = list(set(map(type, value_map))) if self.coercer else []
error = f"not one of {list(value_map)}"
coercer = self.coercer
def method(data: Any) -> Any:
try:
return value_map[data]
except KeyError:
if coercer:
for cls in types:
try:
return value_map[coercer(cls, data)]
except IndexError:
pass
raise ValidationError([error])
return method
return DeserializationMethodFactory(factory)
def mapping(
self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType
) -> DeserializationMethodFactory:
key_factory, value_factory = self.visit(key_type), self.visit(value_type)
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
deserialize_key = key_factory.method
deserialize_value = value_factory.method
checks = get_constraint_checks(constraints, dict)
def method(data: Any) -> Any:
if not isinstance(data, dict):
raise bad_type(data, dict)
item_errors: Dict[ErrorKey, ValidationError] = {}
items = {}
for key, value in data.items():
assert isinstance(key, str)
try:
items[deserialize_key(key)] = deserialize_value(value)
except ValidationError as err:
item_errors[key] = err
if checks:
errors = [err for check, attr, err in checks if check(data, attr)]
if errors or item_errors:
raise ValidationError(errors, item_errors)
elif item_errors:
raise ValidationError([], item_errors)
return items
return self._wrap(method, validators, dict)
return DeserializationMethodFactory(factory)
def object(
self, tp: Type, fields: Sequence[ObjectField]
) -> DeserializationMethodFactory:
field_factories = [
self.visit_with_conv(f.type, f.deserialization).merge(
get_constraints(f.schema), f.validators
)
for f in fields
]
def factory(
constraints: Optional[Constraints], validators: Sequence[Validator]
) -> DeserializationMethod:
cls = get_origin_or_type(tp)
alias_by_name = {field.name: self.aliaser(field.alias) for field in fields}
requiring: Dict[str, Set[str]] = defaultdict(set)
for f, reqs in get_dependent_required(cls).items():
for req in reqs:
requiring[req].add(alias_by_name[f])
normal_fields, flattened_fields, pattern_fields = [], [], []
additional_field = None
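            # Classify fields: flattened fields gather several aliases into one
            # value, pattern fields match leftover keys by regex, at most one
            # additional_properties field absorbs whatever remains, and the
            # rest are plain alias -> field lookups.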
for field, field_factory in zip(fields, field_factories):
deserialize_field: DeserializationMethod = field_factory.method
fall_back_on_default = (
field.fall_back_on_default or self.fall_back_on_default
)
if field.flattened:
flattened_aliases = get_deserialization_flattened_aliases(
cls, field, self.default_conversion
)
flattened_fields.append(
(
field.name,
set(map(self.aliaser, flattened_aliases)),
deserialize_field,
fall_back_on_default,
)
)
elif field.pattern_properties is not None:
field_pattern = field.pattern_properties
if field_pattern is ...:
field_pattern = infer_pattern(
field.type, self.default_conversion
)
assert isinstance(field_pattern, Pattern)
pattern_fields.append(
(
field.name,
field_pattern,
deserialize_field,
fall_back_on_default,
)
)
elif field.additional_properties:
additional_field = (
field.name,
deserialize_field,
fall_back_on_default,
)
else:
normal_fields.append(
(
field.name,
self.aliaser(field.alias),
deserialize_field,
field.required,
requiring[field.name],
fall_back_on_default,
)
)
has_aggregate_field = (
flattened_fields or pattern_fields or (additional_field is not None)
)
post_init_modified = {field.name for field in fields if field.post_init}
checks = get_constraint_checks(constraints, dict)
aliaser = self.aliaser
additional_properties = self.additional_properties
all_aliases = set(alias_by_name.values())
init_defaults = [
(f.name, f.default_factory)
for f in fields
if f.kind == FieldKind.WRITE_ONLY
]
def method(data: Any) -> Any:
if not isinstance(data, dict):
raise bad_type(data, dict)
values: Dict[str, Any] = {}
fields_count = 0
errors = (
[err for check, attr, err in checks if check(data, attr)]
if checks
else []
)
field_errors: Dict[ErrorKey, ValidationError] = {}
for (
name,
alias,
deserialize_field,
required,
required_by,
fall_back_on_default,
) in normal_fields:
if required:
try:
value = data[alias]
except KeyError:
field_errors[alias] = ValidationError([MISSING_PROPERTY])
else:
fields_count += 1
try:
values[name] = deserialize_field(value)
except ValidationError as err:
field_errors[alias] = err
elif alias in data:
fields_count += 1
try:
values[name] = deserialize_field(data[alias])
except ValidationError as err:
if not fall_back_on_default:
field_errors[alias] = err
elif required_by and not required_by.isdisjoint(data):
requiring = sorted(required_by & data.keys())
msg = f"missing property (required by {requiring})"
field_errors[alias] = ValidationError([msg])
if has_aggregate_field:
remain = data.keys() - all_aliases
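                    # `remain` holds the keys not claimed by normal fields;
                    # flattened and pattern fields consume from it, and whatever
                    # is left goes to the additional_properties field or is
                    # reported as an unexpected property.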
for (
name,
flattened_alias,
deserialize_field,
fall_back_on_default,
) in flattened_fields:
flattened = {
alias: data[alias]
for alias in flattened_alias
if alias in data
}
remain.difference_update(flattened)
try:
values[name] = deserialize_field(flattened)
except ValidationError as err:
if not fall_back_on_default:
errors.extend(err.messages)
field_errors.update(err.children)
for (
name,
pattern,
deserialize_field,
fall_back_on_default,
) in pattern_fields:
matched = {
key: data[key] for key in remain if pattern.match(key)
}
remain.difference_update(matched)
try:
values[name] = deserialize_field(matched)
except ValidationError as err:
if not fall_back_on_default:
errors.extend(err.messages)
field_errors.update(err.children)
if additional_field:
name, deserialize_field, fall_back_on_default = additional_field
additional = {key: data[key] for key in remain}
try:
values[name] = deserialize_field(additional)
except ValidationError as err:
if not fall_back_on_default:
errors.extend(err.messages)
field_errors.update(err.children)
elif remain and not additional_properties:
for key in remain:
field_errors[key] = ValidationError([UNEXPECTED_PROPERTY])
elif not additional_properties and len(data) != fields_count:
for key in data.keys() - all_aliases:
field_errors[key] = ValidationError([UNEXPECTED_PROPERTY])
validators2: Sequence[Validator]
if validators:
init: Dict[str, Any] = {}
for name, default_factory in init_defaults:
if name in values:
init[name] = values[name]
                        elif name
    finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
# 71
templateParams = TemplateParams(
"Oncomine Myeloid Research Fusions for S5", S5, "AMPS_RNA"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_RNA_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": "",
"sampleGrouping": "Single Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
# 72
templateParams = TemplateParams(
"Oncomine Myeloid Research DNA and Fusions for S5", S5, "AMPS_DNA_RNA"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_DNA_n_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": "DNA and Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_ocp_myeloid_mrd_550_s5_system_templates():
BARCODE_KIT_NAME = "Ion AmpliSeq HD Dual Barcode Kit 1-24"
CATEGORIES_DNA = "Oncomine;barcodes_12;onco_liquidBiopsy;onco_heme;mrd;"
CATEGORIES_RNA_FUSIONS = "Oncomine;barcodes_48;onco_liquidBiopsy;onco_heme;"
CATEGORIES_DNA_n_FUSIONS = "Oncomine;barcodes_24;onco_liquidBiopsy;onco_heme;mrd;"
CHIP_NAME = "550"
FLOWS = 550
LIB_KIT_NAME = "Ion AmpliSeq HD Library Kit"
LIBRARY_READ_LENGTH = 200
REFERENCE = "hg19"
SAMPLE_GROUPING = "Self"
SAMPLE_PREP_PROTOCOL = ""
SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TEMPLATE_KIT_NAME = "Ion Chef S550 V1"
PLAN_STATUS = "inactive"
# pre-select plugins
plugins = {}
plugins["molecularCoverageAnalysis"] = get_mca_plugin_dict("ampliseq_hd_cfdna")
plugins["sampleID"] = _get_plugin_dict("sampleID")
templateParams = TemplateParams(
"Oncomine Myeloid MRD DNA for 550", S5, "AMPS_HD_DNA"
)
templateParams.update(
{
"applicationGroup": "DNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_DNA,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": SAMPLE_GROUPING,
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
templateParams = TemplateParams(
"Oncomine Myeloid MRD Fusions for 550", S5, "AMPS_HD_RNA"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_RNA_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": "",
"sampleGrouping": "Single Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
templateParams = TemplateParams(
"Oncomine Myeloid MRD DNA and Fusions for 550", S5, "AMPS_HD_DNA_RNA_1"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_DNA_n_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": "DNA and Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_ocp_myeloid_mrd_540_s5_system_templates():
BARCODE_KIT_NAME = "Ion AmpliSeq HD Dual Barcode Kit 1-24"
CATEGORIES_DNA = "Oncomine;barcodes_12;onco_liquidBiopsy;onco_heme;mrd"
CATEGORIES_RNA_FUSIONS = "Oncomine;barcodes_48;onco_liquidBiopsy;onco_heme;"
CATEGORIES_DNA_n_FUSIONS = "Oncomine;barcodes_24;onco_liquidBiopsy;onco_heme;mrd"
CHIP_NAME = "540"
FLOWS = 550
LIB_KIT_NAME = "Ion AmpliSeq HD Library Kit"
LIBRARY_READ_LENGTH = 200
REFERENCE = "hg19"
SAMPLE_GROUPING = "Self"
SAMPLE_PREP_PROTOCOL = ""
SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TEMPLATE_KIT_NAME = "Ion Chef S540 V1"
PLAN_STATUS = "inactive"
# pre-select plugins
plugins = {}
plugins["molecularCoverageAnalysis"] = get_mca_plugin_dict("ampliseq_hd_cfdna")
plugins["sampleID"] = _get_plugin_dict("sampleID")
templateParams = TemplateParams(
"Oncomine Myeloid MRD DNA for 540", S5, "AMPS_HD_DNA"
)
templateParams.update(
{
"applicationGroup": "DNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_DNA,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": SAMPLE_GROUPING,
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
templateParams = TemplateParams(
"Oncomine Myeloid MRD Fusions for 540", S5, "AMPS_HD_RNA"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_RNA_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": "",
"sampleGrouping": "Single Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
templateParams = TemplateParams(
"Oncomine Myeloid MRD DNA and Fusions for 540", S5, "AMPS_HD_DNA_RNA_1"
)
templateParams.update(
{
"applicationGroup": "DNA + RNA",
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES_DNA_n_FUSIONS,
"chipType": CHIP_NAME,
"flows": FLOWS,
"libraryKitName": LIB_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": "DNA and Fusions",
"samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_proton_PQ_system_template():
CATEGORIES = ""
CHIP = "P2.2.2"
FLOWS = 150
LIBRARY_KIT_NAME = "Ion Xpress Plus Fragment Library Kit"
TEMPLATE_KIT_NAME = "Ion PQ Template OT2 Kit"
SEQ_KIT_NAME = "IonProtonPQKit"
BARCODE_KIT_NAME = "IonXpress"
PLAN_STATUS = "planned"
# 73
templateParams = TemplateParams("Ion NIPT template - PQ", PROTON, "WGNM")
templateParams.update(
{
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES,
"chipType": CHIP,
"flows": FLOWS,
"libraryKitName": LIBRARY_KIT_NAME,
"reference": "hg19",
"sampleGrouping": "Self",
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_mouse_transcriptome_s5_system_templates():
APPLICATION_GROUP = "RNA"
BARCODE_KIT_NAME = "IonCode Barcodes 1-32"
BARCODE_KIT_NAME_MANUAL = "IonXpress"
CATEGORIES = ""
CHIP = "540"
FLOWS = 500
LIBRARY_KIT_NAME = "Ampliseq DNA V1"
LIBRARY_KIT_NAME_MANUAL = "Ion AmpliSeq Library Kit Plus"
LIBRARY_READ_LENGTH = 200
REFERENCE = "AmpliSeq_Mouse_Transcriptome_v1"
BEDFILE = "/AmpliSeq_Mouse_Transcriptome_v1/unmerged/detail/AmpliSeq_Mouse_Transcriptome_V1_Designed.bed"
SAMPLE_GROUPING = "Self"
SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TEMPLATE_KIT_NAME = "Ion Chef S540 V1"
PLAN_STATUS = "planned"
# pre-select plugins
plugins = {}
plugins["ampliSeqRNA"] = _get_plugin_dict("ampliSeqRNA")
# 74
templateParams = TemplateParams(
"Ion AmpliSeq Transcriptome Mouse Gene Expression Chef-S5", S5, "AMPS_RNA"
)
templateParams.update(
{
"applicationGroup": APPLICATION_GROUP,
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES,
"chipType": CHIP,
"flows": FLOWS,
"libraryKitName": LIBRARY_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"targetRegionBedFile": BEDFILE,
"sampleGrouping": SAMPLE_GROUPING,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
# 75
templateParams = TemplateParams(
"Ion AmpliSeq Transcriptome Mouse Gene Expression Manual Chef-S5",
S5,
"AMPS_RNA",
)
templateParams.update(
{
"applicationGroup": APPLICATION_GROUP,
"barcodeKitName": BARCODE_KIT_NAME_MANUAL,
"categories": CATEGORIES,
"chipType": CHIP,
"flows": FLOWS,
"libraryKitName": LIBRARY_KIT_NAME_MANUAL,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"targetRegionBedFile": BEDFILE,
"sampleGrouping": SAMPLE_GROUPING,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_mouse_transcriptome_proton_system_templates():
APPLICATION_GROUP = "RNA"
BARCODE_KIT_NAME = "IonXpress"
CATEGORIES = ""
CHIP = "P1.1.17"
FLOWS = 500
LIBRARY_KIT_NAME = "Ion AmpliSeq Library Kit Plus"
REFERENCE = "AmpliSeq_Mouse_Transcriptome_v1"
BEDFILE = "/AmpliSeq_Mouse_Transcriptome_v1/unmerged/detail/AmpliSeq_Mouse_Transcriptome_V1_Designed.bed"
SAMPLE_GROUPING = "Self"
SEQ_KIT_NAME = "ProtonI200Kit-v3"
TEMPLATE_KIT_NAME = "Ion PI Template OT2 200 Kit v3"
PLAN_STATUS = "planned"
# pre-select plugins
plugins = {}
plugins["ampliSeqRNA"] = _get_plugin_dict("ampliSeqRNA")
# 76
templateParams = TemplateParams(
"Ion AmpliSeq Transcriptome Mouse Gene Expression Panel OT2-Proton",
PROTON,
"AMPS_RNA",
)
templateParams.update(
{
"applicationGroup": APPLICATION_GROUP,
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES,
"chipType": CHIP,
"flows": FLOWS,
"libraryKitName": LIBRARY_KIT_NAME,
"reference": REFERENCE,
"targetRegionBedFile": BEDFILE,
"sampleGrouping": SAMPLE_GROUPING,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_mutation_load_s5_system_templates():
APPLICATION_GROUP = "mutation_load"
BARCODE_KIT_NAME = "IonCode"
CATEGORIES = "Oncomine;onco_immune"
CHIP = "540"
FLOWS = 400
LIBRARY_KIT_NAME = "Ampliseq DNA V1"
LIBRARY_READ_LENGTH = 200
REFERENCE = "hg19"
SAMPLE_GROUPING = "Self"
SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TEMPLATE_KIT_NAME = "Ion Chef S540 V1"
PLAN_STATUS = "planned"
# pre-select plugins
plugins = {}
plugins["coverageAnalysis"] = _get_plugin_dict("coverageAnalysis")
# 77
templateParams = TemplateParams("Oncomine Tumor Mutation Load", S5, "AMPS")
templateParams.update(
{
"applicationGroup": APPLICATION_GROUP,
"barcodeKitName": BARCODE_KIT_NAME,
"categories": CATEGORIES,
"chipType": CHIP,
"flows": FLOWS,
"libraryKitName": LIBRARY_KIT_NAME,
"libraryReadLength": LIBRARY_READ_LENGTH,
"reference": REFERENCE,
"sampleGrouping": SAMPLE_GROUPING,
"sequencekitname": SEQ_KIT_NAME,
"templatingKitName": TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
finish_sys_template(sysTemplate, isCreated, templateParams, plugins)
def add_or_update_tagseq_cfdna_system_templates():
liquid_biopsy_plugins, tumor_plugins = create_tagseq_plugins()
# 78
sysTemplate, isCreated, isUpdated, templateParams = add_or_update_tagseq_cfdna_s5_530_chef_system_template(
"Oncomine TagSeq Tumor"
)
finish_sys_template(sysTemplate, isCreated, templateParams, tumor_plugins)
# 79
sysTemplate, isCreated, isUpdated, templateParams = add_or_update_tagseq_cfdna_s5_530_chef_system_template(
"Oncomine TagSeq Liquid Biopsy"
)
finish_sys_template(sysTemplate, isCreated, templateParams, liquid_biopsy_plugins)
def add_or_update_tagseq_cfdna_s5_530_chef_system_template(templateName):
# Tag Sequencing
TAG_SEQ_APPLICATION_GROUP = "onco_liquidBiopsy"
TAG_SEQ_BARCODE_KIT_NAME = "TagSequencing"
TAG_SEQ_CATEGORIES = "Oncomine;onco_liquidBiopsy;barcodes_8"
TAG_SEQ_CHIP_NAME = "530"
TAG_SEQ_FLOWS = 500
TAG_SEQ_LIB_KIT_NAME = "Oncomine cfDNA Assay"
TAG_SEQ_LIBRARY_READ_LENGTH = 200
TAG_SEQ_REFERENCE = "hg19"
TAG_SEQ_RUN_TYPE = "TAG_SEQUENCING"
TAG_SEQ_SAMPLE_GROUPING = "Self"
TAG_SEQ_SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TAG_SEQ_TEMPLATE_KIT_NAME = "Ion Chef S530 V2"
PLAN_STATUS = "planned"
templateParams = TemplateParams(templateName, S5, TAG_SEQ_RUN_TYPE)
templateParams.update(
{
"applicationGroup": TAG_SEQ_APPLICATION_GROUP,
"barcodeKitName": TAG_SEQ_BARCODE_KIT_NAME,
"categories": TAG_SEQ_CATEGORIES,
"chipType": TAG_SEQ_CHIP_NAME,
"flows": TAG_SEQ_FLOWS,
"libraryKitName": TAG_SEQ_LIB_KIT_NAME,
"libraryReadLength": TAG_SEQ_LIBRARY_READ_LENGTH,
"reference": TAG_SEQ_REFERENCE,
"sampleGrouping": TAG_SEQ_SAMPLE_GROUPING,
"sequencekitname": TAG_SEQ_SEQ_KIT_NAME,
"templatingKitName": TAG_SEQ_TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
return sysTemplate, isCreated, isUpdated, templateParams
def add_or_update_tagseq_cfdna_540_system_templates():
liquid_biopsy_plugins, tumor_plugins = create_tagseq_plugins()
# 80
sysTemplate, isCreated, isUpdated, templateParams = add_or_update_tagseq_cfdna_s5_540_chef_system_template(
"Oncomine TagSeq S540 Tumor"
)
finish_sys_template(sysTemplate, isCreated, templateParams, tumor_plugins)
# 81
sysTemplate, isCreated, isUpdated, templateParams = add_or_update_tagseq_cfdna_s5_540_chef_system_template(
"Oncomine TagSeq S540 Liquid Biopsy"
)
finish_sys_template(sysTemplate, isCreated, templateParams, liquid_biopsy_plugins)
def add_or_update_tagseq_cfdna_s5_540_chef_system_template(templateName):
# Tag Sequencing
TAG_SEQ_APPLICATION_GROUP = "onco_liquidBiopsy"
TAG_SEQ_BARCODE_KIT_NAME = "TagSequencing"
TAG_SEQ_CATEGORIES = "Oncomine;onco_liquidBiopsy;barcodes_8"
TAG_SEQ_CHIP_NAME = "540"
TAG_SEQ_FLOWS = 500
TAG_SEQ_LIB_KIT_NAME = "Oncomine cfDNA Assay"
TAG_SEQ_LIBRARY_READ_LENGTH = 200
TAG_SEQ_REFERENCE = "hg19"
TAG_SEQ_RUN_TYPE = "TAG_SEQUENCING"
TAG_SEQ_SAMPLE_GROUPING = "Self"
TAG_SEQ_SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
TAG_SEQ_TEMPLATE_KIT_NAME = "Ion Chef S540 V1"
PLAN_STATUS = "planned"
templateParams = TemplateParams(templateName, S5, TAG_SEQ_RUN_TYPE)
    templateParams.update(
        {
            "applicationGroup": TAG_SEQ_APPLICATION_GROUP,
            "barcodeKitName": TAG_SEQ_BARCODE_KIT_NAME,
            "categories": TAG_SEQ_CATEGORIES,
            "chipType": TAG_SEQ_CHIP_NAME,
            "flows": TAG_SEQ_FLOWS,
            "libraryKitName": TAG_SEQ_LIB_KIT_NAME,
            "libraryReadLength": TAG_SEQ_LIBRARY_READ_LENGTH,
            "reference": TAG_SEQ_REFERENCE,
            "sampleGrouping": TAG_SEQ_SAMPLE_GROUPING,
            "sequencekitname": TAG_SEQ_SEQ_KIT_NAME,
            "templatingKitName": TAG_SEQ_TEMPLATE_KIT_NAME,
            "planStatus": PLAN_STATUS,
        }
    )
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    return sysTemplate, isCreated, isUpdated, templateParams
or 'stopped'.
The method assumes there is a database connection active.
"""
self.connect_db()
sql_get = """
SELECT
t_source
FROM
sch_chameleon.t_sources
WHERE
enm_status NOT IN ('ready','stopped')
;
"""
self.pgsql_cur.execute(sql_get)
source_get = self.pgsql_cur.fetchall()
self.disconnect_db()
return source_get
def upgrade_catalogue_v20(self):
"""
The method applies the migration scripts to the replica catalogue version 2.0.
The method checks that all sources are in stopped or ready state.
"""
sql_view = """
CREATE OR REPLACE VIEW sch_chameleon.v_version
AS
SELECT %s::TEXT t_version
;"""
self.connect_db()
sources_active = self.__count_active_sources()
if sources_active[0] == 0:
catalog_version = self.get_catalog_version()
catalog_number = int(''.join([value for value in catalog_version.split('.')]))
self.connect_db()
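            # self.migrations is expected to be a list of dicts with "version"
            # and "script" keys, e.g. {"version": "2.0.1", "script":
            # "upgrade_2.0.1.sql"} (illustrative values); the dotted version is
            # flattened to an integer for the comparison below.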
for migration in self.migrations:
migration_version = migration["version"]
migration_number = int(''.join([value for value in migration_version.split('.')]))
if migration_number>catalog_number:
migration_file_name = '%s/%s' % (self.sql_upgrade_dir, migration["script"])
print("Migrating the catalogue from version %s to version %s" % (catalog_version, migration_version))
migration_data = open(migration_file_name, 'rb')
migration_sql = migration_data.read()
migration_data.close()
self.pgsql_cur.execute(migration_sql)
self.pgsql_cur.execute(sql_view, (migration_version, ))
else:
print('There are sources in running or syncing state. You shall stop all the replica processes before upgrading the catalogue.')
sys.exit()
def upgrade_catalogue_v1(self):
"""
The method upgrade a replica catalogue from version 1 to version 2.
The original catalogue is not altered but just renamed.
All the existing data are transferred into the new catalogue loaded using the create_schema.sql file.
"""
replay_max_rows = 10000
self.__v2_schema = "_sch_chameleon_version2"
self.__current_schema = "sch_chameleon"
self.__v1_schema = "_sch_chameleon_version1"
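        # Upgrade outline: replay any pending rows left in the old catalogue,
        # verify the configured schema mappings match the stored sources, rename
        # sch_chameleon to _sch_chameleon_version1, install the new catalogue,
        # migrate t_replica_tables and store the binlog coordinates per source.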
self.connect_db()
upgrade_possible = True
sql_get_min_max = """
SELECT
sch_chameleon.binlog_max(
ARRAY[
t_binlog_name,
i_binlog_position::text
]
),
sch_chameleon.binlog_min(
ARRAY[
t_binlog_name,
i_binlog_position::text
]
)
FROM
sch_chameleon.t_replica_tables
WHERE
i_id_source=%s
;
"""
sql_migrate_tables = """
WITH t_old_new AS
(
SELECT
old.i_id_source as id_source_old,
new.i_id_source as id_source_new,
new.t_dest_schema
FROM
_sch_chameleon_version1.t_sources old
INNER JOIN (
SELECT
i_id_source,
(jsonb_each_text(jsb_schema_mappings)).value as t_dest_schema
FROM
sch_chameleon.t_sources
) new
ON old.t_dest_schema=new.t_dest_schema
)
INSERT INTO sch_chameleon.t_replica_tables
(
i_id_source,
v_table_name,
v_schema_name,
v_table_pkey,
t_binlog_name,
i_binlog_position,
b_replica_enabled
)
SELECT
id_source_new,
v_table_name,
t_dest_schema,
string_to_array(replace(v_table_pkey[1],'"',''),',') as table_pkey,
bat.t_binlog_name,
bat.i_binlog_position,
't'::boolean as b_replica_enabled
FROM
_sch_chameleon_version1.t_replica_batch bat
INNER JOIN _sch_chameleon_version1.t_replica_tables tab
ON tab.i_id_source=bat.i_id_source
INNER JOIN t_old_new
ON tab.i_id_source=t_old_new.id_source_old
WHERE
NOT bat.b_processed
AND bat.b_started
;
"""
sql_mapping = """
WITH t_mapping AS
(
SELECT json_each_text(%s::json) AS t_sch_map
)
SELECT
mapped_schema=config_schema as match_mapping,
mapped_list,
config_list
FROM
(
SELECT
count(dst.t_sch_map) as mapped_schema,
string_agg((dst.t_sch_map).value,' ') as mapped_list
FROM
t_mapping dst
INNER JOIN sch_chameleon.t_sources src
ON
src.t_dest_schema=(dst.t_sch_map).value
AND src.t_source_schema= (dst.t_sch_map).key
) cnt_map,
(
SELECT
count(t_sch_map) as config_schema,
string_agg((t_sch_map).value,' ') as config_list
FROM
t_mapping
) cnt_cnf
;
"""
self.logger.info("Checking if we need to replay data in the existing catalogue")
sql_check = """
SELECT
src.i_id_source,
src.t_source,
count(log.i_id_event)
FROM
sch_chameleon.t_log_replica log
INNER JOIN sch_chameleon.t_replica_batch bat
ON log.i_id_batch=bat.i_id_batch
INNER JOIN sch_chameleon.t_sources src
ON src.i_id_source=bat.i_id_source
GROUP BY
src.i_id_source,
src.t_source
;
"""
self.pgsql_cur.execute(sql_check)
source_replay = self.pgsql_cur.fetchall()
if source_replay:
for source in source_replay:
id_source = source[0]
source_name = source[1]
replay_rows = source[2]
self.logger.info("Replaying last %s rows for source %s " % (replay_rows, source_name))
continue_loop = True
while continue_loop:
sql_replay = """SELECT sch_chameleon.fn_process_batch(%s,%s);"""
self.pgsql_cur.execute(sql_replay, (replay_max_rows, id_source, ))
replay_status = self.pgsql_cur.fetchone()
continue_loop = replay_status[0]
if continue_loop:
self.logger.info("Still replaying rows for source %s" % ( source_name, ) )
self.logger.info("Checking if the schema mappings are correctly matched")
for source in self.sources:
schema_mappings = json.dumps(self.sources[source]["schema_mappings"])
self.pgsql_cur.execute(sql_mapping, (schema_mappings, ))
config_mapping = self.pgsql_cur.fetchone()
source_mapped = config_mapping[0]
list_mapped = config_mapping[1]
list_config = config_mapping[2]
if not source_mapped:
self.logger.error("Checks for source %s failed. Matched mappings %s, configured mappings %s" % (source, list_mapped, list_config))
upgrade_possible = False
if upgrade_possible:
try:
                self.logger.info("Renaming the old schema %s in %s " % (self.__current_schema, self.__v1_schema))
sql_rename_old = sql.SQL("ALTER SCHEMA {} RENAME TO {};").format(sql.Identifier(self.__current_schema), sql.Identifier(self.__v1_schema))
self.pgsql_cur.execute(sql_rename_old)
self.logger.info("Installing the new replica catalogue " )
self.create_replica_schema()
for source in self.sources:
self.source = source
self.add_source()
self.pgsql_cur.execute(sql_migrate_tables)
for source in self.sources:
self.source = source
self.set_source_id()
self.pgsql_cur.execute(sql_get_min_max, (self.i_id_source, ))
min_max = self.pgsql_cur.fetchone()
max_position = min_max[0]
min_position = min_max[1]
master_data = {}
master_status = []
master_data["File"] = min_position[0]
master_data["Position"] = min_position[1]
master_status.append(master_data)
self.save_master_status(master_status)
master_status = []
master_data["File"] = max_position[0]
master_data["Position"] = max_position[1]
master_status.append(master_data)
self.set_source_highwatermark(master_status, False)
except:
self.rollback_upgrade_v1()
else:
self.logger.error("Sanity checks for the schema mappings failed. Aborting the upgrade")
self.rollback_upgrade_v1()
self.disconnect_db()
def rollback_upgrade_v1(self):
"""
        The procedure rolls back the upgrade, renaming the current schema sch_chameleon to the
        version 2 name and restoring the version 1 schema back to sch_chameleon.
"""
sql_check="""
SELECT
count(*)
FROM
information_schema.schemata
WHERE
schema_name=%s
"""
self.pgsql_cur.execute(sql_check, (self.__v1_schema, ))
v1_schema = self.pgsql_cur.fetchone()
if v1_schema[0] == 1:
self.logger.info("The schema %s exists, rolling back the changes" % (self.__v1_schema))
self.pgsql_cur.execute(sql_check, (self.__current_schema, ))
curr_schema = self.pgsql_cur.fetchone()
if curr_schema[0] == 1:
self.logger.info("Renaming the current schema %s in %s" % (self.__current_schema, self.__v2_schema))
sql_rename_current = sql.SQL("ALTER SCHEMA {} RENAME TO {};").format(sql.Identifier(self.__current_schema), sql.Identifier(self.__v2_schema))
self.pgsql_cur.execute(sql_rename_current)
sql_rename_old = sql.SQL("ALTER SCHEMA {} RENAME TO {};").format(sql.Identifier(self.__v1_schema), sql.Identifier(self.__current_schema))
self.pgsql_cur.execute(sql_rename_old)
else:
self.logger.info("The old schema %s does not exists, aborting the rollback" % (self.__v1_schema))
sys.exit()
self.logger.info("Rollback successful. Please note the catalogue version 2 has been renamed to %s for debugging.\nYou will need to drop it before running another upgrade" % (self.__v2_schema, ))
def unregister_table(self, schema, table):
"""
This method is used to remove a table from the replica catalogue.
The table is just deleted from the table sch_chameleon.t_replica_tables.
:param schema: the schema name where the table is stored
:param table: the table name to remove from t_replica_tables
"""
self.logger.info("unregistering table %s.%s from the replica catalog" % (schema, table,))
sql_delete=""" DELETE FROM sch_chameleon.t_replica_tables
WHERE
v_table_name=%s
AND v_schema_name=%s
;
"""
self.pgsql_cur.execute(sql_delete, (table, schema))
def cleanup_source_tables(self):
"""
The method cleans up the tables for active source in sch_chameleon.t_replica_tables.
"""
self.logger.info("deleting all the table references from the replica catalog for source %s " % (self.source,))
sql_delete=""" DELETE FROM sch_chameleon.t_replica_tables
WHERE
i_id_source=%s
;
"""
self.pgsql_cur.execute(sql_delete, (self.i_id_source, ))
def cleanup_table_events(self):
"""
The method cleans up the log events in the source's log tables for the given tables
"""
sql_get_log_tables = """
SELECT
v_log_table
FROM
sch_chameleon.t_sources
WHERE
i_id_source=%s
;
"""
self.pgsql_cur.execute(sql_get_log_tables, (self.i_id_source, ))
log_tables = self.pgsql_cur.fetchone()
list_conditions = []
for schema in self.schema_tables:
for table_name in self.schema_tables[schema]:
table_schema = self.schema_loading[schema]["destination"]
where_cond = "format('%%I.%%I','%s','%s')" % (table_schema, table_name)
list_conditions.append(where_cond)
sql_cleanup = "DELETE FROM sch_chameleon.{} WHERE format('%%I.%%I',v_schema_name,v_table_name) IN ('%s') ;" % ' ,'.join(list_conditions)
#sql_cleanup = "DELETE FROM sch_chameleon.{} WHERE format('%%I.%%I',v_schema_name,v_table_name) IN (%s) ;" % ' ,'.join(list_conditions)
for log_table in log_tables[0]:
self.logger.debug("Cleaning up log events in log table %s " % (log_table,))
sql_clean_log = sql.SQL(sql_cleanup).format(sql.Identifier(log_table))
self.pgsql_cur.execute(sql_clean_log)
def __count_table_schema(self, table, schema):
"""
The method checks if the table exists in the given schema.
:param table: the table's name
:param schema: the postgresql schema where the table should exist
:return: the count from pg_tables where table name and schema name are the given parameters
:rtype: integer
"""
sql_check = """
SELECT
count(*)
FROM
pg_tables
WHERE
schemaname=%s
AND tablename=%s;
"""
self.pgsql_cur.execute(sql_check, (schema, table ))
count_table = self.pgsql_cur.fetchone()
return count_table[0]
def write_ddl(self, token, query_data, destination_schema):
"""
The method writes the DDL built from the tokenised sql into PostgreSQL.
:param token: the tokenised query
:param query_data: query's metadata (schema,binlog, etc.)
:param destination_schema: the postgresql destination schema determined using the schema mappings.
"""
pg_ddl = self.__generate_ddl(token, destination_schema)
self.logger.debug("Translated query: %s " % (pg_ddl,))
log_table = query_data["log_table"]
insert_vals = (
query_data["batch_id"],
token["name"],
query_data["schema"],
query_data["binlog"],
query_data["logpos"],
pg_ddl
)
sql_insert=sql.SQL("""
INSERT INTO "sch_chameleon".{}
(
i_id_batch,
v_table_name,
v_schema_name,
enm_binlog_event,
t_binlog_name,
i_binlog_position,
t_query
)
VALUES
(
%s,
%s,
%s,
'ddl',
%s,
%s,
%s
)
;
""").format(sql.Identifier(log_table), )
self.pgsql_cur.execute(sql_insert, insert_vals)
def get_tables_disabled(self, format="csv"):
"""
The method returns a CSV or a python list of tables excluded from the replica.
The origin's schema is determined from the source's schema mappings jsonb.
:return: CSV list of tables excluded from the replica
:rtype: text
"""
if format=='csv':
select_clause = """string_agg(format('%s.%s',(t_mappings).key,v_table_name),',') """
elif format=='list':
select_clause = """array_agg(format('%s.%s',(t_mappings).key,v_table_name)) """
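        # Either aggregate the disabled tables into a single CSV string
        # (string_agg) or into a PostgreSQL array (array_agg), depending on the
        # requested output format.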
sql_get = """
SELECT
%s
FROM
sch_chameleon.t_replica_tables tab
INNER JOIN
(
SELECT
i_id_source,
jsonb_each_text(jsb_schema_mappings) as t_mappings
FROM
sch_chameleon.t_sources
) src
ON
tab.i_id_source=src.i_id_source
AND tab.v_schema_name=(t_mappings).value
WHERE
NOT tab.b_replica_enabled
;
""" % select_clause
self.pgsql_cur.execute(sql_get)
tables_disabled = self.pgsql_cur.fetchone()
return tables_disabled[0]
def swap_source_log_table(self):
"""
        The method swaps the source's log table and returns the next log table stored in the v_log_table array.
The method expects an active database connection.
:return: The t_log_replica's active subpartition
:rtype: text
"""
sql_log_table="""
UPDATE sch_chameleon.t_sources
SET
v_log_table=ARRAY[v_log_table[2],v_log_table[1]]
WHERE
i_id_source=%s
RETURNING
v_log_table[1]
;
"""
self.set_source_id()
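        # v_log_table holds two log table names; the UPDATE below swaps them so
        # the idle t_log_replica subpartition becomes the active one, and the
        # new active name is returned.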
self.pgsql_cur.execute(sql_log_table, (self.i_id_source, ))
results = self.pgsql_cur.fetchone()
log_table = results[0]
self.logger.debug("New log table : %s " % (log_table,))
return log_table
def get_batch_data(self):
"""
The method updates the batch status to started for the given source_id and returns the
        batch information.
:return: psycopg2 fetchall results without any manipulation
:rtype: psycopg2 tuple
"""
sql_batch="""
WITH t_created AS
(
SELECT
max(ts_created) AS ts_created
FROM
sch_chameleon.t_replica_batch
WHERE
NOT b_processed
AND NOT b_replayed
AND i_id_source=%s
)
UPDATE sch_chameleon.t_replica_batch
SET
b_started=True
FROM
t_created
WHERE
t_replica_batch.ts_created=t_created.ts_created
AND i_id_source=%s
RETURNING
i_id_batch,
t_binlog_name,
i_binlog_position,
v_log_table,
t_gtid_set
;
"""
self.pgsql_cur.execute(sql_batch, (self.i_id_source, self.i_id_source, ))
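        # The CTE above flagged the newest unprocessed, unreplayed batch for
        # this source as started; RETURNING yields its id, binlog coordinates,
        # log table and GTID set.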
return self.pgsql_cur.fetchall()
def drop_replica_schema(self):
"""
The method removes the service schema discarding all the replica references.
The replicated tables are kept in place though.
"""
self.logger.debug("Trying to connect to the destination database.")
self.connect_db()
file_schema = open(self.sql_dir+"drop_schema.sql", 'rb')
sql_schema = file_schema.read()
file_schema.close()
self.pgsql_cur.execute(sql_schema)
def get_catalog_version(self):
"""
        The method returns the replica schema's version.
:return: the version string selected from sch_chameleon.v_version
:rtype: text
"""
schema_version = None
sql_version = """
SELECT
t_version
FROM
sch_chameleon.v_version
;
"""
self.connect_db()
try:
self.pgsql_cur.execute(sql_version)
schema_version = self.pgsql_cur.fetchone()
self.disconnect_db()
schema_version = schema_version[0]
except:
schema_version = None
return schema_version
def check_replica_schema(self):
"""
The method checks if the sch_chameleon exists
:return: count from information_schema.schemata
:rtype: integer
"""
sql_check="""
SELECT
count(*)
FROM
information_schema.schemata
WHERE
schema_name='sch_chameleon'
"""
self.pgsql_cur.execute(sql_check)
num_schema = self.pgsql_cur.fetchone()
return num_schema
def check_schema_mappings(self, exclude_current_source=False):
"""
        The method checks if there is already a destination schema in the stored schema mappings.
        As each schema should be managed by one mapping only, if the method returns None then
        the source can be stored safely. Otherwise the decision on how to proceed is left to the
        calling methods; this method doesn't take any action on its own.
        The method assumes there is a database connection active.
        The method returns a list or None.
        If a list is returned it contains the count and the destination schema names
        that are already present in the replica catalogue.
        :param exclude_current_source: If set to True the current source name is excluded from the check. Defaults to False.
:return: the schema already mapped in the replica catalogue.
:rtype: list
"""
if exclude_current_source:
exclude_id = self.i_id_source
else:
exclude_id = -1
schema_mappings = json.dumps(self.sources[self.source]["schema_mappings"])
sql_check = """
SELECT
(jsonb_each_text(jsb_schema_mappings)).value AS dest_schema
FROM
sch_chameleon.t_sources
WHERE
i_id_source <> %s
;
"""
sql_check2 = """
SELECT
value AS dest_schema
FROM
json_each_text(%s::json)
;
"""
self.pgsql_cur.execute(sql_check, (exclude_id, ))
check_mappings = self.pgsql_cur.fetchall()
        if check_mappings:
            self.pgsql_cur.execute(sql_check2, (schema_mappings, ))
            check_mappings = check_mappings + self.pgsql_cur.fetchall()
        else:
            self.pgsql_cur.execute(sql_check2, (schema_mappings, ))
            check_mappings = self.pgsql_cur.fetchall()
if check_mappings is None or len(check_mappings) < 2 :
return None
return True
def check_source(self):
"""
The method checks if the source name stored in the class variable self.source is already present.
As this method is used in both add and drop source it just retuns the count of the sources.
Any decision about the source is left to the calling method.
The method assumes there is a database connection active.
"""
sql_check = """
SELECT
count(*)
FROM
sch_chameleon.t_sources
WHERE
t_source=%s;
"""
self.pgsql_cur.execute(sql_check, (self.source, ))
num_sources = self.pgsql_cur.fetchone()
return num_sources[0]
def add_source(self):
"""
The method adds a new source to the replication catalog.
        The method calls the function fn_refresh_parts() which generates the source's log tables used by the replica.
        """
import functools
import logging
import operator
from typing import List, TextIO
import soda.tensor
from haoda import ir, util
from soda import core, grammar
from soda import util as soda_util
logger = logging.getLogger().getChild(__name__)
STENCIL_DIM_FMT = util.MetaFmt('kStencilDim%s')
TYPE_FMT = util.MetaFmt('Type_%s')
WIDTH_FMT = util.MetaFmt('kWidth_%s')
def print_header(printer):
# C headers
for header in ('assert', 'float', 'math', 'stdbool', 'stddef', 'stdint',
'stdio', 'stdlib', 'string'):
printer.println(f'#include <c{header}>')
printer.println()
# C++ headers
for header in ('algorithm', 'iomanip', 'iostream', 'memory', 'random',
'regex', 'string', 'unordered_map', 'vector'):
printer.println(f'#include <{header}>')
printer.println()
# Other system headers
for header in ('frt',):
printer.println(f'#include <{header}.h>')
printer.println()
# using declarations
for name in ('clog', 'endl', 'regex', 'regex_match', 'string', 'unique_ptr',
'unordered_map', 'vector'):
printer.println(f'using std::{name};')
printer.println()
def print_func(printer: util.CppPrinter, stencil: soda.core.Stencil):
stmts = stencil.input_stmts + stencil.output_stmts
# factories for meta variables
data_fmt = util.MetaFmt('var_%s_ptr')
extent_fmt = util.MetaFmt('var_%s_extent')
stride_fmt = util.MetaFmt('var_%s_stride')
min_fmt = util.MetaFmt('var_%s_min')
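  # MetaFmt expands the tensor name into the pattern, e.g. data_fmt['foo']
  # yields 'var_foo_ptr' (illustrative tensor name, not from the SODA input).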
# print function signature
params: List[str] = []
for stmt in stmts + stencil.param_stmts:
prefix = 'const ' if isinstance(stmt, grammar.InputStmt) else ''
params.extend((f'{prefix}{TYPE_FMT[stmt.name]}* {data_fmt[stmt.name]}',
f'const int32_t {extent_fmt[stmt.name]}[{stencil.dim}]',
f'const int32_t {stride_fmt[stmt.name]}[{stencil.dim}]',
f'const int32_t {min_fmt[stmt.name]}[{stencil.dim}]'))
tile_size_fmt = util.MetaFmt('tile_size_%s')
params.extend((
'const char* bitstream',
f'const int burst_width = {stencil.burst_width}',
*(f'const int {tile_size_fmt[d]} = {stencil.tile_size[d]}'
for d in range(stencil.dim - 1)),
f'const int unroll_factor = {stencil.unroll_factor}',
))
printer.print_func(name=f'int {stencil.app_name}', params=params, align=0)
printer.do_scope()
printer.printlns(
'// load bitstream',
'auto instance = fpga::Instance(bitstream);',
      'auto args_info = instance.GetArgsInfo();',
'',
)
bank_count_fmt = util.MetaFmt('bank_count_%s')
regex_fmt = util.MetaFmt('regex_%s')
elem_count_per_cycle_fmt = util.MetaFmt('elem_count_per_cycle_%s')
tile_count_fmt = util.MetaFmt('tile_count_dim_%d')
printer.printlns(
'// find out how many banks are used for each tensor',
*(f'int {bank_count_fmt[x.name]} = 0;' for x in stmts),
*(f'const regex {regex_fmt[x.name]}'
f'(R"(^bank_\\d+_{x.name}$)");' for x in stmts),
)
with printer.for_('const auto& arg', 'args_info'):
printer.printlns(f'if (regex_match(arg.name, {regex_fmt[x.name]})) '
f'++{bank_count_fmt[x.name]};' for x in stmts)
printer.printlns(
'',
('auto round_up = [](int64_t a, int64_t b) -> int64_t '
'{ return ((a - 1) / b + 1) * b; };'),
'',
'// some run-time constants',
*(f'const int {elem_count_per_cycle_fmt[x.name]} = '
f'burst_width / {WIDTH_FMT[x.name]} * {bank_count_fmt[x.name]};'
for x in stmts),
)
for d in range(stencil.dim - 1):
printer.println(f'int32_t {tile_count_fmt[d]} = '
f'({extent_fmt[stencil.input_names[0]]}[{d}] - '
f'{STENCIL_DIM_FMT[d]} + 1 - 1) / ({tile_size_fmt[d]} - '
f'{STENCIL_DIM_FMT[d]} + 1) + 1;')
printer.printlns(
('int64_t tile_count = %s;' %
' * '.join(f'{tile_count_fmt[d]}' for d in range(stencil.dim - 1))),
'',
)
printer.printlns(
'// align each linearized tile to multiples of burst_width',
('int64_t elem_count_per_tile = %s * '
f'{extent_fmt[stencil.input_names[0]]}[{stencil.dim - 1}];' %
' * '.join(f'{tile_size_fmt[d]}' for d in range(stencil.dim - 1))),
('int64_t cycle_count_per_tile = (elem_count_per_tile - 1) / '
f'{elem_count_per_cycle_fmt[stencil.input_names[0]]} + 1;'),
('int64_t elem_count_aligned_per_tile_i = cycle_count_per_tile * '
f'{elem_count_per_cycle_fmt[stencil.input_stmts[0].name]};'),
('int64_t elem_count_aligned_per_tile_o = cycle_count_per_tile * '
f'{elem_count_per_cycle_fmt[stencil.output_stmts[0].name]};'),
'',
)
printer.println('// calculate size of each buffer')
buf_size_fmt = util.MetaFmt('buf_size_%s')
for stmt in stencil.input_stmts:
printer.println(
f'int64_t {buf_size_fmt[stmt.name]} = '
f'(tile_count * elem_count_aligned_per_tile_i + '
f'round_up(kStencilDistance, {elem_count_per_cycle_fmt[stmt.name]}))'
f' / {bank_count_fmt[stmt.name]} * sizeof({TYPE_FMT[stmt.name]});')
for stmt in stencil.output_stmts:
printer.println(
f'int64_t {buf_size_fmt[stmt.name]} = '
f'(tile_count * elem_count_aligned_per_tile_o + '
f'round_up(kStencilDistance, {elem_count_per_cycle_fmt[stmt.name]}))'
f' / {bank_count_fmt[stmt.name]} * sizeof({TYPE_FMT[stmt.name]});')
printer.println()
printer.println('// allocate memory for each buffer')
buf_fmt = util.MetaFmt('buf_%s')
for stmt in stmts:
printer.printlns(
(f'vector<unique_ptr<{TYPE_FMT[stmt.name]}, decltype(&free)>> '
f'{buf_fmt[stmt.name]};'),
f'{buf_fmt[stmt.name]}.reserve({bank_count_fmt[stmt.name]});',
)
with printer.for_('int bank = 0', f'bank < {bank_count_fmt[stmt.name]}',
'++bank'):
printer.println(
f'{buf_fmt[stmt.name]}.emplace_back('
f'static_cast<{TYPE_FMT[stmt.name]}*>(aligned_alloc('
f'4096, round_up({buf_size_fmt[stmt.name]}, 4096))), &free);')
printer.println()
printer.println('// tiling')
for dim in range(stencil.dim - 2, -1, -1):
printer.println(f'for(int32_t tile_index_dim_{dim} = 0; '
f'tile_index_dim_{dim} < {tile_count_fmt[dim]}; '
f'++tile_index_dim_{dim})')
printer.do_scope()
printer.println(f'int32_t actual_tile_size_dim_{dim} = '
f'(tile_index_dim_{dim}=={tile_count_fmt[dim]}-1) ? '
f'{extent_fmt[stencil.input_names[0]]}[{dim}] - '
f'({tile_size_fmt[dim]} - {STENCIL_DIM_FMT[dim]} + 1) * '
f'tile_index_dim_{dim} : {tile_size_fmt[dim]};')
printer.println('#pragma omp parallel for', 0)
var = soda_util.COORDS_IN_TILE[stencil.dim - 1]
printer.println(
f'for(int32_t {var} = 0; '
f'{var} < {extent_fmt[stencil.input_names[0]]}[{stencil.dim - 1}]; '
f'++{var})')
printer.do_scope()
for dim in range(stencil.dim - 2, -1, -1):
printer.println('for(int32_t {0} = 0; {0} < actual_tile_size_dim_{1}; '
'++{0})'.format(soda_util.COORDS_IN_TILE[dim], dim))
printer.do_scope()
printer.printlns(
('// (%s) is coordinates in tiled image' %
', '.join(soda_util.COORDS_TILED)),
('// (%s) is coordinates in original image' %
', '.join(soda_util.COORDS_IN_ORIG)),
'// (%s) is coordinates in a tile' % ', '.join(soda_util.COORDS_IN_TILE),
)
offset_in_tile = ' + '.join(
'%s%s' % (soda_util.COORDS_IN_TILE[x], ''.join(f' * {tile_size_fmt[d]}'
for d in range(x)))
for x in range(stencil.dim))
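  # The linearized in-tile offset keeps dimension 0 contiguous: coordinate x is
  # scaled by the product of the tile sizes of all lower dimensions.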
stmt = stencil.input_stmts[0]
printer.printlns(
(f'int32_t burst_index = ({offset_in_tile}) / '
f'{elem_count_per_cycle_fmt[stmt.name]};'),
(f'int32_t burst_residue = ({offset_in_tile}) % '
f'{elem_count_per_cycle_fmt[stmt.name]};'),
)
for dim in range(stencil.dim - 1):
printer.println(
f'int32_t {soda_util.COORDS_IN_ORIG[dim]} = tile_index_dim_{dim} * '
f'({tile_size_fmt[dim]} - {STENCIL_DIM_FMT[dim]}) + '
f'{soda_util.COORDS_IN_TILE[dim]};')
printer.printlns(
('int32_t %s = %s;' % (soda_util.COORDS_IN_ORIG[stencil.dim - 1],
soda_util.COORDS_IN_TILE[stencil.dim - 1])),
(f'int64_t tiled_offset = (%s) * elem_count_aligned_per_tile_i + '
f'burst_index * {elem_count_per_cycle_fmt[stmt.name]} + burst_residue;' %
' + '.join('%stile_index_dim_%d' % (''.join(f'{tile_count_fmt[d]} * '
for d in range(x)), x)
for x in range(stencil.dim - 1))),
('int64_t original_offset = %s;' %
' + '.join(f'%s * {stride_fmt[stencil.input_names[0]]}[%d]' %
(soda_util.COORDS_IN_ORIG[x], x)
for x in range(stencil.dim))),
)
printer.printlns(f'{buf_fmt[x]}'
f'[tiled_offset % {bank_count_fmt[x]}].get()'
f'[tiled_offset / {bank_count_fmt[x]}] = '
f'{data_fmt[x]}[std::max(int64_t(0), original_offset - '
f'{stencil.tensors[x].produce_offset})];'
for x in stencil.input_names)
for dim in range(stencil.dim * 2 - 1):
printer.un_scope()
printer.println()
for d in range(stencil.dim - 1):
printer.println(
f'clog << "INFO: tile_count[{d}] = " << {tile_count_fmt[d]} '
f'<< ", tile_size[{d}] = " << {tile_size_fmt[d]} << endl;')
for name in stencil.input_names + stencil.output_names:
for item in 'extent', 'stride', 'min':
fmt = locals()[item + '_fmt']
printer.println(
'clog << "INFO: %s" << endl;' %
', '.join(f'{name}.{item}[{d}] = " << {fmt[name]}[{d}] << "'
for d in range(stencil.dim)))
printer.println()
stmt = stencil.input_stmts[0]
printer.printlns(
('int64_t tile_data_count = '
f'((int64_t({extent_fmt[stmt.name]}[{stencil.dim - 1}])%s - 1) / '
f'{elem_count_per_cycle_fmt[stmt.name]} + 1) * '
f'{elem_count_per_cycle_fmt[stmt.name]} / '
'unroll_factor;' %
(''.join(f' * {tile_size_fmt[d]}' for d in range(stencil.dim - 1)))),
('int64_t cycle_count = '
f'((int64_t({extent_fmt[stmt.name]}[{stencil.dim - 1}])%s * %s + '
f'kStencilDistance - 1) / {elem_count_per_cycle_fmt[stmt.name]} + 1);' %
(''.join(f' * {tile_size_fmt[d]}' for d in range(stencil.dim - 1)),
' * '.join(tile_count_fmt[d] for d in range(stencil.dim - 1)))),
('clog << "INFO: tile_data_count = " << tile_data_count '
'<< ", cycle_count = " << cycle_count << endl;'),
'',
)
printer.println('int arg_idx = 0;')
iter_fmt = util.MetaFmt('iter_%s')
for stmt in stmts:
printer.println(
f'auto {iter_fmt[stmt.name]} = {buf_fmt[stmt.name]}.begin();')
with printer.for_('const auto& arg', 'args_info'):
with printer.if_('arg.name == "coalesced_data_num"'):
printer.printlns(
'instance.SetArg(arg_idx, cycle_count);',
'++arg_idx;',
)
for stmt in stmts:
direction = 'Write' if isinstance(stmt, grammar.InputStmt) else 'Read'
with printer.elif_(f'regex_match(arg.name, {regex_fmt[stmt.name]})'):
printer.printlns(
(f'auto buf = fpga::{direction}Only('
f'{iter_fmt[stmt.name]}->get(), '
f'{buf_size_fmt[stmt.name]} / sizeof({TYPE_FMT[stmt.name]}));'),
'instance.AllocBuf(arg_idx, buf);',
'instance.SetArg(arg_idx, buf);',
f'++{iter_fmt[stmt.name]};',
'++arg_idx;',
)
printer.printlns(
'',
'instance.WriteToDevice();',
'instance.Exec();',
'instance.ReadFromDevice();',
'instance.Finish();',
'',
('clog << "Load throughput: " << std::setprecision(3) '
'<< instance.LoadThroughputGbps() << " GB/s" << endl;'),
('clog << "Compute latency: " << std::setprecision(3) '
'<< instance.ComputeTimeSeconds() << " s" << endl;'),
('clog << "Store throughput: " << std::setprecision(3) '
'<< instance.StoreThroughputGbps() <<" GB/s" << endl;'),
'',
)
for dim in range(stencil.dim - 2, -1, -1):
printer.println(
f'for(int32_t tile_index_dim_{dim} = 0; tile_index_dim_{dim} < '
f'{tile_count_fmt[dim]}; ++tile_index_dim_{dim})')
printer.do_scope()
printer.println(f'int32_t actual_tile_size_dim_{dim} = '
f'(tile_index_dim_{dim} == {tile_count_fmt[dim]}-1) ? '
f'{extent_fmt[stencil.input_names[0]]}[{dim}] - '
f'({tile_size_fmt[dim]} - {STENCIL_DIM_FMT[dim]} + 1)'
f' * tile_index_dim_{dim} : {tile_size_fmt[dim]};')
overall_stencil_window = core.get_overall_stencil_window(
stencil.tensors[stencil.input_names[0]],
stencil.tensors[stencil.output_names[0]])
overall_stencil_offset = core.get_stencil_window_offset(
overall_stencil_window)
overall_stencil_dim = core.get_stencil_dim(overall_stencil_window)
printer.println('#pragma omp parallel for', 0)
printer.println('for(int32_t {var} = {}; {var} < '
f'{extent_fmt[stencil.output_names[0]]}[{stencil.dim - 1}]'
' - {}; ++{var})'.format(
max(0, overall_stencil_offset[stencil.dim - 1]),
max(0, (overall_stencil_dim[stencil.dim - 1] - 1 -
overall_stencil_offset[stencil.dim - 1])),
var=soda_util.COORDS_IN_TILE[stencil.dim - 1]))
printer.do_scope()
for dim in range(stencil.dim - 2, -1, -1):
printer.println(
'for(int32_t {var} = {}; {var} < actual_tile_size_dim_{} - {}; '
'++{var})'.format(
max(0, overall_stencil_offset[dim]),
dim,
max(0, overall_stencil_dim[dim] - 1 - overall_stencil_offset[dim]),
var=soda_util.COORDS_IN_TILE[dim]))
printer.do_scope()
printer.printlns(
('// (%s) is coordinates in tiled image' %
', '.join(soda_util.COORDS_TILED)),
('// (%s) is coordinates in original image' %
', '.join(soda_util.COORDS_IN_ORIG)),
'// (%s) is coordinates in a tile' % ', '.join(soda_util.COORDS_IN_TILE),
)
offset_in_tile = ' + '.join(
'%s%s' % (soda_util.COORDS_IN_TILE[x], ''.join(f' * {tile_size_fmt[d]}'
for d in range(x)))
for x in range(stencil.dim))
for dim in range(stencil.dim - 1):
printer.println(
f'int32_t {soda_util.COORDS_IN_ORIG[dim]} = tile_index_dim_{dim} '
f'* ({tile_size_fmt[dim]}-{STENCIL_DIM_FMT[dim]} + 1) + '
f'{soda_util.COORDS_IN_TILE[dim]};')
printer.printlns(
('int32_t %s = %s;' % (soda_util.COORDS_IN_ORIG[stencil.dim - 1],
soda_util.COORDS_IN_TILE[stencil.dim - 1])),
('int64_t original_offset = %s;' %
' + '.join(f'%s * {stride_fmt[stencil.output_names[0]]}[%d]' %
(soda_util.COORDS_IN_ORIG[x], x)
for x in range(stencil.dim))),
)
for stmt in stencil.output_stmts:
overall_stencil_window = core.get_overall_stencil_window(
map(stencil.tensors.get, stencil.input_names),
stencil.tensors[stmt.name])
overall_stencil_distance = core.get_stencil_distance(
overall_stencil_window, stencil.tile_size)
stencil_offset = overall_stencil_distance - soda_util.serialize(
core.get_stencil_window_offset(overall_stencil_window),
stencil.tile_size)
printer.printlns(
(f'int32_t burst_index_{stmt.name} = '
f'({offset_in_tile} + {stencil_offset}) / '
f'{elem_count_per_cycle_fmt[stmt.name]};'),
(f'int32_t burst_residue_{stmt.name} = | |
fraction of vertical tail span where horizontal tail is mounted = 0. for body mounted (Default for transports with all engines on the wing and for fighters) = 1. for T tail (Default for transports with multiple engines on the fuselage)')
nvert = Int(1, desc='Number of vertical tails')
svt = Float(0.0, units='ft*ft', desc='Vertical tail theoretical area (per tail)')
swpvt = Float(-100.0, units='deg', desc='Vertical tail sweep angle at 25% chord (Default = SWPHT)')
arvt = Float(-100.0, desc='Vertical tail theoretical aspect ratio (Default = ARHT/2)')
trvt = Float(-100.0, desc='Vertical tail theoretical taper ratio (Default = TRHT)')
tcvt = Float(0.0, desc='Thickness-chord ratio for the vertical tail (Default = TCHT)')
nfin = Int(0, desc='Number of fins')
sfin = Float(0.0, units='ft*ft', desc='Vertical fin theoretical area')
arfin = Float(-100.0, desc='Vertical fin theoretical aspect ratio')
trfin = Float(-100.0, desc='Vertical fin theoretical taper ratio')
swpfin = Float(-100.0, units='deg', desc='Vertical fin sweep angle at 25% chord')
tcfin = Float(0.0, desc='Vertical fin thickness - chord ratio')
scan = Float(0.0, units='ft*ft', desc='Canard theoretical area')
swpcan = Float(-100.0, units='deg', desc='Canard sweep angle at 25% chord')
arcan = Float(-100.0, desc='Canard theoretical aspect ratio')
trcan = Float(-100.0, desc='Canard theoretical taper ratio')
tccan = Float(0.0, desc='Canard thickness-chord ratio (Default = TCHT)')
class FlopsWrapper_input_wtin_Propulsion(VariableTree):
"""Container for input.wtin.Propulsion"""
# OpenMDAO Public Variables
new = Int(0, desc='Number of wing mounted engines')
nef = Int(0, desc='Number of fuselage mounted engines')
thrso = Float(0.0, units='lb', desc='Rated thrust of baseline engine as described in Engine Deck (Default = THRUST, see &CONFIN)')
weng = Float(0.0, units='lb', desc='Weight of each baseline engine or bare engine if WINL and WNOZ (below) are supplied (Default = THRSO/5.5 for transports and THRSO/8 for fighters)')
eexp = Float(1.15, desc='Engine weight scaling parameter\nW(Engine) = WENG*(THRUST/THRSO)**EEXP\nIf EEXP is less than 0.3,\nW(Engine) = WENG + (THRUST-THRSO)*EEXP')
winl = Float(0.0, units='lb', desc='Inlet weight for baseline engine if not included in WENG above')
einl = Float(1.0, desc='Inlet weight scaling exponent\nW(Inlet) = WINL*(THRUST/THRSO)**EINL')
wnoz = Float(0.0, units='lb', desc='Nozzle weight for baseline engine if not included in WENG above')
enoz = Float(1.0, desc='Nozzle weight scaling exponent\nW(Nozzle) = WNOZ*(THRUST/THRSO)**ENOZ')
xnac = Float(0.0, units='ft', desc='Average length of baseline engine nacelles. Scaled by SQRT(THRUST/THRSO)')
dnac = Float(0.0, units='ft', desc='Average diameter of baseline engine nacelles. Scaled by SQRT(THRUST/THRSO)')
wpmisc = Float(0.0, desc='Additional miscellaneous propulsion system weight or fraction of engine weight if < 1. This is added to the engine control and starter weight and may be overridden if WPMSC is input.')
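# Hedged illustration (not part of the FLOPS wrapper itself) of the engine
# weight scaling rule described in the EEXP docstring above; the function
# name and the sample values below are purely illustrative.
def _example_scaled_engine_weight(weng, thrust, thrso, eexp):
    """Scale a baseline engine weight to a new thrust level."""
    if eexp < 0.3:
        # small EEXP values act as a per-pound increment on the thrust delta
        return weng + (thrust - thrso) * eexp
    # otherwise EEXP is an exponent on the thrust ratio
    return weng * (thrust / thrso) ** eexp
# e.g. _example_scaled_engine_weight(5000.0, 33000.0, 30000.0, 1.15)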
class FlopsWrapper_input_wtin_Override(VariableTree):
"""Container for input.wtin.Override"""
# OpenMDAO Public Variables
frwi = Float(1.0, desc='Total wing weight - fixed weight overrides FRWI1, FRWI2, FRWI3, FRWI4 below, scale factor is cumulative \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n \n')
frwi1 = Float(1.0, desc='First term in wing weight equation - loosely corresponds to bending material weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n')
frwi2 = Float(1.0, desc='Second term in wing weight equation - loosely corresponds to control surfaces, spars and ribs \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n')
frwi3 = Float(1.0, desc='Third term in wing weight equation - miscellaneous, just because it')
frwi4 = Float(1.0, desc='Fourth term in wing weight equation - miscellaneous, just because it')
frht = Float(1.0, desc='Horizontal tail weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frvt = Float(1.0, desc='Vertical tail weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frfin = Float(1.0, desc='Wing vertical fin weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frcan = Float(1.0, desc='Canard weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frfu = Float(1.0, desc='Fuselage weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frlgn = Float(1.0, desc='Landing gear weight, nose \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frlgm = Float(1.0, desc='Landing gear weight, main \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frna = Float(1.0, desc='Total weight of nacelles and/or air induction system \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
wthr = Float(0.0, desc='Total weight of thrust reversers\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
wpmsc = Float(1.0, desc='Weight of miscellaneous propulsion systems such as engine controls, starter and wiring \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
wfsys = Float(1.0, desc='Weight of fuel system \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
frsc = Float(1.0, desc='Surface controls weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
wapu = Float(1.0, | |
see `Diagnostic Settings REST API
<https://go.microsoft.com/fwlink/?linkid=2033207>`_
or `Diagnostic Settings PowerShell <https://go.microsoft.com/fwlink/?linkid=2033043>`_.
:type is_azure_monitor_target_enabled: bool
:param queue_delay_ms: Specifies the amount of time in milliseconds that can elapse before
audit actions are forced to be processed.
The default minimum value is 1000 (1 second). The maximum is 2,147,483,647.
:type queue_delay_ms: int
:param state: Specifies the state of the audit. If state is Enabled, storageEndpoint or
isAzureMonitorTargetEnabled are required. Possible values include: "Enabled", "Disabled".
:type state: str or ~azure.mgmt.sql.models.BlobAuditingPolicyState
:param storage_endpoint: Specifies the blob storage endpoint (e.g.
https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint or
isAzureMonitorTargetEnabled is required.
:type storage_endpoint: str
:param storage_account_access_key: Specifies the identifier key of the auditing storage
account.
If state is Enabled and storageEndpoint is specified, not specifying the
storageAccountAccessKey will use SQL server system-assigned managed identity to access the
storage.
Prerequisites for using managed identity authentication:
#. Assign SQL Server a system-assigned managed identity in Azure Active Directory (AAD).
#. Grant SQL Server identity access to the storage account by adding 'Storage Blob Data
Contributor' RBAC role to the server identity.
For more information, see `Auditing to storage using Managed Identity authentication
<https://go.microsoft.com/fwlink/?linkid=2114355>`_.
:type storage_account_access_key: str
:param storage_account_subscription_id: Specifies the blob storage subscription Id.
:type storage_account_subscription_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_devops_audit_enabled': {'key': 'properties.isDevopsAuditEnabled', 'type': 'bool'},
'predicate_expression': {'key': 'properties.predicateExpression', 'type': 'str'},
'retention_days': {'key': 'properties.retentionDays', 'type': 'int'},
'audit_actions_and_groups': {'key': 'properties.auditActionsAndGroups', 'type': '[str]'},
'is_storage_secondary_key_in_use': {'key': 'properties.isStorageSecondaryKeyInUse', 'type': 'bool'},
'is_azure_monitor_target_enabled': {'key': 'properties.isAzureMonitorTargetEnabled', 'type': 'bool'},
'queue_delay_ms': {'key': 'properties.queueDelayMs', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'storage_endpoint': {'key': 'properties.storageEndpoint', 'type': 'str'},
'storage_account_access_key': {'key': 'properties.storageAccountAccessKey', 'type': 'str'},
'storage_account_subscription_id': {'key': 'properties.storageAccountSubscriptionId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtendedServerBlobAuditingPolicy, self).__init__(**kwargs)
self.is_devops_audit_enabled = kwargs.get('is_devops_audit_enabled', None)
self.predicate_expression = kwargs.get('predicate_expression', None)
self.retention_days = kwargs.get('retention_days', None)
self.audit_actions_and_groups = kwargs.get('audit_actions_and_groups', None)
self.is_storage_secondary_key_in_use = kwargs.get('is_storage_secondary_key_in_use', None)
self.is_azure_monitor_target_enabled = kwargs.get('is_azure_monitor_target_enabled', None)
self.queue_delay_ms = kwargs.get('queue_delay_ms', None)
self.state = kwargs.get('state', None)
self.storage_endpoint = kwargs.get('storage_endpoint', None)
self.storage_account_access_key = kwargs.get('storage_account_access_key', None)
self.storage_account_subscription_id = kwargs.get('storage_account_subscription_id', None)
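# Hedged usage sketch: the kwargs-based constructor above allows building the
# model directly from keyword arguments. The endpoint and key values below are
# placeholders, not real credentials.
def _example_extended_server_auditing_policy():
    return ExtendedServerBlobAuditingPolicy(
        state="Enabled",
        storage_endpoint="https://myaccount.blob.core.windows.net",
        storage_account_access_key="<storage-access-key>",
        retention_days=30,
        is_azure_monitor_target_enabled=False,
    )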
class ExtendedServerBlobAuditingPolicyListResult(msrest.serialization.Model):
"""A list of server extended auditing settings.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.ExtendedServerBlobAuditingPolicy]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ExtendedServerBlobAuditingPolicy]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtendedServerBlobAuditingPolicyListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class FailoverGroup(ProxyResource):
"""A failover group.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param read_write_endpoint: Read-write endpoint of the failover group instance.
:type read_write_endpoint: ~azure.mgmt.sql.models.FailoverGroupReadWriteEndpoint
:param read_only_endpoint: Read-only endpoint of the failover group instance.
:type read_only_endpoint: ~azure.mgmt.sql.models.FailoverGroupReadOnlyEndpoint
:ivar replication_role: Local replication role of the failover group instance. Possible values
include: "Primary", "Secondary".
:vartype replication_role: str or ~azure.mgmt.sql.models.FailoverGroupReplicationRole
:ivar replication_state: Replication state of the failover group instance.
:vartype replication_state: str
:param partner_servers: List of partner server information for the failover group.
:type partner_servers: list[~azure.mgmt.sql.models.PartnerInfo]
:param databases: List of databases in the failover group.
:type databases: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'readonly': True},
'replication_role': {'readonly': True},
'replication_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'read_write_endpoint': {'key': 'properties.readWriteEndpoint', 'type': 'FailoverGroupReadWriteEndpoint'},
'read_only_endpoint': {'key': 'properties.readOnlyEndpoint', 'type': 'FailoverGroupReadOnlyEndpoint'},
'replication_role': {'key': 'properties.replicationRole', 'type': 'str'},
'replication_state': {'key': 'properties.replicationState', 'type': 'str'},
'partner_servers': {'key': 'properties.partnerServers', 'type': '[PartnerInfo]'},
'databases': {'key': 'properties.databases', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(FailoverGroup, self).__init__(**kwargs)
self.location = None
self.tags = kwargs.get('tags', None)
self.read_write_endpoint = kwargs.get('read_write_endpoint', None)
self.read_only_endpoint = kwargs.get('read_only_endpoint', None)
self.replication_role = None
self.replication_state = None
self.partner_servers = kwargs.get('partner_servers', None)
self.databases = kwargs.get('databases', None)
class FailoverGroupListResult(msrest.serialization.Model):
"""A list of failover groups.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.FailoverGroup]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[FailoverGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FailoverGroupListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class FailoverGroupReadOnlyEndpoint(msrest.serialization.Model):
"""Read-only endpoint of the failover group instance.
:param failover_policy: Failover policy of the read-only endpoint for the failover group.
Possible values include: "Disabled", "Enabled".
:type failover_policy: str or ~azure.mgmt.sql.models.ReadOnlyEndpointFailoverPolicy
"""
_attribute_map = {
'failover_policy': {'key': 'failoverPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FailoverGroupReadOnlyEndpoint, self).__init__(**kwargs)
self.failover_policy = kwargs.get('failover_policy', None)
class FailoverGroupReadWriteEndpoint(msrest.serialization.Model):
"""Read-write endpoint of the failover group instance.
All required parameters must be populated in order to send to Azure.
:param failover_policy: Required. Failover policy of the read-write endpoint for the failover
group. If failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes is required.
Possible values include: "Manual", "Automatic".
:type failover_policy: str or ~azure.mgmt.sql.models.ReadWriteEndpointFailoverPolicy
:param failover_with_data_loss_grace_period_minutes: Grace period before failover with data
loss is attempted for the read-write endpoint. If failoverPolicy is Automatic then
failoverWithDataLossGracePeriodMinutes is required.
:type failover_with_data_loss_grace_period_minutes: int
"""
_validation = {
'failover_policy': {'required': True},
}
_attribute_map = {
'failover_policy': {'key': 'failoverPolicy', 'type': 'str'},
'failover_with_data_loss_grace_period_minutes': {'key': 'failoverWithDataLossGracePeriodMinutes', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FailoverGroupReadWriteEndpoint, self).__init__(**kwargs)
self.failover_policy = kwargs['failover_policy']
self.failover_with_data_loss_grace_period_minutes = kwargs.get('failover_with_data_loss_grace_period_minutes', None)
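# Hedged sketch: failover_policy is required by the validation above, and the
# grace period must accompany an "Automatic" policy per the docstring.
def _example_read_write_endpoint():
    return FailoverGroupReadWriteEndpoint(
        failover_policy="Automatic",
        failover_with_data_loss_grace_period_minutes=60,
    )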
class FailoverGroupUpdate(msrest.serialization.Model):
"""A failover group update request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param read_write_endpoint: Read-write endpoint of the failover group instance.
:type read_write_endpoint: ~azure.mgmt.sql.models.FailoverGroupReadWriteEndpoint
:param read_only_endpoint: Read-only endpoint of the failover group instance.
:type read_only_endpoint: ~azure.mgmt.sql.models.FailoverGroupReadOnlyEndpoint
:param databases: List of databases in the failover group.
:type databases: list[str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'read_write_endpoint': {'key': 'properties.readWriteEndpoint', 'type': 'FailoverGroupReadWriteEndpoint'},
'read_only_endpoint': {'key': 'properties.readOnlyEndpoint', 'type': 'FailoverGroupReadOnlyEndpoint'},
'databases': {'key': 'properties.databases', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(FailoverGroupUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.read_write_endpoint = kwargs.get('read_write_endpoint', None)
self.read_only_endpoint = kwargs.get('read_only_endpoint', None)
self.databases = kwargs.get('databases', None)
class ResourceWithWritableName(msrest.serialization.Model):
"""ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:param name: Resource name.
:type name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceWithWritableName, self).__init__(**kwargs)
self.id = None
self.name = kwargs.get('name', None)
self.type = None
class ProxyResourceWithWritableName(ResourceWithWritableName):
"""ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:param name: Resource name.
:type name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResourceWithWritableName, self).__init__(**kwargs)
class FirewallRule(ProxyResourceWithWritableName):
"""A server firewall rule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:param name: Resource name.
:type name: str
:ivar type: Resource type.
:vartype type: str
:param start_ip_address: The start IP address of the firewall rule. Must be IPv4 format. Use
value '0.0.0.0' for all Azure-internal IP addresses.
:type start_ip_address: str
:param end_ip_address: The end IP address of the firewall rule. Must be IPv4 format. Must be
greater than or equal to startIpAddress. Use value '0.0.0.0' for all | |
should be
most_matches = []
for row_index in range(num_header_merged_rows+1):
matches = re.findall("[-.a-zA-Z0-9]+ ?[-.a-zA-Z0-9]* ?[-.a-zA-Z0-9]*",
grid[row_index])
if len(matches) > len(most_matches):
most_matches = matches
# if there are too many headers, it should be fixed
# NOTE: this only works for non-merged headers... mainly
# because those are the only ones affected.
if len(merged_grid[0]) > len(most_matches):
most_matches = map(lambda x: x.strip(), most_matches)
accounted = 0
to_merge = []
cell_index = 0
while cell_index < len(merged_grid[0])-1:
if accounted == len(most_matches):
# this can indicate that it wasn't a grid
#print "i guess there weren't enough matches...? ..."
break
if merged_grid[0][cell_index] == most_matches[accounted]:
# we want to look at the next one UNLESS we
# currently have a column of empties and
# the next cell is empty
#print "BOOOO", cell_index
#print [row[cell_index] == "" for row in merged_grid[1:]]
#print merged_grid[0][cell_index+1]
if all([row[cell_index] == "" for row in \
merged_grid[1:]]) and \
merged_grid[0][cell_index+1] == "":
to_merge.append((cell_index, cell_index+1))
accounted += 1
cell_index += 2
else:
accounted += 1
# if we see that we can combine two columns, we do so
# TODO: We probably want to examine the contents
# of the columns instead of just the headers. We
# could peg certain headers and then fill in the rest
# The way it works now kinda assumes nastran will
# always left align their numbers... which I think
# is trueish
elif (merged_grid[0][cell_index] + " " + merged_grid[0][cell_index+1] == most_matches[accounted] or
      merged_grid[0][cell_index] + merged_grid[0][cell_index+1] == most_matches[accounted] or
      (merged_grid[0][cell_index].startswith(most_matches[accounted]) and
       merged_grid[0][cell_index+1] == "")):
to_merge.append((cell_index, cell_index+1))
accounted += 1
cell_index += 1
else: cell_index += 1
normalized_grid = _merge_columns(merged_grid, to_merge)
if normalized_grid:
merged_grid = normalized_grid
# we continue our process of normalization, but this time
# from the bottom. We are going to consider how the data is
# laid out and if there are not enough headers, we will make
# them. Do note, we will not delete headers here if it
# seems there are too many because (1) this is done in other
# places and (2) empty columns can exist.
empties = []
# we only want data
for row in grid[num_header_merged_rows+1:]:
empties.append(set())
for index in range(max_len_row):
if index >= len(row) or row[index] == " ":
empties[-1].add(index)
# get all communal empties.
common = sorted(list(reduce(lambda x, y: x.intersection(y), \
empties)))
divisions = []
last = common[0]
for item in common[1:]:
if item != last+1:
divisions.append(last)
last = item
# note, this cannot apply to two-column tables,
# because we are purposely reducing the number of headers
if len(divisions) > len(merged_grid[0]) and not identical \
and num_header_merged_rows > 0:
# this also only applies to multi-header
# grids. If not, where would the header go?
# let's try to line up our data columns with
# the last row of headers instead of the whole merged header.
# When only the last row is considered, the alignment is
# often much saner.
little_grid = self._parse_grid(grid[num_header_merged_rows:])
# now try to match up the family to the elements. In
# general, the elements will be under the family. So
# we just have to go through the headers and see
# who got stuck together. If more than one little header
# got stuck, it was probably in a family.
complex_header_index = 0
last_complex_header = merged_grid[0][complex_header_index]
complex_groups = [[]]
for index, little_header in enumerate(little_grid[0]):
if not little_header:
continue
if little_header in last_complex_header:
complex_groups[-1].append(index)
_, _, last_complex_header = \
last_complex_header.partition(little_header)
# okay, if the next header is not in the
# remaining string, then just go on to the
# next one.
if index+1 < len(little_grid[0]) and \
(little_grid[0][index+1] not in last_complex_header):
complex_groups.append([])
complex_header_index += 1
last_complex_header = merged_grid[0][complex_header_index]
else:
complex_groups.append([])
complex_header_index += 1
last_complex_header = merged_grid[0][complex_header_index]
# now we have to turn the merged family-member jumble
# into distinct family-member1, family-member2 column
# names
seen = 0
for gindex, group in enumerate(complex_groups):
if len(group) <= 1:
little_grid[0][seen] = merged_grid[0][gindex]
seen += len(group)
continue
# implicit assumption that there are only two
# rows of headers
family_name = divided_grid[0][gindex]
for element in group:
little_grid[0][element] = family_name + " " + \
little_grid[0][element]
seen += len(group)
merged_grid = little_grid
return merged_grid
# row width is to enable you to specify a wide row (perhaps each
# id has two values for a certain column
def get(self, header, subcase, constraints, column_names, row_width=1):
"""
Get some data from the grid that we parsed previously.
You specify the grid you want with the header and a subcase.
If you don't care about the subcase, set it to None. You
can also give it a dictionary of constraints and also specify
which columns you'd like returned. The ``row_width`` optional
attribute is useful if you have something that looks like:
ELEMENT ID PRICES
1 4.00
5.00
2 38.30
60.00
As you can see, each element has two prices. In this case,
if we had a constraint that selected only the second element,
we would want both prices returned. Therefore, we would set
``row_width`` to 2.
header: str
This can be the actual header or a part of the header
you want to select.
subcase: None or int
If None, then just take the first one you see.
constraints: { row_name: value }
A dictionary of constraints. str: str
column_names: [ column_name ] or "*"
Specify a list of column names or the asterisk
character for all of them.
row_width: int
Optional. Sometimes there are two values per
item -- in different rows. In that case, ``row_width=2``.
If you specify the ``row_width``,
the constraints won't get rid of good data.
"""
# find the grid we're talking about by matching
# the header
myindex = None
maybeindex = None # for partial matches
for index in range(len(self.grids)):
if self.headers[index]["actual"].strip() == header or \
self.headers[index]["clean"] == header:
if not subcase or \
(subcase and self.subcases[index] == subcase):
myindex = index
break
else:
print "subcase mismatch!"
print "should be subcase", subcase
print "but the header's subcase is", self.subcases[index]
if header in self.headers[index]["actual"].strip() or \
header in self.headers[index]["clean"]:
if not subcase or \
(subcase and self.subcases[index] == subcase):
maybeindex = index
if myindex is None:
if maybeindex is None:
raise RuntimeError("Could not find " + header + \
" in:\n" + \
"\n".join(map(lambda x: x["actual"].strip(), self.headers)) + "\n - or -\n" + \
"\n".join(map(operator.itemgetter("clean"), self.headers)))
else:
myindex = maybeindex
# apply the dictionary of constraints in order
# to eliminate rows that don't work (simple where clause)
mygrid = self.grids[myindex]
if mygrid is None:
raise RuntimeError("The grid you are wanted (under header " +\
self.headers[myindex] + ") could not or " +\
"was not parsed.")
available_rows = range(1, len(mygrid)) # ignore header
to_delete = set([])
for cname, cvalue in constraints.iteritems():
column_num = mygrid[0].index(cname)
row = 1 # ignore header
while row < len(mygrid):
if mygrid[row][column_num] != cvalue:
to_delete.add(row)
else:
# skip the next (row_width-1) rows
row += row_width-1
row += 1
to_delete = sorted(list(to_delete), reverse=True)
for row in to_delete:
available_rows.remove(row)
# now, in the remaining rows, we will
# take the columns specified
if column_names == "*":
column_nums = range(len(mygrid[0]))
else:
try:
column_nums = map(mygrid[0].index, column_names)
except ValueError:
print "Could not find column names", column_names, \
"in", mygrid[0]
raise
result = []
for row in available_rows:
result.append([])
for column in column_nums:
#if subcase == 6:
# print "rula row column, my_grid", row, column, mygrid[row][column]
result[-1].append(mygrid[row][column])
if row_width > 1:
big_result = []
for i in range(0, row_width * int(len(result)/row_width), row_width):
big_result.append([])
for j in range(row_width):
big_result[-1].append(result[i+j])
return big_result
return result
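# Hedged usage sketch for get() above (kept as a comment so module behaviour
# is unchanged); `parser` stands for an instance of this parser class and the
# header/column names mirror the docstring's ELEMENT ID / PRICES example:
#   rows = parser.get(grid_header,          # actual or partial grid header
#                     None,                 # take the first matching subcase
#                     {"ELEMENT ID": "2"},  # simple where clause on a column
#                     ["PRICES"],           # columns to return
#                     row_width=2)          # keep both price rows per element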
def _header_score(line, row):
"""A helper function to assign the most likely headers.
Returns an int between 0 and 200. 200 is a sure header...
but it's really just that big numbers are more
likely than smaller numbers to be good headers.
Criterion: how centered is it, | |
See the tutorial for a worked example.
The HDF5 library allows for the application of "filters" when writing
data, which can provide moderate to significant levels of compression
for the datasets in question. Testing has shown that for some typical
cases of UVData objects (empty/sparse flag_array objects, and/or uniform
nsample_arrays), the built-in LZF filter provides significant
compression for minimal computational overhead.
Note that for typical HERA data files written after mid-2018, the
bitshuffle filter was applied to the data_array. Because of the lack of
portability, it is not included as an option here; in the future, it may
be added. Note that as long as bitshuffle is installed on the system in
a way that h5py can find it, no action needs to be taken to _read_ a
data_array encoded with bitshuffle (or an error will be raised).
"""
if os.path.exists(filename):
if clobber:
print("File exists; clobbering")
else:
raise IOError("File exists; skipping")
# write header and empty arrays to file
with h5py.File(filename, "w") as f:
# write header
header = f.create_group("Header")
self._write_header(header)
# initialize the data groups on disk
if self.future_array_shapes:
data_size = (self.Nblts, self.Nfreqs, self.Npols)
else:
data_size = (self.Nblts, 1, self.Nfreqs, self.Npols)
dgrp = f.create_group("Data")
if data_write_dtype is None:
# we don't know what kind of data we'll get--default to double-precision
data_write_dtype = "c16"
if data_write_dtype not in ("c8", "c16"):
# make sure the data type is correct
_check_uvh5_dtype(data_write_dtype)
dgrp.create_dataset(
"visdata",
data_size,
chunks=chunks,
dtype=data_write_dtype,
compression=data_compression,
)
dgrp.create_dataset(
"flags",
data_size,
chunks=chunks,
dtype="b1",
compression=flags_compression,
)
dgrp.create_dataset(
"nsamples",
data_size,
chunks=chunks,
dtype="f4",
compression=nsample_compression,
)
return
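# Hedged usage sketch (as a comment, since the surrounding lines are class
# methods): initialize an empty UVH5 file on disk, then write a subset of
# data into it with write_uvh5_part() defined further below. The keyword
# names follow the parameters referenced in this method's body; the exact
# public signature should be checked against the pyuvdata documentation.
#   uv.initialize_uvh5_file(filename, clobber=True, data_compression="lzf")
#   uv.write_uvh5_part(filename, data, flags, nsamples,
#                      polarizations=[-5])  # e.g. one polarization number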
def _check_header(
self, filename, run_check_acceptability=True, background_lsts=True
):
"""
Check that the metadata in a file header matches the object's metadata.
Parameters
----------
filename : str
    The UVH5 file on disk whose "/Header" data group is read and
    compared against this object's metadata.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
Returns
-------
None
Notes
-----
This function creates a new UVData object and reads in the header
information saved on disk to compare with the object in memory. Note
that this adds some small memory overhead, but this amount is typically
much smaller than the size of the data.
"""
uvd_file = UVH5()
with h5py.File(filename, "r") as f:
header = f["/Header"]
uvd_file._read_header(
header,
filename,
run_check_acceptability=run_check_acceptability,
background_lsts=background_lsts,
)
# temporarily remove data, flag, and nsample arrays, so we only check metadata
if self.data_array is not None:
data_array = self.data_array
self.data_array = None
replace_data = True
else:
replace_data = False
if self.flag_array is not None:
flag_array = self.flag_array
self.flag_array = None
replace_flags = True
else:
replace_flags = False
if self.nsample_array is not None:
nsample_array = self.nsample_array
self.nsample_array = None
replace_nsamples = True
else:
replace_nsamples = False
# also ignore filename attribute
uvd_file.filename = self.filename
uvd_file._filename.form = self._filename.form
if self != uvd_file:
raise AssertionError(
"The object metadata in memory and metadata on disk are different"
)
else:
# clean up after ourselves
if replace_data:
self.data_array = data_array
if replace_flags:
self.flag_array = flag_array
if replace_nsamples:
self.nsample_array = nsample_array
del uvd_file
return
def write_uvh5_part(
self,
filename,
data_array,
flag_array,
nsample_array,
check_header=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
run_check_acceptability=True,
add_to_history=None,
):
"""
Write out a part of a UVH5 file that has been previously initialized.
Parameters
----------
filename : str
The file on disk to write data to. It must already exist,
and is assumed to have been initialized with initialize_uvh5_file.
data_array : array of float
The data to write to disk. A check is done to ensure that
the dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
flag_array : array of bool
The flags array to write to disk. A check is done to ensure
that the dimensions of the data passed in conform to the ones specified
by the "selection" arguments.
nsample_array : array of float
The nsample array to write to disk. A check is done to ensure
that the dimensions of the data passed in conform to the ones specified
by the "selection" arguments.
check_header : bool
Option to check that the metadata present in the header
on disk matches that in the object.
run_check_acceptability : bool
If check_header, additional option to check
acceptable range of the values of parameters after reading in the file.
antenna_nums : array_like of int, optional
The antennas numbers to include when writing data into
the object (antenna positions and names for the excluded antennas
will be retained). This cannot be provided if antenna_names is
also provided.
antenna_names : array_like of str, optional
The antennas names to include when writing data into
the object (antenna positions and names for the excluded antennas
will be retained). This cannot be provided if antenna_nums is
also provided.
bls : list of tuples, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to write to the file. For length-2 tuples, the ordering of the numbers
within the tuple does not matter. For length-3 tuples, the polarization
string is in the order of the two antennas. If length-3 tuples are provided,
the polarizations argument below must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when writing data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y').
See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be written for both baselines (1, 2) and (2, 3) to reflect a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of the above antenna
args or the polarizations arg.
frequencies : array_like of float, optional
The frequencies to include when writing data to the file.
freq_chans : array_like of int, optional
The frequency channel numbers to include when writing data to the file.
times : array_like of float, optional
The times in Julian Day to include when writing data to the file.
time_range : array_like of float, optional
The time range in Julian Date to include when writing data to the
file, must be length 2. Some of the times in the object should fall
between the first and last elements. Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarizations to include when writing data to the file.
blt_inds : array_like of int, optional
The baseline-time indices to include when writing data to the file.
This is not commonly used.
add_to_history : str
String to append to history before write out. Default is no appending.
Returns
-------
None
Raises
------
AssertionError
An AssertionError is raised if: (1) the location specified by
`filename` does not exist; (2) the data_array, flag_array, and
nsample_array do not all have the same shape; (3) the shape of the
| |
file
emsgs += '\n' + text['09-001-00021'].format(drsfile)
# loop around keys in this drs_file
for key in header_error:
# get this iterations entry
entry = header_error[key]
# get the argname
argname = entry[1]
# construct error message
eargs = [key, entry[3], entry[2]]
if not entry[0]:
emsgs += '\n' + text['09-001-00022'].format(*eargs)
if len(emsgs) > 0:
emsg0 = TextEntry('09-001-00023', args=[argname])
else:
emsg0 = TextEntry(None)
return emsg0 + emsgs
def _get_arg(recipe, argname):
"""
Find an argument in the DrsRecipes argument dictionary or if not found
find argument in the DrsRecipes keyword argument dictionary or it not found
at all return None
:param recipe: DrsRecipe instance
:param argname: string, the argument/keyword argument to look for
:return: DrsArgument instance, the argument in DrsRecipe.args or
DrsRecipe.kwargs
"""
if argname in recipe.args:
arg = recipe.args[argname]
elif argname in recipe.kwargs:
arg = recipe.kwargs[argname]
else:
arg = None
# return arg
return arg
def _group_drs_files(params, drstable, **kwargs):
# set function name
func_name = display_func(params, '_group_drs_files', __NAME__)
# get properties from params
night_col = pcheck(params, 'REPROCESS_NIGHTCOL', 'night_col', kwargs,
func_name)
seq_colname = pcheck(params, 'REPROCESS_SEQCOL', 'seq_col', kwargs,
func_name)
time_colname = pcheck(params, 'REPROCESS_TIMECOL', 'time_col', kwargs,
func_name)
limit = kwargs.get('limit', None)
# deal with limit unset
if limit is None:
limit = np.inf
# sort drstable by time column
sortmask = np.argsort(drstable[time_colname])
drstable = drstable[sortmask]
# set up empty groups
groups = np.zeros(len(drstable))
# get the sequence column
sequence_col = drstable[seq_colname]
# start the group number at 1
group_number = 0
# set up night mask
valid = np.zeros(len(drstable), dtype=bool)
# by night name
for night in np.unique(list(drstable[night_col])):
# deal with just this night name
nightmask = drstable[night_col] == night
# deal with only one file in nightmask
if np.sum(nightmask) == 1:
group_number += 1
groups[nightmask] = group_number
valid |= nightmask
continue
# set invalid sequence numbers to 1
sequence_mask = sequence_col.astype(str) == ''
sequence_col[sequence_mask] = 1
# get the sequence number
sequences = sequence_col[nightmask].astype(int)
indices = np.arange(len(sequences))
# get the raw groups
rawgroups = np.array(-(sequences - indices) + 1)
# set up group mask
nightgroup = np.zeros(np.sum(nightmask))
# loop around the unique groups and assign group number
for rgroup in np.unique(rawgroups):
# new group
group_number += 1
# set up sub group parameters
subgroupnumber, it = 0, 0
# get group mask
groupmask = rawgroups == rgroup
# get positions
positions = np.where(groupmask)[0]
# deal with limit per group
if np.sum(groupmask) > limit:
# loop around all elements in group (using groupmask)
while it < np.sum(groupmask):
# find how many are in this group
subgroupnumber = np.sum(nightgroup == group_number)
# if we are above limit then start a new group
if subgroupnumber >= limit:
group_number += 1
nightgroup[positions[it]] = group_number
# iterate
it += 1
else:
# push the group number into night group
nightgroup[groupmask] = group_number
# add the night group to the full group
groups[nightmask] = nightgroup
# add to the valid mask
valid |= nightmask
# add groups and valid to dict
drstable['GROUPS'] = groups
# mask by the valid mask
drstable = drstable[valid]
# now work out mean time for each group
# start of mean dates as zeros
meandate = np.zeros(len(drstable))
# get groups from table
groups = drstable['GROUPS']
# loop around each group and change the mean date for the files
for g_it in range(1, int(max(groups)) + 1):
# group mask
groupmask = (groups == g_it)
# group mean
groupmean = np.mean(drstable[time_colname][groupmask])
# save group mean
meandate[groupmask] = groupmean
# add meandate to drstable
drstable['MEANDATE'] = meandate
# return the group
return drstable
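# Minimal illustration (assumed data, not part of the DRS itself) of the
# grouping rule used above: files whose sequence numbers increase by one stay
# in the same group, because sequence minus running index is constant within
# a run.
def _example_sequence_grouping():
    import numpy as np
    sequences = np.array([1, 2, 3, 1, 2, 1])
    indices = np.arange(len(sequences))
    rawgroups = np.array(-(sequences - indices) + 1)
    # rawgroups -> [0, 0, 0, 3, 3, 5]; equal values mark one sequence of files
    return rawgroups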
def _get_runorder(recipe, argdict, kwargdict):
# set function name
func_name = display_func(recipe.drs_params, '_get_runorder', __NAME__)
# set up storage
runorder = OrderedDict()
# get args/kwargs from recipe
args = recipe.args
kwargs = recipe.kwargs
# iterator for non-positional variables
it = 0
# loop around args
for argname in args.keys():
# must be in rundict keys
if argname not in argdict.keys():
continue
# get arg
arg = args[argname]
# deal with non-required arguments when argdict has no values
# these are allowed only if arg.reprocess is True
# we skip adding to runorder
if hasattr(argdict[argname], '__len__'):
arglen = len(argdict[argname])
if arg.reprocess and not arg.required and (arglen == 0):
continue
# get position or set it using iterator
if arg.pos is not None:
runorder[arg.pos] = argname
else:
runorder[1000 + it] = argname
it += 1
# loop around args
for kwargname in kwargs.keys():
# must be in rundict keys
if kwargname not in kwargdict.keys():
continue
# get arg
kwarg = kwargs[kwargname]
# deal with non-required arguments when argdict has no values
# these are allowed only if arg.reprocess is True
# we skip adding to runorder
if hasattr(kwargdict[kwargname], '__len__'):
kwarglen = len(kwargdict[kwargname])
if kwarg.reprocess and not kwarg.required and (kwarglen == 0):
continue
# get position or set it using iterator
if kwarg.pos is not None:
runorder[kwarg.pos] = kwargname
else:
runorder[1000 + it] = kwargname
it += 1
sortrunorder = np.argsort(list(runorder.keys()))
runorder = np.array(list(runorder.values()))[sortrunorder]
# merge argdict and kwargdict
rundict = dict()
for rorder in runorder:
if rorder in argdict:
rundict[rorder] = argdict[rorder]
else:
rundict[rorder] = kwargdict[rorder]
# return run order and run dictionary
return runorder, rundict
def _gen_run(params, rundict, runorder, nightname=None, meantime=None,
arg0=None, gtable0=None, file_col=None, masternight=False):
# deal with unset values (not used)
if arg0 is None:
arg0 = ''
if gtable0 is None:
gtable0 = dict(filecol=None)
if nightname is None:
nightname = params['NIGHTNAME']
if masternight:
nightname = params['MASTER_NIGHT']
if meantime is None:
meantime = 0.0
# need to find any argument that is not a file argument but is a list
pkeys, pvalues = [], []
for argname in runorder:
# only do this for numpy arrays and lists (not files)
if isinstance(rundict[argname], (np.ndarray, list)):
# append values to storage
pvalues.append(list(rundict[argname]))
pkeys.append(argname)
# convert pkey to array
pkeys = np.array(pkeys)
# deal with no list values
if len(pkeys) == 0:
combinations = [None]
# else we assume we want every combination of arguments (otherwise it is
# more complicated)
else:
combinations = list(itertools.product(*pvalues))
# storage for new runs
new_runs = []
# loop around combinations
for combination in combinations:
# get dictionary storage
new_run = dict()
# loop around argnames
for argname in runorder:
# deal with having combinations
if combination is not None and argname in pkeys:
# find position in combinations
pos = np.where(pkeys == argname)[0][0]
# get value from combinations
value = combination[pos]
else:
value = rundict[argname]
# ------------------------------------------------------------------
# if we are dealing with the first argument we use this
# group's files (gtable0)
if argname == arg0:
new_run[argname] = list(gtable0['OUT'])
# if we are dealing with 'directory' set it from nightname
elif argname == 'directory':
new_run[argname] = nightname
# if we are not dealing with a list of files just set value
elif not isinstance(value, OrderedDict):
new_run[argname] = value
# else we are dealing with another list and must find the
# best files (closest in time) to add that match this
# group
else:
margs = [params, argname, rundict, nightname, meantime]
new_run[argname] = _match_group(*margs)
# append new run to new runs
new_runs.append(new_run)
# return new_runs
return new_runs
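# Minimal illustration (hypothetical values, not part of the DRS itself) of
# how the itertools.product call above expands list-valued arguments into one
# run per combination.
def _example_argument_combinations():
    import itertools
    pvalues = [['A', 'B'], [1, 2, 3]]   # two list-valued (non-file) arguments
    combinations = list(itertools.product(*pvalues))
    # -> [('A', 1), ('A', 2), ('A', 3), ('B', 1), ('B', 2), ('B', 3)]
    return combinations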
def _find_first_filearg(params, runorder, argdict, kwargdict):
# set function name
func_name = display_func(params, '_find_first_filearg', __NAME__)
# loop around the run order
for argname in runorder:
if argname in argdict:
if isinstance(argdict[argname], OrderedDict):
return argname, argdict[argname]
elif argname in kwargdict:
if isinstance(kwargdict[argname], OrderedDict):
return argname, kwargdict[argname]
return None
# def _find_next_group(argname, drstable, usedgroups, groups, ugroups):
# # make sure argname is in usedgroups
# if argname not in usedgroups:
# usedgroups[argname] = []
#
# arggroup = list(usedgroups[argname])
# # loop around unique groups
# for group in ugroups:
# # if used skip
# if group in arggroup:
# continue
# else:
# # find rows in this group
# mask = groups == group
# # add group to used groups
# usedgroups[argname].append(group)
# # return masked table and usedgroups
# return Table(drstable[mask]), usedgroups
# # return None if all groups used
# return None, usedgroups
def _find_next_group(argname, drstable, usedgroups, groups, ugroups):
# | |
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Plugin Installer module
-------------------
Date : May 2013
Copyright : (C) 2013 by <NAME>
Email : info <EMAIL>
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 <NAME>
Copyright (C) 2008-2013 <NAME>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import (pyqtSignal, QObject, QCoreApplication, QFile,
QDir, QDirIterator, QDate, QUrl, QFileInfo,
QLocale, QByteArray)
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtNetwork import QNetworkRequest, QNetworkReply
from qgis.core import Qgis, QgsSettings, QgsNetworkRequestParameters
import sys
import os
import codecs
import re
import configparser
import qgis.utils
from qgis.core import QgsNetworkAccessManager, QgsApplication
from qgis.utils import iface, plugin_paths
from .version_compare import pyQgisVersion, compareVersions, normalizeVersion, isCompatible
"""
Data structure:
mRepositories = dict of dicts: {repoName : {"url" unicode,
"enabled" bool,
"valid" bool,
"Relay" Relay, # Relay object for transmitting signals from QPHttp with adding the repoName information
"Request" QNetworkRequest,
"xmlData" QNetworkReply,
"state" int, (0 - disabled, 1-loading, 2-loaded ok, 3-error (to be retried), 4-rejected)
"error" unicode}}
mPlugins = dict of dicts {id : {
"id" unicode # module name
"name" unicode, # human readable plugin name
"description" unicode, # short description of the plugin purpose only
"about" unicode, # longer description: how does it work, where does it install, how to run it?
"category" unicode, # will be removed?
"tags" unicode, # comma separated, spaces allowed
"changelog" unicode, # may be multiline
"author_name" unicode, # author name
"author_email" unicode, # author email
"homepage" unicode, # url to the plugin homepage
"tracker" unicode, # url to a tracker site
"code_repository" unicode, # url to the source code repository
"version_installed" unicode, # installed instance version
"library" unicode, # absolute path to the installed library / Python module
"icon" unicode, # path to the first:(INSTALLED | AVAILABLE) icon
"pythonic" const bool=True # True if Python plugin
"readonly" boolean, # True if core plugin
"installed" boolean, # True if installed
"available" boolean, # True if available in repositories
"status" unicode, # ( not installed | new ) | ( installed | upgradeable | orphan | newer )
"error" unicode, # NULL | broken | incompatible | dependent
"error_details" unicode, # error description
"experimental" boolean, # true if experimental, false if stable
"deprecated" boolean, # true if deprecated, false if actual
"trusted" boolean, # true if trusted, false if not trusted
"version_available" unicode, # available version
"zip_repository" unicode, # the remote repository id
"download_url" unicode, # url for downloading the plugin
"filename" unicode, # the zip file name to be unzipped after downloaded
"downloads" unicode, # number of downloads
"average_vote" unicode, # average vote
"rating_votes" unicode, # number of votes
"plugin_dependencies" unicode, # PIP-style comma separated list of plugin dependencies
}}
"""
translatableAttributes = ["name", "description", "about", "tags"]
settingsGroup = "app/plugin_installer"
reposGroup = "app/plugin_repositories"
officialRepo = (QCoreApplication.translate("QgsPluginInstaller", "QGIS Official Plugin Repository"), "https://plugins.qgis.org/plugins/plugins.xml")
# --- common functions ------------------------------------------------------------------- #
def removeDir(path):
result = ""
if not QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller", "Nothing to remove! Plugin directory doesn't exist:") + "\n" + path
elif QFile(path).remove(): # if it is only a link, just remove it without resolving it.
pass
else:
fltr = QDir.Dirs | QDir.Files | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QFile(item).remove():
pass
fltr = QDir.Dirs | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QDir().rmpath(item):
pass
if QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller", "Failed to remove the directory:") + "\n" + path + "\n" + QCoreApplication.translate("QgsPluginInstaller", "Check permissions or remove it manually")
# restore plugin directory if removed by QDir().rmpath()
pluginDir = qgis.utils.home_plugin_path
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
return result
# --- /common functions ------------------------------------------------------------------ #
# --- class Relay ----------------------------------------------------------------------- #
class Relay(QObject):
""" Relay object for transmitting signals from QPHttp with adding the repoName information """
# ----------------------------------------- #
anythingChanged = pyqtSignal(str, int, int)
def __init__(self, key):
QObject.__init__(self)
self.key = key
def stateChanged(self, state):
self.anythingChanged.emit(self.key, state, 0)
# ----------------------------------------- #
def dataReadProgress(self, done, total):
state = 4
if total > 0:
progress = int(float(done) / float(total) * 100)
else:
progress = 0
self.anythingChanged.emit(self.key, state, progress)
# --- /class Relay ---------------------------------------------------------------------- #
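# Illustrative sketch (an assumption, not part of the original module): a Relay is keyed
# by repository name and wired to a reply's signals, so downstream slots receive
# (repoName, state, progress) instead of the bare QNetworkReply signals.
#
#   relay = Relay("QGIS Official Plugin Repository")
#   relay.anythingChanged.connect(someProgressSlot)          # someProgressSlot is hypothetical
#   reply.downloadProgress.connect(relay.dataReadProgress)   # reply is a QNetworkReply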
# --- class Repositories ----------------------------------------------------------------- #
class Repositories(QObject):
""" A dict-like class for handling repositories data """
# ----------------------------------------- #
anythingChanged = pyqtSignal(str, int, int)
repositoryFetched = pyqtSignal(str)
checkingDone = pyqtSignal()
def __init__(self):
QObject.__init__(self)
self.mRepositories = {}
self.httpId = {} # {httpId : repoName}
self.mInspectionFilter = None
# ----------------------------------------- #
def all(self):
""" return dict of all repositories """
return self.mRepositories
# ----------------------------------------- #
def allEnabled(self):
""" return dict of all enabled and valid repositories """
if self.mInspectionFilter:
return {self.mInspectionFilter: self.mRepositories[self.mInspectionFilter]}
repos = {}
for i in self.mRepositories:
if self.mRepositories[i]["enabled"] and self.mRepositories[i]["valid"]:
repos[i] = self.mRepositories[i]
return repos
# ----------------------------------------- #
def allUnavailable(self):
""" return dict of all unavailable repositories """
repos = {}
if self.mInspectionFilter:
# return the inspected repo if unavailable, otherwise empty dict
if self.mRepositories[self.mInspectionFilter]["state"] == 3:
repos[self.mInspectionFilter] = self.mRepositories[self.mInspectionFilter]
return repos
for i in self.mRepositories:
if self.mRepositories[i]["enabled"] and self.mRepositories[i]["valid"] and self.mRepositories[i]["state"] == 3:
repos[i] = self.mRepositories[i]
return repos
# ----------------------------------------- #
def urlParams(self):
""" return GET parameters to be added to every request """
# Strip down the point release segment from the version string
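# e.g. (illustrative values) a pyQgisVersion() of "3.28.4" yields "?qgis=3.28"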
return "?qgis={}".format(re.sub(r'\.\d*$', '', pyQgisVersion()))
# ----------------------------------------- #
def setRepositoryData(self, reposName, key, value):
""" write data to the mRepositories dict """
self.mRepositories[reposName][key] = value
# ----------------------------------------- #
def remove(self, reposName):
""" remove given item from the mRepositories dict """
del self.mRepositories[reposName]
# ----------------------------------------- #
def rename(self, oldName, newName):
""" rename repository key """
if oldName == newName:
return
self.mRepositories[newName] = self.mRepositories[oldName]
del self.mRepositories[oldName]
# ----------------------------------------- #
def checkingOnStart(self):
""" return true if checking for news and updates is enabled """
settings = QgsSettings()
return settings.value(settingsGroup + "/checkOnStart", False, type=bool)
# ----------------------------------------- #
def setCheckingOnStart(self, state):
""" set state of checking for news and updates """
settings = QgsSettings()
settings.setValue(settingsGroup + "/checkOnStart", state)
# ----------------------------------------- #
def checkingOnStartInterval(self):
""" return checking for news and updates interval """
settings = QgsSettings()
try:
# QgsSettings may contain non-int value...
i = settings.value(settingsGroup + "/checkOnStartInterval", 1, type=int)
except:
# fall back to 1 day by default
i = 1
if i < 0:
i = 1
# allowed values: 0,1,3,7,14,30 days
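# e.g. (illustrative) a stored value of 10 snaps down to 7, and 45 snaps down to 30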
interval = 0
for j in [1, 3, 7, 14, 30]:
if i >= j:
interval = j
return interval
# ----------------------------------------- #
def setCheckingOnStartInterval(self, interval):
""" set checking for news and updates interval """
settings = QgsSettings()
settings.setValue(settingsGroup + "/checkOnStartInterval", interval)
# ----------------------------------------- #
def saveCheckingOnStartLastDate(self):
""" set today's date as the day of last checking """
settings = QgsSettings()
settings.setValue(settingsGroup + "/checkOnStartLastDate", QDate.currentDate())
# ----------------------------------------- #
def timeForChecking(self):
""" determine whether it's the time for checking for news and updates now """
if self.checkingOnStartInterval() == 0:
return True
settings = QgsSettings()
try:
# QgsSettings may contain an invalid value...
interval = settings.value(settingsGroup + "/checkOnStartLastDate", type=QDate).daysTo(QDate.currentDate())
except:
interval = 0
if interval >= self.checkingOnStartInterval():
return True
else:
return False
# ----------------------------------------- #
def load(self):
""" populate the mRepositories dict"""
self.mRepositories = {}
settings = QgsSettings()
settings.beginGroup(reposGroup)
# first, update repositories in QgsSettings if needed
officialRepoPresent = False
for key in settings.childGroups():
url = settings.value(key + "/url", "", type=str)
if url == officialRepo[1]:
officialRepoPresent = True
if not officialRepoPresent:
settings.setValue(officialRepo[0] + "/url", officialRepo[1])
for key in settings.childGroups():
self.mRepositories[key] = {}
self.mRepositories[key]["url"] = settings.value(key + "/url", "", type=str)
self.mRepositories[key]["authcfg"] = settings.value(key + "/authcfg", "", type=str)
self.mRepositories[key]["enabled"] = settings.value(key + "/enabled", True, type=bool)
self.mRepositories[key]["valid"] = settings.value(key + "/valid", True, type=bool)
self.mRepositories[key]["Relay"] = Relay(key)
self.mRepositories[key]["xmlData"] = None
self.mRepositories[key]["state"] = 0
self.mRepositories[key]["error"] = ""
settings.endGroup()
# ----------------------------------------- #
def requestFetching(self, key, url=None, redirectionCounter=0):
""" start fetching the repository given by key """
self.mRepositories[key]["state"] = 1
if not url:
url = QUrl(self.mRepositories[key]["url"] + self.urlParams())
# v=str(Qgis.QGIS_VERSION_INT)
# url.addQueryItem('qgis', '.'.join([str(int(s)) for s in [v[0], v[1:3]]]) ) # don't
Argument.
:type y: float32
:rtype: float32
"""
def fadd_rn(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rn.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fadd_ru(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_ru.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fadd_rz(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fadd_rz.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fast_cosf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_cosf.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_exp10f(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_exp10f.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_expf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_expf.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_fdividef(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_fdividef.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fast_log10f(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log10f.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_log2f(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_log2f.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_logf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_logf.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_powf(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_powf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fast_sincosf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sincosf.html
:param x: Argument.
:type x: float32
:rtype: UniTuple(float32 x 2)
"""
def fast_sinf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_sinf.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fast_tanf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_tanf.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fdim(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdim.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:rtype: float64
"""
def fdimf(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdimf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fdiv_rd(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rd.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fdiv_rn(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rn.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fdiv_ru(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_ru.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fdiv_rz(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fdiv_rz.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def ffs(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffs.html
:param x: Argument.
:type x: int32
:rtype: int32
"""
def ffsll(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_ffsll.html
:param x: Argument.
:type x: int64
:rtype: int32
"""
def finitef(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_finitef.html
:param x: Argument.
:type x: float32
:rtype: int32
"""
def float2half_rn(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2half_rn.html
:param f: Argument.
:type f: float32
:rtype: int16
"""
def float2int_rd(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rd.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2int_rn(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rn.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2int_ru(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_ru.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2int_rz(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rz.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2ll_rd(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rd.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ll_rn(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rn.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ll_ru(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_ru.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ll_rz(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ll_rz.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2uint_rd(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rd.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2uint_rn(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rn.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2uint_ru(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_ru.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2uint_rz(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2uint_rz.html
:param in: Argument.
:type in: float32
:rtype: int32
"""
def float2ull_rd(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rd.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ull_rn(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rn.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ull_ru(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_ru.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float2ull_rz(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2ull_rz.html
:param f: Argument.
:type f: float32
:rtype: int64
"""
def float_as_int(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float_as_int.html
:param x: Argument.
:type x: float32
:rtype: int32
"""
def floor(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floor.html
:param f: Argument.
:type f: float64
:rtype: float64
"""
def floorf(f):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_floorf.html
:param f: Argument.
:type f: float32
:rtype: float32
"""
def fma(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:param z: Argument.
:type z: float64
:rtype: float64
"""
def fma_rd(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rd.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:param z: Argument.
:type z: float64
:rtype: float64
"""
def fma_rn(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rn.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:param z: Argument.
:type z: float64
:rtype: float64
"""
def fma_ru(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_ru.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:param z: Argument.
:type z: float64
:rtype: float64
"""
def fma_rz(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fma_rz.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:param z: Argument.
:type z: float64
:rtype: float64
"""
def fmaf(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:param z: Argument.
:type z: float32
:rtype: float32
"""
def fmaf_rd(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rd.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:param z: Argument.
:type z: float32
:rtype: float32
"""
def fmaf_rn(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rn.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:param z: Argument.
:type z: float32
:rtype: float32
"""
def fmaf_ru(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_ru.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:param z: Argument.
:type z: float32
:rtype: float32
"""
def fmaf_rz(x, y, z):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaf_rz.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:param z: Argument.
:type z: float32
:rtype: float32
"""
def fmax(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmax.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:rtype: float64
"""
def fmaxf(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmaxf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmin(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmin.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:rtype: float64
"""
def fminf(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fminf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmod(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmod.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:rtype: float64
"""
def fmodf(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmodf.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmul_rd(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rd.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmul_rn(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rn.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmul_ru(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_ru.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fmul_rz(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rz.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def frcp_rd(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rd.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def frcp_rn(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rn.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def frcp_ru(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_ru.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def frcp_rz(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frcp_rz.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def frexp(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexp.html
:param x: Argument.
:type x: float64
:rtype: Tuple(float64, int32)
"""
def frexpf(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frexpf.html
:param x: Argument.
:type x: float32
:rtype: Tuple(float32, int32)
"""
def frsqrt_rn(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_frsqrt_rn.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fsqrt_rd(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rd.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fsqrt_rn(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rn.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fsqrt_ru(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_ru.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fsqrt_rz(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsqrt_rz.html
:param x: Argument.
:type x: float32
:rtype: float32
"""
def fsub_rd(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rd.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fsub_rn(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rn.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fsub_ru(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_ru.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def fsub_rz(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fsub_rz.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
"""
def hadd(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_hadd.html
:param x: Argument.
:type x: int32
:param y: Argument.
:type y: int32
:rtype: int32
"""
def
default_pictures_path + default_profile_picture
member = db.get_membership(current_user)
if request.method == "GET":
return render_template("group_settings.html", group_id=group.ID, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, form=request.form)
name = request.form.get("group_name", None)
description = request.form.get("description", None)
image = request.files["group_image"]
visibility = request.form.get("visibility", None)
# Values check
if len(name) > 30:
flash("Group name is too long. Maximum is 30 characters.")
return render_template("group_settings.html", group_id=group.ID, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, form=request.form)
if not db.check_groupname(name):
flash("Group name is already taken. Please use different name.")
return render_template("group_settings.html", group_id=group.ID, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, form=request.form)
if description and len(description) > 2000:
flash("Group description is too long. Maximum is 2000 characters.")
return render_template("group_settings.html", group_id=group.ID, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, form=request.form)
if image:
blob = image.read()
if sys.getsizeof(blob) > (2 * 1024 * 1024):
flash("Group image is too big. Maximum allowed size is 2MB.")
return render_template("group_settings.html", group_id=group.ID, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, form=request.form)
mimetype = image.mimetype
image = (blob, mimetype)
if visibility:
visibility = int(visibility)
id = db.insert_to_group(id=group.ID, name=name, description=description, image=image, mode=visibility)
flash("Your changes have been applied.")
return redirect(url_for("group", group_id=id))
@app.route("/group_notifications/<group_id>/")
@login_required
def group_notifications(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
admin = current_user.Mode & 2
owner = False
if not admin:
owner = current_user.ID == group.User_ID
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not admin and not owner and not moderator:
return redirect(url_for("tresspass"))
if current_user.Image is not None:
profile_pic = "/profile_picture/" + str(current_user.ID)
else:
profile_pic = default_pictures_path + default_profile_picture
member = db.get_membership(current_user)
notifications = db.get_applicants(group)
return render_template("notifications.html", group_id=group.ID, notifications=notifications, user_id=current_user.ID, username=current_user.Login,
img_src=profile_pic, **member, admin=admin, owner=owner, moderator=moderator, form=request.form)
@app.route("/group_members/<group_id>/")
def members(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
private = group.Mode & 1
if private and current_user.is_anonymous:
flash("You will need to log in to gain access to this page.")
return redirect(url_for("welcome"))
rights = db.getuserrights(current_user, group)
closed = group.Mode & 2
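# Mode is used as a bit mask throughout this app: for groups, bit 0 (value 1) marks the
# group private and bit 1 (value 2) marks it closed; for users, bit 1 (value 2) marks an
# administrator.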
if closed and (rights["user"] or rights["visitor"]):
return redirect(url_for("tresspass"))
group_owner = User.query.filter_by(ID=group.User_ID).first()
if group_owner is None:
return redirect(url_for("lost"))
if group_owner.Image is not None:
owner_src = "/profile_picture/" + str(group_owner.ID)
else:
owner_src = default_pictures_path + default_profile_picture
moderators = db.get_moderators(group)
members = db.get_members(group)
if current_user.is_anonymous:
user_id = None
username = "Visitor"
profile_pic = default_pictures_path + default_profile_picture
else:
user_id = current_user.ID
username = current_user.Login
if current_user.Image is not None:
profile_pic = "/profile_picture/" + str(current_user.ID)
else:
profile_pic = default_pictures_path + default_profile_picture
member = db.get_membership(current_user)
return render_template("group_members.html", group_id=group.ID, group_owner=group_owner, owner_src=owner_src,
moderators=moderators, members=members, user_id=user_id, username=username,
img_src=profile_pic, **member, **rights)
@app.route("/apply/member/<group_id>/")
@login_required
def ask_mem(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
owner = current_user.ID == group.User_ID
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
member = Is_member.query.filter_by(User=current_user.ID, Group=group.ID).first()
if owner or moderator or member:
return redirect(url_for("lost"))
db.insert_to_applications(current_user.ID, group.ID, True)
flash("Your request has been sent for a review.")
return redirect(url_for("home"))
@app.route("/apply/moderator/<group_id>/")
@login_required
def ask_mod(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
member = Is_member.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not member:
return redirect(url_for("lost"))
db.insert_to_applications(current_user.ID, group.ID, False)
flash("Your request has been sent for a review.")
return redirect(url_for("home"))
@app.route("/accept/<application_id>")
@login_required
def accept_application(application_id):
application = Applications.query.filter_by(ID=application_id).first()
if application is None:
return redirect(url_for("lost"))
group = Group.query.filter_by(ID=application.Group).first()
if group is None:
db.delete_from_db(application)
return redirect(url_for("home"))
# User rights
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID or admin
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
# Moderator request
if not application.Membership and not owner:
return redirect(url_for("tresspass"))
# Membership request
if not owner and not moderator:
return redirect(url_for("tresspass"))
user = User.query.filter_by(ID=application.User).first()
if user is None:
db.delete_from_db(application)
return redirect(url_for("group_notifications", group_id=group.ID))
membership = Is_member.query.filter_by(User=user.ID, Group=group.ID).first()
moderatorship = Moderate.query.filter_by(User=user.ID, Group=group.ID).first()
if application.Membership and not membership and not moderatorship:
db.insert_to_membership(user.ID, group.ID)
elif not application.Membership and membership and not moderatorship:
db.insert_to_moderate(user.ID, group.ID)
db.delete_from_db(membership)
db.delete_from_db(application)
return redirect(url_for("group_notifications", group_id=group.ID))
@app.route("/reject/<application_id>")
@login_required
def reject_application(application_id):
application = Applications.query.filter_by(ID=application_id).first()
if application is None:
return redirect(url_for("lost"))
group = Group.query.filter_by(ID=application.Group).first()
if group is None:
db.delete_from_db(application)
return redirect(url_for("home"))
# User rights
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID or admin
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
# Moderator request
if not application.Membership and not owner:
return redirect(url_for("tresspass"))
# Membership request
if not owner and not moderator:
return redirect(url_for("tresspass"))
db.delete_from_db(application)
return redirect(url_for("group_notifications", group_id=group.ID))
@app.route("/leave/<group_id>/")
@login_required
def leave_group(group_id):
return redirect(url_for("kick", group_id=group_id, user_id=current_user.ID))
@app.route("/kick/group/<group_id>/<user_id>/")
@app.route("/kick/groups/<group_id>/<user_id>/")
@login_required
def kick(group_id, user_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
user = User.query.filter_by(ID=user_id).first()
if user is None:
return redirect(url_for("lost"))
# Kicked user rights
is_member = Is_member.query.filter_by(User=user.ID, Group=group.ID).first()
is_moderator = Moderate.query.filter_by(User=user.ID, Group=group.ID).first()
if not is_member and not is_moderator:
return redirect(url_for("lost"))
# Kicking user rights
himself = current_user.ID == user.ID
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID or admin or himself
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not owner and not moderator:
return redirect(url_for("tresspass"))
if is_moderator and not owner:
return redirect(url_for("tresspass"))
if is_member:
db.delete_from_db(is_member)
if is_moderator:
db.delete_from_db(is_moderator)
if himself:
flash("You have left the group " + group.Name + ".")
else:
flash("User " + user.Login + " was kicked from the group " + group.Name + ".")
return redirect(url_for("group", group_id=group.ID))
@app.route("/delete/group/<group_id>/")
@app.route("/delete/groups/<group_id>/")
@login_required
def delete_group(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID
if not admin and not owner:
return redirect(url_for("tresspass"))
flash("You have deleted the group " + group.Name + ".")
db.delete_from_db(group)
return redirect(url_for("group", group_id=default_group_ID))
################################################################################
# Threads
################################################################################
@app.route("/create/thread/<group_id>/", methods=["POST"])
@login_required
def create_thread(group_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
member = Is_member.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not admin and not owner and not moderator and not member:
return redirect(url_for("tresspass"))
name = request.form.get("thread_subject", None)
description = request.form.get("description", None)
# Values check
if len(name) > 30:
flash("Subject is too long. Maximum is 30 characters.")
return redirect(url_for("group", group_id=group.ID, form=json.dumps(request.form)))
if not db.check_threadname(group, name):
flash("Subject is already taken.")
return redirect(url_for("group", group_id=group.ID, form=json.dumps(request.form)))
if description and len(description) > 2000:
flash("Description is too long. Maximum is 2000 characters.")
return redirect(url_for("group", group_id=group.ID, form=json.dumps(request.form)))
id = db.insert_to_thread(group_id=group.ID, thread_name=name, description=description)
return redirect(url_for("thread", group_id=group.ID, thread_id=id))
@app.route("/group/<group_id>/<thread_id>/")
@app.route("/groups/<group_id>/<thread_id>/")
def thread(group_id, thread_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("group", group_id=default_group_ID))
thread = Thread.query.filter_by(Group_ID=group.ID, ID=thread_id).first()
if thread is None:
return redirect(url_for("group", group_id=group.ID))
closed = group.Mode & 2
private = group.Mode & 1
if private and current_user.is_anonymous:
flash("You will need to log in to gain access to this page.")
return redirect(url_for("welcome"))
rights = db.getuserrights(current_user, group)
if closed and (rights["user"] or rights["visitor"]):
return redirect(url_for("tresspass"))
if current_user.is_anonymous:
user_id = None
username = "Visitor"
profile_pic = default_pictures_path + default_profile_picture
else:
user_id = current_user.ID
username = current_user.Login
if current_user.Image is not None:
profile_pic = "/profile_picture/" + str(current_user.ID)
else:
profile_pic = default_pictures_path + default_profile_picture
member = db.get_membership(current_user)
return render_template("thread_page.html", group_id=group.ID, thread_id=thread.ID, groupname=group.Name, threadname=thread.Name,
description=thread.Description, posts=db.get_messages(thread, 50), user_id=user_id, username=username,
img_src=profile_pic, db=db, **member, **rights)
@app.route("/delete/thread/<group_id>/<thread_id>/")
@login_required
def delete_thread(group_id, thread_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
thread = Thread.query.filter_by(Group_ID=group.ID, ID=thread_id).first()
if thread is None:
return redirect(url_for("lost"))
# User rights
admin = current_user.Mode & 2
owner = current_user.ID == group.User_ID
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not admin and not owner and not moderator:
return redirect(url_for("tresspass"))
flash("Thread " + thread.Name + " was succesfully deleted.")
db.delete_from_db(thread)
return redirect(url_for("group", group_id=group.ID))
################################################################################
# Messages
################################################################################
@app.route("/create_message/<group_id>/<thread_id>/", methods=["POST"])
@login_required
def create_message(group_id, thread_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
thread = Thread.query.filter_by(Group_ID=group.ID, ID=thread_id).first()
if thread is None:
return redirect(url_for("lost"))
db.insert_to_messages(current_user, thread, message=request.form.get("content", None))
return redirect(url_for('thread', group_id=group.ID, thread_id=thread.ID))
''' TODO previous version
thread = Thread.query.filter_by(ID=thread_id).first()
eprint(request.form.keys())
db.insert_to_messages(current_user, thread, message=request.form['content'])
return redirect(url_for('thread', group_id=group_id, thread_id=thread_id))
'''
@app.route("/get_messages/<group_id>/<thread_id>/", methods=["GET"])
def get_messages(group_id, thread_id):
return db.messages_to_json(db.get_messages(Thread.query.filter_by(ID=thread_id).first(), 200))
@app.route("/group/<group_id>/<thread_id>/<message_id>/delete/")
@app.route("/groups/<group_id>/<thread_id>/<message_id>/delete/")
@login_required
def delete_message(group_id, thread_id, message_id):
group = Group.query.filter_by(ID=group_id).first()
if group is None:
return redirect(url_for("lost"))
thread = Thread.query.filter_by(Group_ID=group.ID, ID=thread_id).first()
if thread is None:
return redirect(url_for("lost"))
message = Messages.query.filter_by(ID_group=group.ID, Thread_name=thread.Name, ID=message_id).first()
if message is None:
return redirect(url_for("lost"))
admin = current_user.Mode & 2
author = current_user.ID == message.User_ID
owner = current_user.ID == group.User_ID
moderator = Moderate.query.filter_by(User=current_user.ID, Group=group.ID).first()
if not admin and not author and not owner and not moderator:
return redirect(url_for("tresspass"))
db.delete_from_db(message)
return redirect(url_for('thread', group_id=group.ID, thread_id=thread.ID))
@app.route("/group/<group_id>/<thread_id>/<message_id>/inc/")
@app.route("/groups/<group_id>/<thread_id>/<message_id>/inc/")
@login_required
def increment(group_id, thread_id, message_id):
rank_mutex.acquire()
group = Group.query.filter_by(ID=group_id).first()
if group is None:
rank_mutex.release()
return redirect(url_for("lost"))
thread = Thread.query.filter_by(Group_ID=group.ID, ID=thread_id).first()
if thread is None:
rank_mutex.release()
return redirect(url_for("lost"))
message = Messages.query.filter_by(ID_group=group.ID, Thread_name=thread.Name, ID=message_id).first()
if message is None:
rank_mutex.release()
return redirect(url_for("lost"))
rank = 0
ranking = Ranking.query.filter_by(User=current_user.ID, Message=message.ID, Thread_name=thread.Name, ID_group=group.ID).first()
if not ranking:
rank = rank + 1
db.insert_to_ranking(message=message, user=current_user, inc=True)
elif ranking.Inc:
rank = rank - 1
db.delete_from_db(ranking)
else:
rank = rank + 1
db.delete_from_db(ranking)
db.insert_to_messages(id=message.ID, ranking=message.Rank + rank, author=message.User_ID, thread=thread)
rank_mutex.release()
# Repository: IngenuityEngine/ftrack-connect-foundry
from FnAssetAPI.specifications import ShotSpecification, ImageSpecification
from FnAssetAPI.ui.toolkit import QtCore, QtGui
from PublishShotClipsSummaryWidget import PublishShotClipsSummaryWidget
import FnAssetAPI.logging
from .. import utils as cmdUtils
class PublishShotClipsDialog(QtGui.QDialog):
"""
"""
def __init__(self, context=None, parent=None):
super(PublishShotClipsDialog, self).__init__(parent)
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
session = FnAssetAPI.ui.UISessionManager.currentSession()
if session is None:
FnAssetAPI.logging.error("There is currently no Session started with an "
+"Asset Management System, unable to create shots.")
self.reject()
if context is None:
context = session.createContext()
self.setWindowTitle(FnAssetAPI.l("{publish} Source Clips to {shots} in {manager}"))
self.widget = PublishShotClipsWidget(context)
layout.addWidget(self.widget)
self.__buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok
| QtGui.QDialogButtonBox.Cancel)
self.widget.optionsChanged.connect(self.__setButtonTitle)
self.__setButtonTitle()
layout.addWidget(self.__buttons)
self.__buttons.accepted.connect(self.accept)
self.__buttons.rejected.connect(self.reject)
def getOptions(self):
return self.widget.getOptions()
def setOptions(self, options):
self.widget.setOptions(options)
def setTrackItems(self, trackItems):
self.widget.setTrackItems(trackItems)
def sizeHint(self):
return QtCore.QSize(850, 650)
def __setButtonTitle(self):
title, enabled = self.widget.getButtonState()
self.__buttons.button(QtGui.QDialogButtonBox.Ok).setText(title)
self.__buttons.button(QtGui.QDialogButtonBox.Ok).setEnabled(enabled)
class PublishShotClipsWidget(QtGui.QWidget):
"""
A dialog to present the user with options pertaining to creating shots in an
asset manager, based on a number of selected track items. Clips from these
TrackItems can also be published to the shots, or, if shared with multiple
TrackItems, they can be published to an alternate location.
@specUsage FnAssetAPI.specifications.ImageSpecification
@specUsage FnAssetAPI.specifications.ShotSpecification
"""
optionsChanged = QtCore.Signal()
## @name Constants for Option Keys
## @{
kTargetEntityRef = 'targetEntityRef'
kPublishClips = 'publishClips'
kClipsUseCustomName = 'clipsUseCustomName'
kCustomClipName = 'customClipName'
kPublishSharedClips = 'publishSharedClips'
kUsePublishedClips = 'usePublishedClips'
kIgnorePublishedClips = 'ignorePublishedClips'
kSharedClipEntityRef = 'sharedClipTargetEntityRef'
kManagerOptionsClip = 'managerOptionsClip'
## @}
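# Illustrative example only (the entity reference value is hypothetical): an options
# dict of the shape consumed by setOptions() / returned by getOptions(), using the
# keys defined above.
#
#   {
#       PublishShotClipsWidget.kTargetEntityRef: 'ams://project/sequence01',
#       PublishShotClipsWidget.kPublishClips: True,
#       PublishShotClipsWidget.kPublishSharedClips: False,
#       PublishShotClipsWidget.kClipsUseCustomName: True,
#       PublishShotClipsWidget.kCustomClipName: 'plate',
#       PublishShotClipsWidget.kIgnorePublishedClips: True,
#   }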
## @todo We currently require a context at initialisation time, as we need to
# create Manager UI elements. Ideally, we'd let this be set later, and
# re-create any context-dependent UI as necessary.
def __init__(self, context, parent=None, options=None):
super(PublishShotClipsWidget, self).__init__(parent=parent)
self.__trackItems = []
self.__shotItems = []
self.__clipItems = []
self.__sharedClipItems = []
self.__updatingOptions = False
self.__options = {
self.kTargetEntityRef : '',
self.kPublishClips : True,
self.kPublishSharedClips : False,
self.kUsePublishedClips : True,
self.kSharedClipEntityRef : '',
self.kClipsUseCustomName : False,
self.kCustomClipName : 'plate',
self.kIgnorePublishedClips : True,
}
self._session = FnAssetAPI.ui.UISessionManager.currentSession()
self._context = context # Note, this is a reference
self._context.access = context.kWriteMultiple
# Make some caches for these, to avoid thrashing the API
self.__clipPolicy = cmdUtils.policy.clipPolicy(forWrite=True)
self.__perEntityClipPolicy = {}
# We'll need to keep track of some lookups to avoid excess traffic
self._parentEntity = None
self._newShots = []
self._existingShots = []
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
self._buildUI(layout)
self._connectUI()
if options:
self.setOptions(options)
else:
self._readOptions()
def _buildUI(self, layout):
## @todo Some of these should probably be widgets in their own right, but
## it needs a little thought due to the interaction between them.
# Add the 'Create Under' section, to choose the parent entity that should
# receive the new shots.
specification = ShotSpecification()
pickerCls = self._session.getManagerWidget(
FnAssetAPI.ui.constants.kInlinePickerWidgetId, instantiate=False)
# Parent Picker
parentPickerLayout = QtGui.QHBoxLayout()
parentPickerLayout.addWidget(QtGui.QLabel("Look for matching Shots under:"))
self._shotParentPicker = pickerCls(specification, self._context)
parentPickerLayout.addWidget(self._shotParentPicker)
layout.addLayout(parentPickerLayout)
mediaWidget = self._buildClipsTab()
layout.addWidget(mediaWidget)
def _buildClipsTab(self):
l = FnAssetAPI.l
imageSpecification = ImageSpecification()
# > Media Tab
mediaWidget = QtGui.QWidget()
mediaWidget.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
mediaWidgetLayout = QtGui.QVBoxLayout()
mediaWidgetLayout.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
mediaWidget.setLayout(mediaWidgetLayout)
# - Shared Media
self._sharedClipsGroup = QtGui.QGroupBox(l("Some Source Clips are Shared "+
"and used in more than one Shot in the Edit"))
mediaWidgetLayout.addWidget(self._sharedClipsGroup)
sharedClipsGroupLayout = QtGui.QVBoxLayout()
self._sharedClipsGroup.setLayout(sharedClipsGroupLayout)
self._sharedIgnoredRadio = QtGui.QRadioButton(l("Don't {publish}"))
self._sharedToSequenceRadio = QtGui.QRadioButton(l("{publish} at the level above the Shots"))
self._sharedToCustomRadio = QtGui.QRadioButton(l("{publish} to another location"))
self._sharedIgnoredRadio.setChecked(True)
sharedClipsGroupLayout.addWidget(self._sharedIgnoredRadio)
sharedClipsGroupLayout.addWidget(self._sharedToSequenceRadio)
sharedClipsGroupLayout.addWidget(self._sharedToCustomRadio)
## @todo Use the project entityReferences Parent if we have one?
pickerCls = self._session.getManagerWidget(
FnAssetAPI.ui.constants.kInlinePickerWidgetId, instantiate=False)
self._sharedClipParentPicker = pickerCls(imageSpecification, self._context)
self._sharedClipParentPicker.setVisible(False)
sharedClipsGroupLayout.addWidget(self._sharedClipParentPicker)
self._previewWidget = PublishShotClipsSummaryWidget()
self._previewWidget.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
mediaWidgetLayout.addWidget(self._previewWidget)
# - Options
self._clipOptionsGroup = QtGui.QGroupBox(l("Options"))
optionsGroupLayout = QtGui.QVBoxLayout()
self._clipOptionsGroup.setLayout(optionsGroupLayout)
mediaWidgetLayout.addWidget(self._clipOptionsGroup)
# See if we have any options from the manager
self._managerOptionsClip = self._session.getManagerWidget(
FnAssetAPI.ui.constants.kRegistrationManagerOptionsWidgetId,
throw=False, args=(imageSpecification, self._context))
if self._managerOptionsClip:
optionsGroupLayout.addWidget(self._managerOptionsClip)
optionsGroupLayout.addSpacing(10)
hieroOptionsGrid = QtGui.QGridLayout()
## @todo we should have some base widget for this
hieroOptionsGrid.addWidget(QtGui.QLabel(l("{asset} name:")), 0, 0)
self._clipNameCombo = QtGui.QComboBox()
self._clipNameCombo.addItems(("Clip Name", "Custom"))
hieroOptionsGrid.addWidget(self._clipNameCombo, 0, 1)
self._clipNameCustomField = QtGui.QLineEdit()
hieroOptionsGrid.addWidget(self._clipNameCustomField, 0, 2)
self._replaceClipSource = QtGui.QCheckBox(l("Link Source Clips to "+
"{published} {assets}"))
hieroOptionsGrid.addWidget(self._replaceClipSource, 1, 1, 1, 2)
self._ignorePublishedClips = QtGui.QCheckBox(l("Ignore Source Clips that are "+
"already {published}"))
hieroOptionsGrid.addWidget(self._ignorePublishedClips, 2, 1, 1, 2)
# Make sure we don't stretch the grid layout too much and make the last
# column really wide
hieroOptionsHBox = QtGui.QHBoxLayout()
optionsGroupLayout.addLayout(hieroOptionsHBox)
hieroOptionsHBox.addLayout(hieroOptionsGrid)
hieroOptionsHBox.addStretch()
return mediaWidget
def _connectUI(self):
self._shotParentPicker.selectionChanged.connect(
lambda v: self.__updateOption(self.kTargetEntityRef, v[0] if v else ''))
# Make sure the shared clip destination is updated too
self._shotParentPicker.selectionChanged.connect(self.__sharedClipDestToggle)
self._replaceClipSource.toggled.connect(
lambda s: self.__updateOption(self.kUsePublishedClips, s))
self._ignorePublishedClips.toggled.connect(
lambda s: self.__updateOption(self.kIgnorePublishedClips, s,
clearItems=True))
self._sharedToSequenceRadio.toggled.connect(self.__sharedClipDestToggle)
self._sharedToCustomRadio.toggled.connect(self.__sharedClipDestToggle)
self._sharedIgnoredRadio.toggled.connect(self.__sharedClipDestToggle)
self._sharedClipParentPicker.selectionChanged.connect(self.__sharedClipDestToggle)
self._clipNameCustomField.editingFinished.connect(self.__clipNameOptionsChanged)
self._clipNameCombo.currentIndexChanged.connect(self.__clipNameOptionsChanged)
## @todo Do we need to connect up the manager options widget too?
def __clipNameOptionsChanged(self):
if self.__updatingOptions:
return
source = self._clipNameCombo.currentText()
self.__updateOption(self.kClipsUseCustomName, source == "Custom", refresh=False)
name = self._clipNameCustomField.text()
self.__updateOption(self.kCustomClipName, name, refresh=True, clearItems=True)
def __sharedClipDestToggle(self):
ignore = self._sharedIgnoredRadio.isChecked()
useCustom = self._sharedToCustomRadio.isChecked()
self._sharedClipParentPicker.setVisible(useCustom)
if self.__updatingOptions:
return
if useCustom:
sharedTarget = self._sharedClipParentPicker.getSelectionSingle()
else:
sharedTarget = self._shotParentPicker.getSelectionSingle()
self.__updateOption(self.kPublishSharedClips, not ignore)
self.__updateOption(self.kSharedClipEntityRef, sharedTarget)
def __updateOption(self, option, value, refresh=True, clearParent=False,
clearItems=False):
if self.__updatingOptions:
return
self.__options[option] = value
if refresh:
if clearParent:
self._parentEntity = None
if clearItems:
self.__shotItems = []
self.refresh()
self._validateOptions()
def _readOptions(self):
self.__updatingOptions = True
# Drive some defaults if the options aren't set
publishSharedClips = self.__options.get(self.kPublishSharedClips, False)
# Update UI, this will set the options in the defaulted case due to the
# signal connections on the toggled event
targetEntityRef = self.__options.get(self.kTargetEntityRef, '')
sharedTargetEntityRef = self.__options.get(self.kSharedClipEntityRef, '')
# Update the radios first due to signal connections
if publishSharedClips:
if sharedTargetEntityRef or sharedTargetEntityRef == targetEntityRef:
self._sharedToSequenceRadio.setChecked(True)
else:
self._sharedIgnoredRadio.setChecked(True)
else:
try:
self._sharedClipParentPicker.setSelectionSingle(sharedTargetEntityRef)
except Exception as e:
FnAssetAPI.logging.debug(e)
self._sharedToCustomRadio.setChecked(True)
# Update main picked value
try:
self._shotParentPicker.setSelectionSingle(targetEntityRef)
except Exception as e:
FnAssetAPI.logging.debug(e)
replaceClips = self.__options.get(self.kUsePublishedClips, True)
self._replaceClipSource.setChecked(replaceClips)
# Manager Options
managerOptionsClip = self.__options.get(self.kManagerOptionsClip, None)
if managerOptionsClip and self._managerOptionsClip:
self._managerOptionsClip.setOptions(managerOptionsClip)
clipCustomName = self.__options.get(self.kCustomClipName, '')
self._clipNameCustomField.setText(clipCustomName)
useClipCustomName = self.__options.get(self.kClipsUseCustomName, False)
self._clipNameCombo.setCurrentIndex( 1 if useClipCustomName else 0 )
ignorePublished = self.__options.get(self.kIgnorePublishedClips, True)
self._ignorePublishedClips.setChecked(ignorePublished)
self.__updatingOptions = False
# Make sure that the shared clip options are correctly configured - there
# isn't a 1:1 mapping between options and controls, so the case of 'publish
# to shot parent' lets just double check that the options dict contain the
# right parent
self.__sharedClipDestToggle()
def _validateOptions(self):
# Make sure that the asset manager can take us publishing a clip
clipsAllowed = self.__clipPolicy != FnAssetAPI.constants.kIgnored
## @todo disable dialog if clips not allowed
# If people are choosing to publish shared clips to the main sequence,
# make sure that the parent is capable of taking them (some cases, its not)
# Disable the radio button if its not applicable
sharedPublishEnabled = True
if self._sharedToSequenceRadio.isChecked():
dest = self.__options.get(self.kSharedClipEntityRef, None)
if dest:
if dest not in self.__perEntityClipPolicy:
self.__perEntityClipPolicy[dest] = cmdUtils.policy.clipPolicy(
forWrite=True, entityRef=dest)
sharedClipPolicy = self.__perEntityClipPolicy.get(dest,
FnAssetAPI.constants.kIgnored)
if sharedClipPolicy == FnAssetAPI.constants.kIgnored:
sharedPublishEnabled = False
if not sharedPublishEnabled:
self._sharedToCustomRadio.setChecked(True)
## @todo For some reason, this doesn't seem to take effect, so it looks a bit
# confusing to the user :(
self._sharedToSequenceRadio.setEnabled(sharedPublishEnabled)
self._sharedToSequenceRadio.setCheckable(sharedPublishEnabled)
self._clipNameCustomField.setEnabled(
self.__options.get(self.kClipsUseCustomName, False))
def sizeHint(self):
return QtCore.QSize(600, 400)
def setTrackItems(self, trackItems):
self.__trackItems = []
self.__trackItems = trackItems
self.__shotItems = [] # Clear cache
self.refresh()
def getTrackItems(self):
return self.__trackItems
def getOptions(self):
options = dict(self.__options)
managerOptionsClip = {}
if self._managerOptionsClip:
managerOptionsClip = self._managerOptionsClip.getOptions()
options[self.kManagerOptionsClip] = managerOptionsClip
return options
def setOptions(self, options):
self.__options.update(options)
self._readOptions()
if self._managerOptionsClip:
managerOptions = options.get(self.kManagerOptionsClip, {})
self._managerOptionsClip.setOptions(managerOptions)
self.refresh()
# This refreshes the UI based on its current state, it doesn't re-read the
# options dict directly. If required, call _readOptions() first
def refresh(self):
## @todo Call managementPolicy on an image sequence to the chosen sequence
## in case, say someone selected a project as the destination and the ams
## can't handle image sequences at the project level...
session = FnAssetAPI.SessionManager.currentSession()
if not session:
raise RuntimeError("No Asset Management session available")
if not self.__shotItems:
self.__shotItems = cmdUtils.object.trackItemsToShotItems(self.__trackItems,
self.getOptions(), coalesseByName=True)
self._parentEntity = None
self._previewWidget.setShotItems(self.__shotItems)
# Update Shot Creation
parentRef = self.__options.get(self.kTargetEntityRef, None)
# Ensure we don't waste time repeatedly looking under the same parent
if not self._parentEntity or self._parentEntity.reference != parentRef:
self._parentEntity = session.getEntity(parentRef)
if self._parentEntity:
# Ensure we have the entity for any existing shots
cmdUtils.shot.analyzeHieroShotItems(self.__shotItems, self._parentEntity,
checkForConflicts=False, adopt=True)
self.__clipItems, self.__sharedClipItems = \
cmdUtils.shot.analyzeHeiroShotItemClips(self.__shotItems, asItems=True)
haveShared = bool(self.__sharedClipItems)
if self.__options.get(self.kIgnorePublishedClips, True):
itemFilter = lambda i
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner
Modified from https://github.com/google-research/bert from The Google AI Language Team Authors
."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas as pd
import tensorflow as tf
import bert.tokenization as tokenization
import bert.modeling as modeling
from causal_bert import bert_predictors as predictors
from reddit.dataset.dataset import make_input_fn_from_file, \
make_subreddit_based_simulated_labeler, make_propensity_based_simulated_labeler, \
make_subreddit_standardized_scores, make_log_scores
from reddit.data_cleaning.reddit_posts import subreddit_idx_to_subreddit
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"input_files_or_glob", None,
"The tf_record file (or files) containing the pre-processed data. Probably output of a data_cleaning script.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 10000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "number of warmup steps to take")
# flags.DEFINE_float(
# "warmup_proportion", 0.1,
# "Proportion of training to perform linear learning rate warmup for. "
# "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 5000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("keep_checkpoints", 1,
"How many checkpoints to keep")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_integer("seed", 0, "Seed for rng.")
flags.DEFINE_bool("label_pred", True, "Whether to do (only) label prediction.")
flags.DEFINE_bool("unsupervised", False, "Whether to do (only) unsupervised training.")
flags.DEFINE_integer("num_splits", 10,
"number of splits")
flags.DEFINE_string("dev_splits", '', "indices of development splits")
flags.DEFINE_string("test_splits", '', "indices of test splits")
flags.DEFINE_string("subreddits", '', "the list of subreddits to train on")
flags.DEFINE_string("simulated", 'real', "whether to use real data ('real'), attribute based ('attribute'), "
"or propensity score-based ('propensity') simulation"),
flags.DEFINE_bool("use_subreddit", False, "whether to use the subreddit index as a feature")
flags.DEFINE_float("beta0", 0.0, "param passed to simulated labeler, treatment strength")
flags.DEFINE_float("beta1", 0.0, "param passed to simulated labeler, confounding strength")
flags.DEFINE_float("gamma", 0.0, "param passed to simulated labeler, noise level")
flags.DEFINE_float("exogenous_confounding", 0.0, "amount of exogenous confounding in propensity based simulation")
flags.DEFINE_string("base_propensities_path", '', "path to .tsv file containing a 'propensity score' for each unit,"
"used for propensity score-based simulation")
flags.DEFINE_string("simulation_mode", 'simple', "simple, multiplicative, or interaction")
def main(_):
# tf.enable_eager_execution() # for debugging
tf.set_random_seed(FLAGS.seed)
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoints,
# save_checkpoints_steps=None,
# save_checkpoints_secs=None,
save_summary_steps=10,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
# Estimator and data pipeline setup
if FLAGS.simulated == 'real':
# params = {'outcome': 'standard_score', 'treatment': 'gender'}
# labeler = make_subreddit_standardized_scores()
params = {'outcome': 'log_score', 'treatment': 'gender'}
labeler = make_log_scores()
elif FLAGS.simulated == 'attribute':
params = {'outcome': 'outcome', 'treatment': 'gender'}
labeler = make_subreddit_based_simulated_labeler(FLAGS.beta0, FLAGS.beta1, FLAGS.gamma, FLAGS.simulation_mode,
seed=FLAGS.seed)
elif FLAGS.simulated == 'propensity':
params = {'outcome': 'outcome', 'treatment': 'treatment'}
        output = pd.read_csv(FLAGS.base_propensities_path, sep='\t')
base_propensity_scores = output['treatment_probability'].values
example_indices = output['index'].values
labeler = make_propensity_based_simulated_labeler(treat_strength=FLAGS.beta0,
con_strength=FLAGS.beta1,
noise_level=FLAGS.gamma,
base_propensity_scores=base_propensity_scores,
example_indices=example_indices,
exogeneous_con=FLAGS.exogenous_confounding,
setting=FLAGS.simulation_mode,
seed=FLAGS.seed)
    else:
        raise ValueError("simulated flag not recognized")
    dev_splits = [int(s) for s in FLAGS.dev_splits.split()]
    test_splits = [int(s) for s in FLAGS.test_splits.split()]
if FLAGS.subreddits == '':
subreddits = None
else:
subreddits = [int(s) for s in FLAGS.subreddits.split(',')]
num_train_steps = FLAGS.num_train_steps
num_warmup_steps = FLAGS.num_warmup_steps
model_fn = predictors.binary_treat_cont_outcome_model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
label_pred=FLAGS.label_pred,
unsupervised=FLAGS.unsupervised,
polyak=False,
use_extra_features=FLAGS.use_subreddit)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size,
params=params)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
# subsample and process the data
with tf.name_scope("training_data"):
train_input_fn = make_input_fn_from_file(
input_files_or_glob=FLAGS.input_files_or_glob,
seq_length=FLAGS.max_seq_length,
num_splits=FLAGS.num_splits,
dev_splits=dev_splits,
test_splits=test_splits,
tokenizer=tokenizer,
is_training=True,
shuffle_buffer_size=int(1e6), # note: bert hardcoded this, and I'm following suit
seed=FLAGS.seed,
labeler=labeler,
subreddits=subreddits)
# additional logging
hooks = []
if FLAGS.label_pred:
hooks += [
tf.train.LoggingTensorHook({
# 'labels_float': 'dragon_net/outcome_st_no_treatment/loss/labels_float',
# 'prediction': 'dragon_net/outcome_st_no_treatment/loss/prediction',
# 'per_example_loss': 'dragon_net/outcome_st_no_treatment/loss/per_example_loss'
# 'token_ids': 'token_ids',
# 'token_mask': 'token_mask',
# 'label_ids': 'label_ids',
# 'pred_in': 'summary/in_split/predictions',
# 'pred_out': 'summary/out_split/predictions',
# 'ra_in': 'summary/in_split/labels/kappa/batch_random_agreement/random_agreement',
# 'ra_out': 'summary/out_split/labels/kappa/batch_random_agreement/random_agreement',
},
every_n_iter=1)
]
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=hooks)
if FLAGS.do_train and (FLAGS.do_eval or FLAGS.do_predict):
# reload the model to get rid of dropout and input token masking
trained_model_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
model_fn = predictors.binary_treat_cont_outcome_model_fn_builder(
bert_config=bert_config,
init_checkpoint=trained_model_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
label_pred=True,
unsupervised=False,
polyak=False,
use_extra_features=FLAGS.use_subreddit
)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size,
params=params)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
# tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
# Eval will be slightly WRONG on the TPU because it will truncate
# the last batch.
pass
# eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
        eval_drop_remainder = FLAGS.use_tpu
def _do_eval(subreddits):
eval_input_fn = make_input_fn_from_file(
input_files_or_glob=FLAGS.input_files_or_glob,
seq_length=FLAGS.max_seq_length,
num_splits=FLAGS.num_splits,
dev_splits=dev_splits,
test_splits=test_splits,
tokenizer=tokenizer,
is_training=False,
filter_test=False,
seed=FLAGS.seed,
labeler=labeler,
subreddits=subreddits)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
            if subreddits is not None and len(subreddits) == 1:
output_eval_file = os.path.join(
FLAGS.output_dir, 'eval', "eval_results_{}.txt".format(subreddit_idx_to_subreddit(subreddits[0])))
else:
output_eval_file = os.path.join(FLAGS.output_dir, 'eval', "eval_results_all.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
os.makedirs(os.path.join(FLAGS.output_dir, 'eval'), exist_ok=True)
_do_eval(subreddits)
if FLAGS.do_predict:
tf.logging.info("***** Running prediction*****")
if FLAGS.use_tpu:
# Warning: According to tpu_estimator.py Prediction on TPU is an
# experimental feature and hence not supported here
raise ValueError("Prediction in TPU not supported")
def _do_predict(subreddits):
predict_input_fn = make_input_fn_from_file(
input_files_or_glob=FLAGS.input_files_or_glob,
seq_length=FLAGS.max_seq_length,
num_splits=FLAGS.num_splits,
dev_splits=dev_splits,
test_splits=test_splits,
tokenizer=tokenizer,
is_training=False,
filter_test=False,
subreddits=subreddits,
seed=FLAGS.seed,
labeler=labeler)
result = estimator.predict(input_fn=predict_input_fn)
            if subreddits is not None and len(subreddits) == 1:
output_predict_file = os.path.join(
FLAGS.output_dir, 'predict',
"test_results_{}.tsv".format(subreddit_idx_to_subreddit(subreddits[0])))
else:
output_predict_file = os.path.join(FLAGS.output_dir, 'predict', "test_results_all.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
tf.logging.info("***** Predict results *****")
attribute_names = ['in_test',
'treatment_probability',
'expected_outcome_st_treatment', 'expected_outcome_st_no_treatment',
'outcome', 'treatment',
'index']
header = "\t".join(
attribute_name for attribute_name in attribute_names) + "\n"
writer.write(header)
for prediction in result:
# prediction['subreddit'] = subreddit_idx_to_subreddit(prediction['subreddit'])
output_line = "\t".join(
str(prediction[attribute_name]) for attribute_name in attribute_names) + "\n"
writer.write(output_line)
os.makedirs(os.path.join(FLAGS.output_dir, 'predict'), exist_ok=True)
# for subreddit in range(20):
# _do_predict(subreddit)
_do_predict(subreddits)
if __name__ == "__main__":
    # Assumed entry point following the standard TF-flags pattern; the upstream
    # BERT runners also mark required flags (input_files_or_glob, bert_config_file,
    # vocab_file, output_dir) here before calling tf.app.run().
    tf.app.run()
#!/usr/bin/env python
import os
constants = {
# physical units
"kBoltz" : 1.38064852e-23, # Boltzmann constant, J/K
"rho" : 1000., # water density, kg/m^3
"Ndumps" : 250, # dumber of dumps
# numerical approximations
"Cdt" : 0.25, # time step constraint coefficient: sonic and acceleration terms
"Cdt_visc" : 0.125, # time step constraint coefficient: viscous term
"Gka" : 4.0e-3, # numerical rbc global area constraint, J / m^2
"Gkv" : 4.0e4, # numerical rbc global volume constraint, J / m^3
"a3" : -2.0, # higher order stretch coefficients, LWM strain law
"a4" : 8.0, # higher order stretch coefficients, LWM strain law
"b1" : 0.7, # higher order shear coefficients, LWM strain law
"b2" : 0.75, # higher order shear coefficients, LWM strain law
# simulation units
"rc" : 1.0, # cutoff radius (DPD units, L)
"mass" : 1, # mass of a DPD particle, (DPD units, M)
"nd" : 10, # number density (DPD units, 1/L**3)
"AIJ" : 50, # dimensionless DPD compressibility: aij * rc / kBT
"kpow" : 0.125, # DPD weight function exponent, dimensionless
"R0" : 4.0, # effective cell radius in DPD length units
"rho0" : 10, # density used in viscosity surrogate (DPD units, M/L^3)
"correctfreq" : 1000, # number of timesteps for bounce correction
"statsfreq" : 10000, # number of timesteps for bounce correction
"safety" : 1.5, # safety factor for domain size
"dtmax" : 5.e-3 # maximum dt allowed
}
def simulation(u, plasma_par, hemogl_par, rbc_par, mesh_par, sim_par, ply_dir):
    """
    u          : Mirheo object
    plasma_par : Parameters for outer solvent
    hemogl_par : Parameters for inner solvent
    rbc_par    : Parameters for rbc mechanical properties
    mesh_par   : Parameters for rbc mesh
    sim_par    : Parameters related to simulation setup
    ply_dir    : Output directory for the ply dumps and the config log
    """
    import mirheo as mir
logfile = open(ply_dir + "config.txt", "a")
    if u.isComputeTask():
logfile.write("\n~~~ Simulation parameters:")
logfile.write("\n" + str(sim_par))
logfile.write("\n~~~ RBC mechanical properties:")
logfile.write("\n" + str(rbc_par))
logfile.write("\n~~~ Plasma parameters:")
logfile.write("\n" + str(plasma_par))
logfile.write("\n~~~ Hemoglobin parameters:")
logfile.write("\n" + str(hemogl_par))
# ~~~ Outer solvent: plasma
plasma_pv = mir.ParticleVectors.ParticleVector('outer', mass = plasma_par['mass'])
ic_plasma = mir.InitialConditions.Uniform(number_density = plasma_par['nd'])
u.registerParticleVector(pv=plasma_pv, ic=ic_plasma)
# ~~~ RBC mesh
mesh_ini = mesh_par['ini']
mesh_ref = mesh_par['ref']
mesh_rbc = mir.ParticleVectors.MembraneMesh(mesh_ini.vertices.tolist(), mesh_ref.vertices.tolist(), mesh_ini.faces.tolist())
rbc_pv = mir.ParticleVectors.MembraneVector('rbc', mass=mesh_par['mass'], mesh=mesh_rbc)
ic_rbc = mir.InitialConditions.Membrane([[sim_par['domain'][0]*0.5, sim_par['domain'][1]*0.5, sim_par['domain'][2]*0.5, 1.0, 0.0, 0.0, 0.0]])
u.registerParticleVector(pv=rbc_pv, ic=ic_rbc)
# ~~~ Inner solvent
checker = mir.BelongingCheckers.Mesh('checker')
u.registerObjectBelongingChecker(checker, rbc_pv)
hemogl_pv = u.applyObjectBelongingChecker(checker=checker, pv=plasma_pv, inside='inner', correct_every=sim_par['correctfreq'])
# ~~~ Bouncer
bouncer = mir.Bouncers.Mesh("bounce_rbc", "bounce_maxwell", kBT=0.0)
u.registerBouncer(bouncer)
# ~~~ Interactions
dpd_int = mir.Interactions.Pairwise('dpd', rc=plasma_par['rc'], kind='DPD', a=plasma_par['alpha'], gamma=plasma_par['gamma'], kBT=plasma_par['kbt'], power=plasma_par['kpow'])
rbc_int = mir.Interactions.MembraneForces("int_rbc", **rbc_par, stress_free=True)
u.registerInteraction(dpd_int)
u.registerInteraction(rbc_int)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
subvv = mir.Integrators.SubStep('subvv', sim_par['substeps'], [rbc_int])
u.registerIntegrator(subvv)
if u.isComputeTask():
dpd_int.setSpecificPair(rbc_pv, plasma_pv, a=0, gamma=sim_par['gfsi_o'])
dpd_int.setSpecificPair(rbc_pv, hemogl_pv, a=0, gamma=sim_par['gfsi_i'])
dpd_int.setSpecificPair(hemogl_pv, plasma_pv, gamma=0, kBT=0)
dpd_int.setSpecificPair(hemogl_pv, hemogl_pv, gamma=hemogl_par['gamma'])
u.setInteraction(dpd_int, plasma_pv, plasma_pv)
u.setInteraction(dpd_int, hemogl_pv, plasma_pv)
u.setInteraction(dpd_int, rbc_pv, plasma_pv)
u.setInteraction(dpd_int, hemogl_pv, hemogl_pv)
u.setInteraction(dpd_int, rbc_pv, hemogl_pv)
# ~~~ Integration
u.setIntegrator(vv, hemogl_pv)
u.setIntegrator(vv, plasma_pv)
u.setIntegrator(subvv, rbc_pv)
# ~~~ Membrane bounce
u.setBouncer(bouncer, rbc_pv, hemogl_pv)
u.setBouncer(bouncer, rbc_pv, plasma_pv)
# ~~~ Dumps
logfile.write('Saving results to: ' + ply_dir)
logfile.write('Current Path: ' + os.getcwd())
u.registerPlugins(mir.Plugins.createDumpMesh('rbcs', rbc_pv, dump_every=sim_par['dumpfreq'], path = ply_dir))
logfile.close()
# ~~~ Run
u.run(sim_par['iend'])
def run_korali( comm_address, gammaC, NDATA, TEND, ini_mesh_fname, ref_mesh_fname, ply_dir, verbose=False, dryrun=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
lm = 10.
factor = 0.005
kT_s = 0.02
T_p = factor * (23.+273.)
etai_p = factor * 10.e-3
etao_p = etai_p / lm
mu_p = factor * 2.5e-6
Ka_p = factor * 5.e-6
kb_p = factor * 2.e-19
area_p = 140.e-12
volume_p = 100.e-18
run( comm_address, gammaC, NDATA, TEND, kT_s, T_p, etao_p, etai_p, mu_p, Ka_p, kb_p, area_p, volume_p, verbose, dryrun, ini_mesh_fname, ref_mesh_fname, ply_dir )
def run( comm_address, gammaC, NDATA, TEND, kT_s, T_p, etao_p, etai_p, mu_p, Ka_p, kb_p, area_p, volume_p, verbose, dryrun, ini_mesh_fname, ref_mesh_fname, ply_dir='ply/'):
import trimesh
import numpy as np
from pint import UnitRegistry
import mirheo as mir
import dpdParams as D
import rbcParams as R
logfile = open(ply_dir + "config.txt", "a")
ureg = UnitRegistry()
def effective_radius_from_area(A):
return np.sqrt(A / (4.0 * np.pi))
@ureg.wraps(None, ureg.dimensionless)
def to_sim(a):
return a
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Physical values (_p), in SI units
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A_p = area_p * ureg.m**2
V_p = volume_p * ureg.m**3
R_p = effective_radius_from_area(A_p)
mu_p = mu_p * ureg.N / ureg.m
Ka_p = Ka_p * ureg.N / ureg.m
kb_p = kb_p * ureg.J
kB_p = constants.get('kBoltz') * ureg.J / ureg.K
T_p = T_p * ureg.K
kT_p = kB_p * T_p
Gka_p = constants.get('Gka') * ureg.J / ureg.m**2
Gkv_p = constants.get('Gkv') * ureg.J / ureg.m**3
a3 = constants.get('a3')
a4 = constants.get('a4')
b1 = constants.get('b1')
b2 = constants.get('b2')
rho_p = constants.get('rho') * ureg.kg / ureg.m**3
etai_p = etai_p * ureg.Pa * ureg.s
etao_p = etao_p * ureg.Pa * ureg.s
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Non-dimensional numbers:
#
# FvK_p : Foeppl-von-Karman number in healthy cells
# fKa_p : Ka / mu
# lm_p : ratio between inner and outer viscosity
# FGKa_p : dimensionless area constraint coefficient
# FGKv_p : dimensionless volume constraint coefficient
# Ftherm : ratio between bending modulus and thermal energy
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
FvK_p = mu_p * R_p*R_p / kb_p
fKa_p = Ka_p / mu_p
FGKa_p = Gka_p * R_p**2 / kb_p
FGKv_p = Gkv_p * R_p**3 / kb_p
Ftherm = kb_p / kT_p
lm_p = etai_p / etao_p
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Simulation (_s)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Simulation length of rbc
suggested_radius = constants.get('R0')
# Scale initial mesh to suggested radius
ini = trimesh.load(ini_mesh_fname, process=False)
ref = trimesh.load(ref_mesh_fname, process=False)
original_radius = effective_radius_from_area(ini.area)
rbc_scaling = suggested_radius / original_radius
ini.vertices *= rbc_scaling
ref.vertices *= rbc_scaling
# set length scale (R_s)
Nv = len(ini.vertices)
Nv_ref = len(ref.vertices)
A_s = ini.area
V_s = ini.volume
R_s = suggested_radius
# set mass scale (mass_s)
nd = constants.get('nd')
mass_s = constants.get('mass')
rho_s = mass_s * nd
# rbc mass, assume membrane is 2D surface
M_s = rho_s * A_s
mmass_s = M_s / Nv
# set time scale (based on kBT)
kT_s = kT_s
# unit scalings
L_UNIT = R_p / R_s
M_UNIT = rho_p / rho_s * (L_UNIT**3)
T_UNIT = np.sqrt( kT_s/kT_p * L_UNIT**2 * M_UNIT )
F_UNIT = M_UNIT * L_UNIT / T_UNIT**2
E_UNIT = F_UNIT * L_UNIT
VISC_UNIT = F_UNIT / L_UNIT**2 * T_UNIT
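    # These conversion factors follow from fixing three reference scales
    # (length via R, density via rho, thermal energy via kT); the remaining
    # units (time, force, energy, viscosity) are derived by dimensional analysis.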
# Numerical parameters
AIJ = constants.get('AIJ')
rc = constants.get('rc')
rho0_s = constants.get('rho0')
aij = AIJ * kT_s / rc
cs_s = D.get_Cs_(aij, nd, mass_s, kT_s)
kpow = constants.get('kpow')
kb_s = to_sim(Ftherm * kT_s)
mu_s = to_sim(FvK_p * kb_s / (R_s**2))
Ka_s = to_sim(fKa_p * mu_s)
kade_s = 0. # use Minimum rbc model
DA0D_s = 0.
C0_s = 0.
Gka_s = to_sim(FGKa_p * kb_s / (R_s**2))
Gkv_s = to_sim(FGKv_p * kb_s / (R_s**3))
kT_rbc = kT_s
etao_s = to_sim(etao_p / VISC_UNIT)
etai_s = to_sim(lm_p * etao_s)
nuo_s = etao_s / rho_s
nui_s = etai_s / rho_s
gij = D.get_gij(aij, nuo_s*rho0_s)
gin = D.get_gij(aij, nui_s*rho0_s)
gfsi_o = R.get_gammafsiDPD(nuo_s*rho0_s, kpow, A_s, Nv, nd, rc)
gfsi_i = R.get_gammafsiDPD(nui_s*rho0_s, kpow, A_s, Nv, nd, rc)
gT = 0.
gC = gammaC
etam_s = np.sqrt(3)/4. * gC # approximation, Fedosov2010
FvK_s = mu_s * R_s**2 / kb_s
Ftherm_s = kb_s / kT_s
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Timestep estimation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~ Solvent timestep:
# computed based on the timesteps
# from sonic, viscous and acceleration
# constraints as defined in Morris1997
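    # Assumed forms of the Morris (1997)-style constraints implemented in
    # dpdParams (sketch): dt_sonic ~ Cdt * h / cs, dt_visc ~ Cdt_visc * h**2 * rho / eta,
    # dt_acc ~ Cdt * sqrt(h * m / F); the smallest of these, capped by dtmax, is used.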
LX_min = min(ini.vertices[:,0]); LX_max = max(ini.vertices[:,0]); DX = LX_max - LX_min
LX_min = min(ref.vertices[:,0]); LX_max = max(ref.vertices[:,0]); DX0 = LX_max - LX_min
vcell_s = 0.5*(DX-DX0) / to_sim(0.2*ureg.s / T_UNIT)
h = D.interparticle_distance(nd)
Cdt = constants.get('Cdt')
Cdt_visc = constants.get('Cdt_visc')
dtmax = constants.get('dtmax')
dt_sonic = D.get_dt_sonic(h, cs_s, C=Cdt)
dt_visc = D.get_dt_viscous(h, max([etao_s, etai_s]), rho_s, C=Cdt_visc)
Fdpd = D.get_total_dpd_force(nd, rc, aij, max([gfsi_o, gfsi_i, gij, gin]), vcell_s, kT_s, min([dt_sonic, dt_visc]))
dt_acc = D.get_dt_accel_(h, Fdpd, mass_s, C=Cdt)
dt_fluid = min([dtmax, dt_sonic, dt_visc, dt_acc])
dt = dt_fluid
# ~ Membrane substeps:
# Computed based on the timesteps
# from viscous and acceleration
# constraints as defined in Morris1997
hm = R.intervertex_distance(A_s, Nv)
Fdpd = D.get_total_dpd_force(nd, rc, aij, max([gfsi_o, gfsi_i]), vcell_s, kT_s, dt_fluid)
    Fintern = (mu_s+Ka_s)*hm +
# -*- coding: utf-8 -*-
# Copyright 2020- Datastax, Inc. All rights reserved.
# Copyright 2020 Spotify Inc. All rights reserved.
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fileinput
import itertools
import logging
import os
import pathlib
import shlex
import socket
import subprocess
import time
from ssl import SSLContext, PROTOCOL_TLSv1_2, CERT_REQUIRED
from subprocess import PIPE
import yaml
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.util import Version
from retrying import retry
from medusa.host_man import HostMan
from medusa.network.hostname_resolver import HostnameResolver
from medusa.nodetool import Nodetool
from medusa.service.snapshot import SnapshotService
from medusa.utils import null_if_empty
class SnapshotPath(object):
def __init__(self, path, keyspace, table):
self.path = path
self.keyspace = keyspace
self.columnfamily = table
def list_files(self):
        # important to use rglob() to recursively descend into subdirs if there are any
return filter(lambda p: p.is_file(), self.path.rglob('*'))
class CqlSessionProvider(object):
def __init__(self, ip_addresses, cassandra_config):
self._ip_addresses = ip_addresses
self._auth_provider = None
self._ssl_context = None
self._cassandra_config = cassandra_config
if null_if_empty(cassandra_config.cql_username) and null_if_empty(cassandra_config.cql_password):
auth_provider = PlainTextAuthProvider(username=cassandra_config.cql_username,
password=cassandra_config.cql_password)
self._auth_provider = auth_provider
if cassandra_config.certfile is not None:
ssl_context = SSLContext(PROTOCOL_TLSv1_2)
ssl_context.load_verify_locations(cassandra_config.certfile)
ssl_context.verify_mode = CERT_REQUIRED
if cassandra_config.usercert is not None and cassandra_config.userkey is not None:
ssl_context.load_cert_chain(
certfile=cassandra_config.usercert,
keyfile=cassandra_config.userkey)
self._ssl_context = ssl_context
load_balancing_policy = WhiteListRoundRobinPolicy(ip_addresses)
self._execution_profiles = {
'local': ExecutionProfile(load_balancing_policy=load_balancing_policy)
}
def new_session(self, retry=False):
"""
Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max
number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If
no session can be created after the max retries is reached, an exception is raised.
"""
cluster = Cluster(contact_points=self._ip_addresses,
auth_provider=self._auth_provider,
execution_profiles=self._execution_profiles,
ssl_context=self._ssl_context)
if retry:
max_retries = 5
attempts = 0
while attempts < max_retries:
try:
session = cluster.connect()
return CqlSession(session, self._cassandra_config.resolve_ip_addresses)
except Exception as e:
logging.debug('Failed to create session', exc_info=e)
delay = 5 * (2 ** (attempts + 1))
time.sleep(delay)
attempts = attempts + 1
            raise CassandraCqlSessionException('Could not establish CQL session '
                                               'after {attempts} attempts'.format(attempts=attempts))
else:
session = cluster.connect()
return CqlSession(session, self._cassandra_config.resolve_ip_addresses)
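# Example usage (illustrative sketch only; `cassandra_config` stands for a parsed
# medusa cassandra config section and the IP address is a placeholder):
#
#   provider = CqlSessionProvider(['127.0.0.1'], cassandra_config)
#   with provider.new_session(retry=True) as session:
#       print(session.tokenmap())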
class CqlSession(object):
EXCLUDED_KEYSPACES = ['system_traces']
def __init__(self, session, resolve_ip_addresses=True):
self._session = session
self.hostname_resolver = HostnameResolver(resolve_ip_addresses)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
def shutdown(self):
self.session.shutdown()
self.cluster.shutdown()
@property
def cluster(self):
return self._session.cluster
@property
def session(self):
return self._session
def token(self):
listen_address = self.cluster.contact_points[0]
token_map = self.cluster.metadata.token_map
for token, host in token_map.token_to_host_owner.items():
if host.address == listen_address:
return token.value
raise RuntimeError('Unable to get current token')
def placement(self):
logging.debug('Checking placement using dc and rack...')
listen_address = socket.gethostbyname(self.cluster.contact_points[0])
token_map = self.cluster.metadata.token_map
for host in token_map.token_to_host_owner.values():
socket_host = self.hostname_resolver.resolve_fqdn(listen_address)
logging.debug('Checking host {} against {}/{}'.format(host.address, listen_address, socket_host))
if host.address == listen_address or self.hostname_resolver.resolve_fqdn(host.address) == socket_host:
return host.datacenter, host.rack
raise RuntimeError('Unable to get current placement')
def tokenmap(self):
token_map = self.cluster.metadata.token_map
dc_rack_pair = self.placement()
def get_host(host_token_pair):
return host_token_pair[0]
def get_host_address(host_token_pair):
return host_token_pair[0].address
def get_token(host_token_pair):
return host_token_pair[1]
host_token_pairs = sorted(
[(host, token.value) for token, host in token_map.token_to_host_owner.items()],
key=get_host_address
)
host_tokens_groups = itertools.groupby(host_token_pairs, key=get_host)
host_tokens_pairs = [(host, list(map(get_token, tokens))) for host, tokens in host_tokens_groups]
return {
self.hostname_resolver.resolve_fqdn(host.address): {
'tokens': tokens,
'is_up': host.is_up,
'rack': host.rack,
'dc': host.datacenter
}
for host, tokens in host_tokens_pairs
if host.datacenter == dc_rack_pair[0]
}
def dump_schema(self):
keyspaces = self.session.cluster.metadata.keyspaces
return '\n\n'.join(metadata.export_as_string()
for keyspace, metadata in keyspaces.items()
if keyspace not in self.EXCLUDED_KEYSPACES)
def schema_path_mapping(self):
query = 'SELECT keyspace_name, columnfamily_name, cf_id FROM system.schema_columnfamilies'
return (row for row in self.session.execute(query)
if row.keyspace_name not in self.EXCLUDED_KEYSPACES)
def execute(self, query):
return self.session.execute(query)
class CassandraConfigReader(object):
DEFAULT_CASSANDRA_CONFIG = '/etc/cassandra/cassandra.yaml'
def __init__(self, cassandra_config=None, release_version=None):
self._release_version = release_version
config_file = pathlib.Path(cassandra_config or self.DEFAULT_CASSANDRA_CONFIG)
if not config_file.is_file():
raise RuntimeError('{} is not a file'.format(config_file))
with open(config_file, 'r') as f:
self._config = yaml.load(f, Loader=yaml.BaseLoader)
@property
def root(self):
data_file_directories = self._config.get('data_file_directories', ['/var/lib/cassandra/data'])
if not data_file_directories:
raise RuntimeError('data_file_directories must be properly configured')
if len(data_file_directories) > 1:
raise RuntimeError('Medusa only supports one data directory')
return pathlib.Path(data_file_directories[0])
@property
def commitlog_directory(self):
commitlog_directory = self._config.get('commitlog_directory', '/var/lib/cassandra/commitlog')
if not commitlog_directory:
raise RuntimeError('commitlog_directory must be properly configured')
return pathlib.Path(commitlog_directory)
@property
def saved_caches_directory(self):
saved_caches_directory = self._config.get('saved_caches_directory', '/var/lib/cassandra/saved_caches')
if not saved_caches_directory:
raise RuntimeError('saved_caches_directory must be properly configured')
return pathlib.Path(saved_caches_directory)
@property
def listen_address(self):
if 'listen_address' in self._config and self._config['listen_address']:
return self._config['listen_address']
return socket.gethostbyname(socket.getfqdn())
@property
def storage_port(self):
"""
SSL port, for legacy encrypted communication. The ssl_storage_port is unused unless enabled in
server_encryption_options. As of cassandra 4.0, this property is deprecated
as a single port can be used for either/both secure and insecure connections.
"""
if 'server_encryption_options' in self._config and \
self._config['server_encryption_options']['internode_encryption'] is not None and \
self._config['server_encryption_options']['internode_encryption'] != "none":
# Secure connections, ssl_storage_port is specified.
if 'ssl_storage_port' in self._config and self._config['ssl_storage_port'] is not None:
logging.warning("ssl_storage_port is deprecated as of Apache Cassandra 4.x")
return self._config['ssl_storage_port']
else:
# ssl_storage_port not specified, and found a version of c* 4+
if self._release_version is not None and Version(self._release_version) >= Version('4-a') and \
'storage_port' in self._config and self._config['storage_port'] is not None:
return self._config['storage_port']
return "7001"
# Insecure connection handling of storage_port for any version of c*
if 'storage_port' in self._config and self._config['storage_port'] is not None:
return self._config['storage_port']
return "7000"
@property
def native_port(self):
"""
Condition for client encryption enabled.
When setting native_transport_port_ssl, expecting that non-encrypted will still be over existing
native_transport_port. The encrypted will be over the native_transport_port_ssl.
When not setting an alternate native_transport_port_ssl , the encrypted will be over
the existing native_transport_port
"""
# Conditions for client encryption enabled, default encrypted port.
if 'client_encryption_options' in self._config and \
self._config['client_encryption_options']['enabled'] is not None and \
self._config['client_encryption_options']['enabled'] == "true":
if 'native_transport_port_ssl' in self._config and \
self._config['native_transport_port_ssl'] is not None:
return self._config['native_transport_port_ssl']
elif 'native_transport_port' in self._config and \
self._config['native_transport_port'] is not None:
return self._config['native_transport_port']
return "9142"
if 'native_transport_port' in self._config and self._config['native_transport_port']:
return self._config['native_transport_port']
return "9042"
@property
def rpc_port(self):
if 'rpc_port' in self._config and self._config['rpc_port']:
return self._config['rpc_port']
return "9160"
@property
def seeds(self):
seeds = list()
if 'seed_provider' in self._config and self._config['seed_provider'] and \
self._config['seed_provider'][0]['class_name'].endswith('SimpleSeedProvider'):
return self._config.get('seed_provider')[0]['parameters'][0]['seeds'].replace(' ', '').split(',')
return seeds
class Cassandra(object):
SNAPSHOT_PATTERN = '*/*/snapshots/{}'
SNAPSHOT_PREFIX = 'medusa-'
def __init__(self, config, contact_point=None, release_version=None):
self._release_version = release_version
cassandra_config = config.cassandra
self._start_cmd = shlex.split(cassandra_config.start_cmd)
self._stop_cmd = shlex.split(cassandra_config.stop_cmd)
self._is_ccm = int(shlex.split(cassandra_config.is_ccm)[0])
self._os_has_systemd = self._has_systemd()
self._nodetool = Nodetool(cassandra_config)
logging.warning('is ccm : {}'.format(self._is_ccm))
config_reader = CassandraConfigReader(cassandra_config.config_file, release_version)
self._cassandra_config_file = cassandra_config.config_file
self._root = config_reader.root
self._commitlog_path = config_reader.commitlog_directory
self._saved_caches_path = config_reader.saved_caches_directory
self._hostname = contact_point if contact_point is not None else config_reader.listen_address
self._cql_session_provider = CqlSessionProvider(
[self._hostname],
cassandra_config)
self._storage_port = config_reader.storage_port
self._native_port = config_reader.native_port
self._rpc_port = config_reader.rpc_port
self.seeds = config_reader.seeds
self.grpc_config = config.grpc
self.kubernetes_config = config.kubernetes
self.snapshot_service = SnapshotService(config=config).snapshot_service
@staticmethod
def _has_systemd():
try:
result = subprocess.run(['systemctl', '--version'], stdout=PIPE, stderr=PIPE)
logging.debug('This server has systemd: {}'.format(result.returncode == 0))
return result.returncode == 0
except (AttributeError, FileNotFoundError):
# AttributeError is thrown when subprocess.run is not found, which happens on Trusty
# Trusty doesn't have systemd, so the semantics of this code still hold
logging.debug('This server has systemd: False')
return False
def new_session(self):
return self._cql_session_provider.new_session()
@property
def root(self):
return self._root
@property
def commit_logs_path(self):
return self._commitlog_path
@property
def saved_caches_path(self):
return self._saved_caches_path
@property
def hostname(self):
return self._hostname
@property
def storage_port(self):
return int(self._storage_port)
@property
def native_port(self):
return int(self._native_port)
@property
def rpc_port(self):
return int(self._rpc_port)
@property
def release_version(self):
return self._release_version
class Snapshot(object):
def __init__(self, parent, tag):
self._parent = parent
self._tag = tag
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logging.debug('Cleaning up snapshot')
self.delete()
@property
def cassandra(self):
return self._parent
@property
def tag(self):
return self._tag
@property
def root(self):
return self._parent.root
def find_dirs(self):
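            # SNAPSHOT_PATTERN is '*/*/snapshots/<tag>', i.e. it matches
            # <keyspace>/<table>/snapshots/<tag> under the Cassandra data root;
            # the first two path components become the SnapshotPath keyspace/table.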
return [
SnapshotPath(
pathlib.Path(snapshot_dir),
*snapshot_dir.relative_to(self.root).parts[:2]
)
for snapshot_dir in self.root.glob(
Cassandra.SNAPSHOT_PATTERN.format(self._tag)
)
if (snapshot_dir.is_dir() and snapshot_dir.parts[-4]
not in CqlSession.EXCLUDED_KEYSPACES)
]
def delete(self):
self._parent.delete_snapshot(self._tag)
def __repr__(self):
return '{}<{}>'.format(self.__class__.__qualname__, self._tag)
def create_snapshot(self, backup_name):
tag = "{}{}".format(self.SNAPSHOT_PREFIX, backup_name)
if not self.snapshot_exists(tag):
self.snapshot_service.create_snapshot(tag=tag)
return Cassandra.Snapshot(self, tag)
def delete_snapshot(self, tag):
if self.snapshot_exists(tag):
self.snapshot_service.delete_snapshot(tag=tag)
def list_snapshotnames(self):
return {
snapshot.name
            for snapshot in
"""
Views for the Jetpack application
"""
import commonware.log
import os
import shutil
import codecs
import tempfile
import urllib2
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.views.static import serve
from django.shortcuts import get_object_or_404
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseForbidden, HttpResponseServerError,
HttpResponseNotAllowed, Http404) # , QueryDict
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from django.db import IntegrityError
from django.db.models import Q, ObjectDoesNotExist
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from django.template.defaultfilters import escape
from django.conf import settings
from django.utils import simplejson
from django.forms.fields import URLField
from base.shortcuts import get_object_with_related_or_404
from utils import validator
from utils.helpers import pathify, render, render_json
from jetpack.package_helpers import (get_package_revision,
create_package_from_xpi)
from jetpack.models import (Package, PackageRevision, Module, Attachment, SDK,
EmptyDir)
from jetpack.errors import FilenameExistException, DependencyException
from person.models import Profile
log = commonware.log.getLogger('f.jetpack')
def browser(request, page_number=1, type_id=None, username=None):
"""
Display a list of addons or libraries with pages
Filter based on the request (type_id, username).
"""
# calculate which template to use
template_suffix = ''
packages = Package.objects.active()
author = None
if username:
try:
profile = Profile.objects.get_user_by_username_or_nick(username)
except ObjectDoesNotExist:
raise Http404
author = profile.user
packages = packages.filter(author__pk=author.pk)
template_suffix = '%s_user' % template_suffix
    other_packages_number = None
    if type_id:
other_type = 'l' if type_id == 'a' else 'a'
other_packages_number = len(packages.filter(type=other_type))
packages = packages.filter(type=type_id)
template_suffix = '%s_%s' % (template_suffix,
settings.PACKAGE_PLURAL_NAMES[type_id])
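        # e.g. type_id 'a' combined with a username yields a template name like
        # 'package_browser_user_<plural>.html', where the plural segment comes
        # from settings.PACKAGE_PLURAL_NAMES (values not shown here).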
packages = packages.sort_recently_active()
limit = request.GET.get('limit', settings.PACKAGES_PER_PAGE)
try:
pager = Paginator(
packages,
per_page=limit,
orphans=1
).page(page_number)
except (EmptyPage, InvalidPage):
raise Http404
return render(request,
'package_browser%s.html' % template_suffix, {
'pager': pager,
'single': False,
'author': author,
'other_packages_number': other_packages_number
})
def view_or_edit(request, id_number, type_id, revision_number=None,
version_name=None, latest=False):
"""
Edit if user is the author, otherwise view
"""
revision = get_package_revision(id_number, type_id,
revision_number, version_name, latest)
edit_available = True
if revision.package.deleted:
edit_available = False
if not request.user.is_authenticated():
raise Http404
try:
Package.objects.active_with_deleted(viewer=request.user).get(
pk=revision.package.pk)
except ObjectDoesNotExist:
raise Http404
if not revision.package.active:
if not request.user.is_authenticated():
raise Http404
try:
Package.objects.active_with_disabled(viewer=request.user).get(
pk=revision.package.pk)
except ObjectDoesNotExist:
raise Http404
if (edit_available
and request.user.is_authenticated()
and request.user.pk == revision.author.pk):
return edit(request, revision)
else:
return view(request, revision)
@login_required
def edit(request, revision):
"""
Edit package - only for the author
"""
if request.user.pk != revision.author.pk:
# redirecting to view mode without displaying an error
        messages.info(request,
                      "Not sufficient privileges to edit the source. "
                      "You've been redirected to view mode.")
return HttpResponseRedirect(
reverse(
"jp_%s_revision_details" % revision.package.get_type_name(),
args=[revision.package.id_number, revision.revision_number])
)
#return HttpResponseForbidden('You are not the author of this Package')
libraries = revision.dependencies.all()
library_counter = len(libraries)
sdk_list = None
if revision.package.is_addon():
library_counter += 1
sdk_list = SDK.objects.all()
return render(request,
"%s_edit.html" % revision.package.get_type_name(), {
'revision': revision,
'item': revision.package,
'single': True,
'libraries': libraries,
'library_counter': library_counter,
'readonly': False,
'edit_mode': True,
'sdk_list': sdk_list})
def view(request, revision):
"""
Show package - read only
"""
libraries = revision.dependencies.all()
library_counter = len(libraries)
if revision.package.is_addon():
library_counter += 1
# prepare the json for the Tree
tree = simplejson.dumps({'Lib': revision.get_modules_tree(),
'Data': revision.get_attachments_tree(),
'Plugins': revision.get_dependencies_tree()})
return render(request,
"%s_view.html" % revision.package.get_type_name(), {
'revision': revision,
'libraries': libraries,
'library_counter': library_counter,
'readonly': True,
'tree': tree
})
def download_module(request, pk):
"""
return a JSON with all module info
"""
module = get_object_with_related_or_404(Module, pk=pk)
if not module.can_view(request.user):
log_msg = ("[security] Attempt to download private module (%s) by "
"non-owner (%s)" % (pk, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this module.')
return HttpResponse(module.get_json())
def get_module(request, id_number, revision_number, filename):
"""
return a JSON with all module info
"""
try:
revision = PackageRevision.objects.get(
package__id_number=id_number,
revision_number=revision_number)
mod = revision.modules.get(filename=filename)
    except (PackageRevision.DoesNotExist, Module.DoesNotExist):
log_msg = 'No such module %s' % filename
log.error(log_msg)
raise Http404
if not mod.can_view(request.user):
log_msg = ("[security] Attempt to download private module (%s) by "
"non-owner (%s)" % (mod, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this module.')
return HttpResponse(mod.get_json())
@login_required
def copy(request, id_number, type_id,
revision_number=None, version_name=None):
"""
Copy package - create a duplicate of the Package, set user as author
"""
source = get_package_revision(id_number, type_id, revision_number,
version_name)
try:
package = Package.objects.get(
full_name=source.package.get_copied_full_name(),
author__username=request.user.username
)
except Package.DoesNotExist:
package = source.package.copy(request.user)
source.save_new_revision(package)
return render_json(request,
"json/%s_copied.json" % package.get_type_name(),
{'revision': source})
return HttpResponseForbidden('You already have a %s with that name' %
escape(source.package.get_type_name()))
@login_required
def disable(request, id_number):
"""
Disable Package and return confirmation
"""
package = get_object_or_404(Package, id_number=id_number)
if request.user.pk != package.author.pk:
log_msg = 'User %s wanted to disable not his own Package %s.' % (
request.user, id_number)
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
package.get_type_name()))
package.disable()
return render_json(request,
"json/package_disabled.json",
{'package': package})
@login_required
def activate(request, id_number):
"""
Undelete Package and return confirmation
"""
package = get_object_or_404(Package, id_number=id_number)
if request.user.pk != package.author.pk:
log_msg = ("[security] Attempt to activate package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
package.get_type_name()))
package.enable()
return render_json(request,
"json/package_activated.json",
{'package': package})
@login_required
def delete(request, id_number):
"""
Delete Package and return confirmation
"""
package = get_object_or_404(Package, id_number=id_number)
if request.user.pk != package.author.pk:
log_msg = ("[security] Attempt to delete package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
package.get_type_name()))
package.delete()
return render_json(request, "json/package_deleted.json")
@require_POST
@login_required
def add_module(request, id_number, type_id, revision_number=None,
version_name=None):
"""
Add new module to the PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number,
version_name)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to add a module to package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
revision.package.get_type_name()))
filename = request.POST.get('filename')
mod = Module(
filename=filename,
author=request.user,
code="""// %s.js - %s's module
// author: %s""" % (filename, revision.package.full_name,
request.user.get_profile())
)
try:
mod.save()
revision.module_add(mod)
except FilenameExistException, err:
mod.delete()
return HttpResponseForbidden(escape(str(err)))
return render_json(request,
"json/module_added.json",
{'revision': revision, 'module': mod})
@require_POST
@login_required
def rename_module(request, id_number, type_id, revision_number):
"""
Rename a module in a PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to rename a module to package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
old_name = request.POST.get('old_filename')
new_name = request.POST.get('new_filename')
if old_name == 'main':
return HttpResponseForbidden(
'Sorry, you cannot change the name of the main module.'
)
if not revision.validate_module_filename(new_name):
return HttpResponseForbidden(
('Sorry, there is already a module in your add-on '
'with the name "%s". Each module in your add-on '
'needs to have a unique name.') % new_name
)
modules = revision.modules.all()
module = None
for mod in modules:
if mod.filename == old_name:
module = mod
if not module:
        log_msg = 'Attempt to rename a non-existing module %s from %s.' % (
            old_name, id_number)
log.warning(log_msg)
return HttpResponseForbidden(
'There is no such module in %s' % escape(
revision.package.full_name))
module.filename = new_name
revision.add_commit_message('module renamed')
revision.update(module)
return render_json(request,
"json/module_renamed.json",
{'revision': revision, 'module': module})
@require_POST
@login_required
def remove_module(request, id_number, type_id, revision_number):
"""
Remove module from PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to remove a module from package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
filenames = request.POST.get('filename').split(',')
revision.add_commit_message('module removed')
try:
removed_modules, removed_dirs = revision.modules_remove_by_path(
filenames)
except Module.DoesNotExist:
        log_msg = 'Attempt to delete non-existing module(s) %s from %s.' % (
            str(filenames), id_number)
log.warning(log_msg)
return HttpResponseForbidden(
'There is no such module in %s' % escape(
revision.package.full_name))
return render_json(request,
"json/module_removed.json",
{'revision': revision,
'removed_modules': simplejson.dumps(removed_modules),
'removed_dirs': simplejson.dumps(removed_dirs)})
@require_POST
@login_required
def add_folder(request, id_number, type_id, revision_number):
" adds an EmptyDir to a revision "
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to add a folder to package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
foldername, root = (
request.POST.get('name', ''),
request.POST.get('root_dir'))
dir = EmptyDir(name=foldername, author=request.user, root_dir=root)
try:
dir.save()
revision.folder_add(dir)
except FilenameExistException, err:
dir.delete()
return HttpResponseForbidden(escape(str(err)))
return render_json(request,
"json/folder_added.json",
{'revision': revision, 'folder': dir})
@require_POST
@login_required
def remove_folder(request, id_number, type_id, revision_number):
" removes an EmptyDir from a revision "
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to remove a folder from package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
foldername, root = (
pathify(request.POST.get('name', '')),
request.POST.get('root_dir'))
try:
folder = revision.folders.get(name=foldername, root_dir=root)
except EmptyDir.DoesNotExist:
response = None
if root == 'data':
response = revision.attachment_rmdir(foldername)
if not response:
        log_msg = 'Attempt to delete
self.gold = _get_annotations(self.tarsqidoc_gold.tags)
self.system = _get_annotations(self.tarsqidoc_system.tags)
self.events = EntityStatistics(self, EVENT, display_dir, display_choices)
self.timexes = EntityStatistics(self, TIMEX, display_dir, display_choices)
self.alinks = LinkStatistics(self.filename, ALINK, self.gold, self.system)
self.slinks = LinkStatistics(self.filename, SLINK, self.gold, self.system)
self.tlinks = LinkStatistics(self.filename, TLINK, self.gold, self.system)
def __str__(self):
return "%s\n%s\n%s\n%s\n%s" % (self.events, self.timexes,
self.alinks, self.slinks, self.tlinks)
class DirectoryStatistics(FileStatistics):
def __init__(self, directory, statslist):
self.filename = directory
self.statistics = statslist
self.events = AggregateEntityStatistics(directory, [s.events for s in statslist])
self.timexes = AggregateEntityStatistics(directory, [s.timexes for s in statslist])
self.alinks = AggregateLinkStatistics(directory, [s.alinks for s in statslist])
self.slinks = AggregateLinkStatistics(directory, [s.slinks for s in statslist])
self.tlinks = AggregateLinkStatistics(directory, [s.tlinks for s in statslist])
def __str__(self):
return "%s\n%s\n%s\n%s\n%s" % (
self.events, self.timexes, self.alinks, self.slinks, self.tlinks)
def pp(self):
print("\n%s\n" % self)
class EntityStatistics(object):
def __init__(self, file_statistics, tagname, display_dir, display_choices):
self.filename = file_statistics.filename
self.tagname = tagname
self.tarsqidoc_gold = file_statistics.tarsqidoc_gold
self.tarsqidoc_system = file_statistics.tarsqidoc_system
self.gold_tags = file_statistics.gold[self.tagname]
self.system_tags = file_statistics.system[self.tagname]
self.tp = 0
self.fp = 0
self.fn = 0
self._collect_counts()
# the following code presents the differences between the gold and the
# system, the underlying counting should probably be used for the P&R as
# well (allowing strict versus relaxed matching, whereas the above only
# has strict matching).
if display_dir is not None:
Viewer(self, display_dir, display_choices)
def __str__(self):
return "<Statistics %s %s tp:%s fp:%s fn:%s precision=%s recall=%s f-score=%s>" % \
(self.tagname, self.filename, self.tp, self.fp, self.fn,
_as_float_string(self.precision()),
_as_float_string(self.recall()),
_as_float_string(self.fscore()))
def precision(self):
return precision(self.tp, self.fp)
def recall(self):
return recall(self.tp, self.fn)
def fscore(self):
return fscore(self.tp, self.fp, self.fn)
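    # The helpers above delegate to module-level precision/recall/fscore; the
    # intended formulas are precision = tp / (tp + fp), recall = tp / (tp + fn)
    # and F1 = 2 * P * R / (P + R).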
def _collect_counts(self):
"""Collect the counts for true positives, false positives and false
negatives."""
# TODO. This does not take the full-range into account and therefore
# gives much lower numbers for cases where multi-token events were
# imported. It also does not allow for relaxed matching.
for t in self.system_tags.keys():
if t in self.gold_tags:
self.tp += 1
else:
self.fp += 1
for t in self.gold_tags.keys():
if t not in self.system_tags:
self.fn += 1
class LinkStatistics(object):
def __init__(self, filename, tagname, gold_annotations, system_annotations):
self.filename = filename
self.tagname = tagname
self.gold_tags = gold_annotations[tagname]
self.system_tags = system_annotations[tagname]
self.overlap = self._overlap(self.gold_tags, self.system_tags)
self.correct = 0
self.incorrect = 0
for offset in self.overlap:
if self.gold_tags[offset][RELTYPE] == self.system_tags[offset][RELTYPE]:
self.correct += 1
else:
self.incorrect += 1
def __str__(self):
accuracy = self.accuracy()
astring = "nil" if accuracy is None else "%.2f" % accuracy
return "<Statistics %s %s correct:%s incorrect:%s accuracy:%s>" % \
(self.tagname, self.filename, self.correct, self.incorrect, astring)
@staticmethod
def _overlap(annotations1, annotations2):
"""Now just gets the keys that both have in common, should include links where
source and target are reversed."""
return [val for val in annotations1 if val in annotations2]
def accuracy(self):
try:
return self.correct / (self.correct + self.incorrect)
except ZeroDivisionError:
return None
class AggregateEntityStatistics(EntityStatistics):
def __init__(self, directory, statistics_list):
self.tagname = statistics_list[0].tagname
self.filename = directory
self.statistics = statistics_list
self.tp = sum([stats.tp for stats in statistics_list])
self.fp = sum([stats.fp for stats in statistics_list])
self.fn = sum([stats.fn for stats in statistics_list])
class AggregateLinkStatistics(LinkStatistics):
def __init__(self, directory, statistics_list):
self.tagname = statistics_list[0].tagname
self.filename = directory
self.statistics = statistics_list
self.correct = sum([stats.correct for stats in statistics_list])
self.incorrect = sum([stats.incorrect for stats in statistics_list])
class Viewer(object):
"""Creates the HTML files that show the differences between the entities in
two files."""
def __init__(self, entity_statistics, display_dir, display_choices):
"""Take the data from the EntityStatistics instance (which got most of those
from the FileStatistics instance)."""
self.entity_stats = entity_statistics
self.filename = entity_statistics.filename
self.tagname = entity_statistics.tagname
self.tarsqidoc_gold = entity_statistics.tarsqidoc_gold
self.tarsqidoc_system = entity_statistics.tarsqidoc_system
self.gold_tags = entity_statistics.gold_tags
self.system_tags = entity_statistics.system_tags
self.display_dir = display_dir
self.display_choices = display_choices
self._build_idxs()
self._align_tags()
self._display_aligned_tags()
def _build_idxs(self):
"""Builds indexes that store the begin and end offset of s, ng and vg
tags. In addition, it stores the end offset of a lex tag and the lex
tag's associated pos."""
self.open_idx = { 's': set(), 'ng': set(), 'vg': set() }
self.close_idx = { 's': set(), 'ng': set(), 'vg': set(), 'lex': {} }
s_tags = self.tarsqidoc_system.tags.find_tags('s')
vg_tags = self.tarsqidoc_system.tags.find_tags('vg')
ng_tags = self.tarsqidoc_system.tags.find_tags('ng')
lex_tags = self.tarsqidoc_system.tags.find_tags('lex')
self._update_idxs(s_tags, 's')
self._update_idxs(ng_tags, 'ng')
self._update_idxs(vg_tags, 'vg')
for lex in lex_tags:
self.close_idx['lex'][lex.end] = lex.attrs['pos']
def _update_idxs(self, tags, tagname):
for t in tags:
self.open_idx[tagname].add(t.begin)
self.close_idx[tagname].add(t.end)
def _align_tags(self):
"""Takes two lists of annotations ordered on text position and returns
them as lists of paired up annotations. Annotations will only pair up if
they overlap, if a gold or system annotation does not overlap with a
counterpart on the other side then it will be in a pair with None."""
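        # Illustrative example (hypothetical offsets): gold spans [(0,5), (10,15)]
        # and system spans [(3,8), (20,25)] would align as
        # [((0,5),(3,8)), ((10,15),None), (None,(20,25))].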
gold = [EntityAnnotation(k, v) for k, v in self.gold_tags.items()]
system = [EntityAnnotation(k, v) for k, v in self.system_tags.items()]
# Removing duplicates also sorts the annotations
gold = self._remove_duplicates(gold)
system = self._remove_duplicates(system)
self.alignments = []
while gold or system:
if not gold:
self.alignments.append(Alignment(self, None, system.pop(0)))
elif not system:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].overlaps_with(system[0]):
self.alignments.append(Alignment(self, gold.pop(0), system.pop(0)))
elif gold[0].end < system[0].begin:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].begin > system[0].end:
self.alignments.append(Alignment(self, None, system.pop(0)))
else:
exit("ERROR: no option available, infinite loop starting...")
@staticmethod
def _remove_duplicates(annotations):
"""This is to remove duplicates from the annotations. The reason why
this was put in is that with tag import there are cases when an imported
tag spans two chunks and it will be imported into each chunk. This needs
to be fixed in the tag import of course, but in th emean time we do not
want it dilute results here. The result is sorted on text position."""
tmp = {}
for annotation in sorted(annotations):
tmp[annotation.offsets()] = annotation
return sorted(tmp.values())
def _display_aligned_tags(self):
# NOTE: when we run this we are in the ttk directory, even though we
# started in the testing subdirectory, adjust paths as needed
fname = os.path.join(self.display_dir, os.path.basename(self.filename))
fh = open("%s.%s.html" % (fname, self.tagname), 'w')
fh.write("<html>\n<head>%s</head>\n\n" % CSS)
fh.write("<body class=scores>\n\n")
fh.write("<h2>Precision and recall on this file</h2>\n\n")
self._display_p_and_r(fh)
fh.write("<h2>Aligning the key and response %s tags</h2>\n\n" % self.tagname)
self._display_legend(fh)
for alignment in self.alignments:
if self.display_choices[alignment.status]:
alignment.html(fh)
fh.write("</body>\n</html>\n")
def _display_p_and_r(self, fh):
stats = self.entity_stats
# P&R as calculated on the EntityStatistics
p1, r1, f1 = stats.precision(), stats.recall(), stats.fscore()
        # P&R as calculated here, using the alignments array, which takes the
        # full-range attribute into account and therefore gives much higher
        # results in cases where we imported tags.
tp, fp, fn = self._count_matches(strict=True)
p2, r2, f2 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
tp, fp, fn = self._count_matches(strict=False)
p3, r3, f3 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
self._p_and_r_table(fh, ('strict', 'relaxed'), (p2, p3), (r2, r3), (f2, f3))
def _count_matches(self, strict=True):
tp, fp, fn = 0, 0, 0
for alignment in self.alignments:
if alignment.status == NO_MATCH_FP:
fp += 1
elif alignment.status == NO_MATCH_FN:
fn += 1
elif alignment.status == PARTIAL_MATCH:
if strict:
fp += 1
fn += 1
else:
tp += 1
elif alignment.status == EXACT_MATCH:
tp += 1
return (tp, fp, fn)
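        # Illustration of the strict/relaxed distinction (counts are hypothetical):
        # with 8 exact matches, 2 partial matches, 1 false positive and 1 false
        # negative, strict counting gives tp=8, fp=3, fn=3 while relaxed counting
        # gives tp=10, fp=1, fn=1, which is why the relaxed column in the table
        # written by _display_p_and_r reports higher scores.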
def _p_and_r_table(self, fh, headers, p_scores, r_scores, f_scores):
fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
nbsp, p_str, r_str, f_str = ' ', 'precision', 'recall', 'f-score'
HTML.row(fh, [nbsp] + list(headers))
HTML.row(fh, [p_str] + [ _as_float_string(p) for p in p_scores])
HTML.row(fh, [r_str] + [ _as_float_string(r) for r in r_scores])
HTML.row(fh, [f_str] + [ _as_float_string(f) for f in f_scores])
fh.write("</table>\n\n")
def _display_legend(self, fh):
def img(src): return '<img src="icons/%s.png" height=20>' % src
fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
em = len([a for a in self.alignments if a.status == EXACT_MATCH])
pm = len([a for a in self.alignments if a.status == PARTIAL_MATCH])
fp = len([a for a in self.alignments if a.status == NO_MATCH_FP])
fn = len([a for a in self.alignments if a.status == NO_MATCH_FN])
HTML.row(fh, [img("check-green"), 'exact match', em])
HTML.row(fh, [img("check-orange"), 'partial match', pm])
HTML.row(fh, [img('cross-red') + 'p',
'mismatch, false positive (precision error)', fp])
HTML.row(fh, [img('cross-red') + 'r',
'mismatch, false negative (recall error)', fn])
fh.write("</table>\n")
icons = { EXACT_MATCH: img('check-green'),
PARTIAL_MATCH: img('check-orange'),
NO_MATCH_FP: img('cross-red') + 'p',
NO_MATCH_FN: img('cross-red') + 'r' }
showing = [icons[choice]
for choice in DISPLAY_CHOICES
if self.display_choices[choice] is True]
fh.write("<p class=bordered>Showing: %s</p>\n"
% ' '.join(showing))
class EntityAnnotation(object):
"""Simple interface for an entity annotation."""
to system transactions.",
)
enable_team_auditing: Optional[bool] = Field(
default=True,
concourse_env_var="CONCOURSE_ENABLE_TEAM_AUDITING",
description="Enable auditing for all api requests connected to teams.",
)
enable_volume_auditing: Optional[bool] = Field(
default=True,
concourse_env_var="CONCOURSE_ENABLE_VOLUME_AUDITING",
description="Enable auditing for all api requests connected to volumes.",
)
enable_worker_auditing: Optional[bool] = Field(
default=True,
concourse_env_var="CONCOURSE_ENABLE_WORKER_AUDITING",
description="Enable auditing for all api requests connected to workers.",
)
encryption_key: SecretStr = Field(
default_factory=partial(
# Using floor division to produce an int instead of float
secrets.token_hex,
CONCOURSE_ENCRYPTION_KEY_REQUIRED_LENGTH // 2,
),
concourse_env_var="CONCOURSE_ENCRYPTION_KEY",
env_transform=lambda _: _.get_secret_value(),
description="A 16 or 32 length key used to encrypt sensitive information before storing it in the database.",
)
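    # Illustration of the default above: assuming CONCOURSE_ENCRYPTION_KEY_REQUIRED_LENGTH
    # is 32, the factory calls secrets.token_hex(16), and since every byte becomes two hex
    # digits the result is a 32-character key, which satisfies the stated requirement.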
garden_request_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GARDEN_REQUEST_TIMEOUT",
description="How long to wait for requests to Garden to complete. 0 means no timeout. (default: 5m)",
)
gc_check_recycle_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_CHECK_RECYCLE_PERIOD",
description="Period after which to reap checks that are completed. (default: 1m)",
)
gc_failed_grace_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_FAILED_GRACE_PERIOD",
description="Period after which failed containers will be garbage collected (default: 120h)",
)
gc_hijack_grace_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_HIJACK_GRACE_PERIOD",
description="Period after which hijacked containers will be garbage collected (default: 5m)",
)
gc_interval: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_INTERVAL",
description="Interval on which to perform garbage collection. (default: 30s)",
)
gc_missing_grace_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_MISSING_GRACE_PERIOD",
description="Period after which to reap containers and volumes that were created but went missing from the worker. (default: 5m)",
)
gc_one_off_grace_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_ONE_OFF_GRACE_PERIOD",
description="Period after which one_off build containers will be garbage_collected. (default: 5m)",
)
gc_var_source_recycle_period: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GC_VAR_SOURCE_RECYCLE_PERIOD",
description="Period after which to reap var_sources that are not used. (default: 5m)",
)
github_ca_cert: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITHUB_CA_CERT",
description="CA certificate of GitHub Enterprise deployment",
)
github_client_id: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITHUB_CLIENT_ID",
description="(Required) Client id",
)
github_client_secret: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITHUB_CLIENT_SECRET",
description="(Required) Client secret",
)
github_host: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITHUB_HOST",
description="Hostname of GitHub Enterprise deployment (No scheme, No trailing slash)",
)
github_main_team_concourse_team: Optional[str] = Field(
"mitodl:odl-engineering", concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_TEAM"
)
github_main_team_org: Optional[str] = Field(
"mitodl", concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_ORG"
)
github_main_team_user: str = Field(
"odlbot", concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_USER"
)
gitlab_client_id: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITLAB_CLIENT_ID",
description="(Required) Client id",
)
gitlab_client_secret: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITLAB_CLIENT_SECRET",
description="(Required) Client secret",
)
gitlab_host: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GITLAB_HOST",
description="Hostname of Gitlab Enterprise deployment (Include scheme, No trailing slash)",
)
global_resource_check_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_GLOBAL_RESOURCE_CHECK_TIMEOUT",
description="Time limit on checking for new versions of resources. (default: 1h)",
)
iframe_options: IframeOptions = Field(
IframeOptions.deny,
concourse_env_var="CONCOURSE_X_FRAME_OPTIONS",
env_transform=str,
)
influxdb_batch_duration: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_BATCH_DURATION",
description="The duration to wait before emitting a batch of points to InfluxDB, disregarding influxdb_batch_size. (default: 300s)",
)
influxdb_batch_size: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_BATCH_SIZE",
description="Number of points to batch together when emitting to InfluxDB. (default: 5000)",
)
influxdb_database: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_DATABASE",
description="InfluxDB database to write points to.",
)
influxdb_insecure_skip_verify: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_INSECURE_SKIP_VERIFY",
description="Skip SSL verification when emitting to InfluxDB.",
)
influxdb_password: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_PASSWORD",
description="InfluxDB server password.",
)
influxdb_url: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_URL",
description="InfluxDB server address to emit points to.",
)
influxdb_username: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INFLUXDB_USERNAME",
description="InfluxDB server username.",
)
intercept_idle_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_INTERCEPT_IDLE_TIMEOUT",
description="Length of time for a intercepted session to be idle before terminating. (default: 0m)",
)
job_scheduling_max_in_flight: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_JOB_SCHEDULING_MAX_IN_FLIGHT",
description="Maximum number of jobs to be scheduling at the same time (default: 32)",
)
ldap_bind_dn: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_BIND_DN",
description="(Required) Bind DN for searching LDAP users and groups. Typically this is a read_only user.",
)
ldap_bind_pw: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_BIND_PW",
description="(Required) Bind Password for the user specified by 'bind_dn'",
)
ldap_ca_cert: Optional[str] = Field(
None, concourse_env_var="CONCOURSE_LDAP_CA_CERT", description="CA certificate"
)
ldap_display_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_DISPLAY_NAME",
description="The auth provider name displayed to users on the login page",
)
ldap_group_search_base_dn: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN",
description="BaseDN to start the search from. For example 'cn=groups,dc=example,dc=com'",
)
ldap_group_search_filter: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_FILTER",
description="Optional filter to apply when searching the directory. For example '(objectClass=posixGroup)'",
)
ldap_group_search_group_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR",
description="Adds an additional requirement to the filter that an attribute in the group match the user's attribute value. The exact filter being added is: (<groupAttr>=<userAttr value>)",
)
ldap_group_search_name_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR",
description="The attribute of the group that represents its name.",
)
ldap_group_search_scope: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_SCOPE",
description="Can either be: 'sub' _ search the whole sub tree or 'one' _ only search one level. Defaults to 'sub'.",
)
ldap_group_search_user_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR",
description="Adds an additional requirement to the filter that an attribute in the group match the user's attribute value. The exact filter being added is: (<groupAttr>=<userAttr value>)",
)
ldap_host: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_HOST",
description="(Required) The host and optional port of the LDAP server. If port isn't supplied, it will be guessed based on the TLS configuration. 389 or 636.",
)
ldap_insecure_no_ssl: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_INSECURE_NO_SSL",
description="Required if LDAP host does not use TLS.",
)
ldap_insecure_skip_verify: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_INSECURE_SKIP_VERIFY",
description="Skip certificate verification",
)
ldap_start_tls: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_START_TLS",
description="Start on insecure port, then negotiate TLS",
)
ldap_user_search_base_dn: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_BASE_DN",
description="BaseDN to start the search from. For example 'cn=users,dc=example,dc=com'",
)
ldap_user_search_email_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR",
description="A mapping of attributes on the user entry to claims. Defaults to 'mail'.",
)
ldap_user_search_filter: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_FILTER",
description="Optional filter to apply when searching the directory. For example '(objectClass=person)'",
)
ldap_user_search_id_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_ID_ATTR",
description="A mapping of attributes on the user entry to claims. Defaults to 'uid'.",
)
ldap_user_search_name_attr: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR",
description="A mapping of attributes on the user entry to claims.",
)
ldap_user_search_scope: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_SCOPE",
description="Can either be: 'sub' _ search the whole sub tree or 'one' _ only search one level. Defaults to 'sub'.",
)
ldap_user_search_username: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USER_SEARCH_USERNAME",
description="Attribute to match against the inputted username. This will be translated and combined with the other filter as '(<attr>=<username>)'.",
)
ldap_username_prompt: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LDAP_USERNAME_PROMPT",
description="The prompt when logging in through the UI when __password_connector=ldap. Defaults to 'Username'.",
)
lets_encrypt_acme_url: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LETS_ENCRYPT_ACME_URL",
description="URL of the ACME CA directory endpoint. (default: https://acme_v02.api.letsencrypt.org/directory)",
)
lidar_scanner_interval: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LIDAR_SCANNER_INTERVAL",
description="Interval on which the resource scanner will run to see if new checks need to be scheduled (default: 10s)",
)
log_cluster_name: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_LOG_CLUSTER_NAME",
description="Log cluster name.",
)
log_db_queries: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_LOG_DB_QUERIES",
description="Log database queries.",
)
log_level: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_LOG_LEVEL",
description="Minimum level of logs to see. (default: info)",
)
main_team_config: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_CONFIG",
description="Configuration file for specifying team params",
)
main_team_github_org: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_ORG",
description="A whitelisted GitHub org",
)
main_team_github_team: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_TEAM",
description="A whitelisted GitHub team",
)
main_team_github_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_GITHUB_USER",
description="A whitelisted GitHub user",
)
main_team_ldap_group: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_LDAP_GROUP",
description="A whitelisted LDAP group",
)
main_team_ldap_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_LDAP_USER",
description="A whitelisted LDAP user",
)
main_team_local_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_LOCAL_USER",
description="A whitelisted local concourse user. These are the users you've added at web startup with the __add_local_user flag.",
)
main_team_oauth_group: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_OAUTH_GROUP",
description="A whitelisted OAuth2 group",
)
main_team_oauth_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_OAUTH_USER",
description="A whitelisted OAuth2 user",
)
main_team_oidc_group_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_OIDC_GROUP",
description="A whitelisted OIDC group",
)
main_team_oidc_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_OIDC_USER",
description="A whitelisted OIDC user",
)
main_team_saml_group_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_SAML_GROUP",
description="A whitelisted SAML group",
)
main_team_saml_user: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAIN_TEAM_SAML_USER",
description="A whitelisted SAML user",
)
max_active_containers_per_worker: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAX_ACTIVE_CONTAINERS_PER_WORKER",
description="Maximum allowed number of active containers per worker. Has effect only when used with limit_active_containers placement strategy. 0 means no limit. (default: 0)",
)
max_active_tasks_per_worker: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAX_ACTIVE_TASKS_PER_WORKER",
description="Maximum allowed number of active build tasks per worker. Has effect only when used with limit_active_tasks placement strategy. 0 means no limit. (default: 0)",
)
max_active_volumes_per_worker: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_MAX_ACTIVE_VOLUMES_PER_WORKER",
description="Maximum allowed number of | |
elif api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import ExpressRouteServiceProvidersOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
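    # Usage sketch for this dispatch pattern (the client class name and credential are
    # assumptions, they are not defined in this file): instantiating the multi-API client
    # with api_version="2020-04-01" makes every property below import its operation class
    # from the matching versioned package, e.g.
    #     client = NetworkManagementClient(credential, subscription_id, api_version="2020-04-01")
    #     async for provider in client.express_route_service_providers.list():
    #         print(provider.name)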
@property
def firewall_policies(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.FirewallPoliciesOperations>`
* 2019-07-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.FirewallPoliciesOperations>`
* 2019-08-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.FirewallPoliciesOperations>`
* 2019-09-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.FirewallPoliciesOperations>`
* 2019-11-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.FirewallPoliciesOperations>`
* 2019-12-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.FirewallPoliciesOperations>`
* 2020-03-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.FirewallPoliciesOperations>`
* 2020-04-01: :class:`FirewallPoliciesOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.FirewallPoliciesOperations>`
"""
api_version = self._get_api_version('firewall_policies')
if api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import FirewallPoliciesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def firewall_policy_rule_groups(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2019-07-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2019-08-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2019-09-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2019-11-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2019-12-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2020-03-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
* 2020-04-01: :class:`FirewallPolicyRuleGroupsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.FirewallPolicyRuleGroupsOperations>`
"""
api_version = self._get_api_version('firewall_policy_rule_groups')
if api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import FirewallPolicyRuleGroupsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def flow_logs(self):
"""Instance depends on the API version:
* 2019-11-01: :class:`FlowLogsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.FlowLogsOperations>`
* 2019-12-01: :class:`FlowLogsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.FlowLogsOperations>`
* 2020-03-01: :class:`FlowLogsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.FlowLogsOperations>`
* 2020-04-01: :class:`FlowLogsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.FlowLogsOperations>`
"""
api_version = self._get_api_version('flow_logs')
if api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import FlowLogsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import FlowLogsOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import FlowLogsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import FlowLogsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def hub_route_tables(self):
"""Instance depends on the API version:
* 2020-04-01: :class:`HubRouteTablesOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.HubRouteTablesOperations>`
"""
api_version = self._get_api_version('hub_route_tables')
if api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import HubRouteTablesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def hub_virtual_network_connections(self):
"""Instance depends on the API version:
* 2018-04-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_04_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-06-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_06_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-07-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_07_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-08-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-10-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-11-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2018-12-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-02-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-04-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-06-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-07-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-08-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-09-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-11-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2019-12-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2020-03-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
* 2020-04-01: :class:`HubVirtualNetworkConnectionsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.HubVirtualNetworkConnectionsOperations>`
"""
api_version = self._get_api_version('hub_virtual_network_connections')
if api_version == '2018-04-01':
from ..v2018_04_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-07-01':
from ..v2018_07_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import HubVirtualNetworkConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def inbound_nat_rules(self):
"""Instance depends on the API version:
* 2017-06-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2017_06_01.aio.operations_async.InboundNatRulesOperations>`
* 2017-08-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2017_08_01.aio.operations_async.InboundNatRulesOperations>`
* 2017-09-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2017_09_01.aio.operations_async.InboundNatRulesOperations>`
* 2017-10-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2017_10_01.aio.operations_async.InboundNatRulesOperations>`
* 2017-11-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2017_11_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-01-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_01_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-02-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_02_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-04-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_04_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-06-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_06_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-07-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_07_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-08-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-10-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-11-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.InboundNatRulesOperations>`
* 2018-12-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-02-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-04-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-06-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-07-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-08-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-09-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-11-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.InboundNatRulesOperations>`
* 2019-12-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.InboundNatRulesOperations>`
* 2020-03-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.InboundNatRulesOperations>`
* 2020-04-01: :class:`InboundNatRulesOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.InboundNatRulesOperations>`
"""
api_version = self._get_api_version('inbound_nat_rules')
if api_version == '2017-06-01':
from ..v2017_06_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2017-08-01':
from ..v2017_08_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2017-09-01':
from ..v2017_09_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2017-10-01':
from ..v2017_10_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2017-11-01':
from ..v2017_11_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-01-01':
from ..v2018_01_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-07-01':
from ..v2018_07_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import InboundNatRulesOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import InboundNatRulesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def interface_endpoints(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.InterfaceEndpointsOperations>`
* 2018-10-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.InterfaceEndpointsOperations>`
* 2018-11-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.InterfaceEndpointsOperations>`
* 2018-12-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.InterfaceEndpointsOperations>`
* 2019-02-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.InterfaceEndpointsOperations>`
"""
api_version = self._get_api_version('interface_endpoints')
if api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import InterfaceEndpointsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import InterfaceEndpointsOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import InterfaceEndpointsOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import InterfaceEndpointsOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import InterfaceEndpointsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def ip_allocations(self):
"""Instance depends on the API version:
* 2020-03-01: :class:`IpAllocationsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.IpAllocationsOperations>`
* 2020-04-01: :class:`IpAllocationsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.IpAllocationsOperations>`
"""
api_version = self._get_api_version('ip_allocations')
if api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import IpAllocationsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import IpAllocationsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def ip_groups(self):
"""Instance depends on the API version:
* 2019-09-01: :class:`IpGroupsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.IpGroupsOperations>`
* 2019-11-01: :class:`IpGroupsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.IpGroupsOperations>`
* 2019-12-01: :class:`IpGroupsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.IpGroupsOperations>`
* 2020-03-01: :class:`IpGroupsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.IpGroupsOperations>`
* 2020-04-01: :class:`IpGroupsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.IpGroupsOperations>`
"""
api_version = self._get_api_version('ip_groups')
        if api_version == '2019-09-01':
            from ..v2019_09_01.aio.operations_async import IpGroupsOperations as OperationClass
        elif api_version == '2019-11-01':
            from ..v2019_11_01.aio.operations_async import IpGroupsOperations as OperationClass
        elif api_version == '2019-12-01':
            from ..v2019_12_01.aio.operations_async import IpGroupsOperations as OperationClass
        elif api_version == '2020-03-01':
            from ..v2020_03_01.aio.operations_async import IpGroupsOperations as OperationClass
        elif api_version == '2020-04-01':
            from ..v2020_04_01.aio.operations_async import IpGroupsOperations as OperationClass
        else:
            raise NotImplementedError("APIVersion {} is not available".format(api_version))
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B3/B2 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.4"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "4"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Green Vegetation
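        # Note on the colour ramps used by these ASTER layers (a reading of the config, not
        # stated in it explicitly): each product exposes the pre-computed band ratio as
        # "Band_1" scaled to 0-255, the "value" entries are positions on that scaled range
        # with colours interpolated between them, and the legend "label" entries give the
        # physical ratio at the end points (1.4 and 4 for the B3/B2 layer above).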
{
# Included as a keyword for the layer
"label": "Gypsum Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band Ratio: (B10+B12)/B11
Blue is low gypsum content,
Red is high gypsum content
Useful for mapping:
(1) evaporative environments (e.g. salt lakes) and associated arid aeolian systems (e.g. dunes);
(2) acid waters (e.g. from oxidising sulphides) invading carbonate rich materials including around mine environments; and
(3) hydrothermal (e.g. volcanic) systems.""",
# The WMS name for the layer
"name": "aster_gypsum_index",
# The Datacube name for the associated data product
"product_name": "aster_gypsum_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "(B10+B12)/B11 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "0.47"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "0.5"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Gypsum Index
{
# Included as a keyword for the layer
"label": "Kaolin Group Index",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band Ratio: B6/B5
Blue is low content,
Red is high content
(potentially includes: pyrophyllite, alunite, well-ordered kaolinite)
Useful for mapping:
(1) different clay-type stratigraphic horizons;
(2) lithology-overprinting hydrothermal alteration, e.g. high sulphidation, “advanced argillic” alteration comprising pyrophyllite, alunite, kaolinite/dickite; and
(3) well-ordered kaolinite (warmer colours) versus poorly-ordered kaolinite (cooler colours) which can be used for mapping in situ versus transported materials, respectively.""",
# The WMS name for the layer
"name": "aster_kaolin_group_index",
# The Datacube name for the associated data product
"product_name": "aster_kaolin_group_index",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
            # Time Zone. In hours added to UTC (may be negative)
            # Used for rounding off scene times to a date.
            # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
            # Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"legend": {
# "url": ""
"styles": ["ramp"]
},
"wcs_default_bands": ["Band_1"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "ramp",
"title": "B6/B5 ",
"abstract": "",
"index_function": lambda data: data["Band_1"],
"needed_bands": ["Band_1"],
"color_ramp": [
{
"value": 0.0,
"color": "#8F3F20",
"alpha": 0.0,
"legend": {
"label": "1.0"
}
},
{
"value": 1,
"color": "#000000"
},
{
"value": 10,
"color": "#2d002b"
},
{
"value": 25,
"color": "#550071"
},
{
"value": 60,
"color": "#0400ff"
},
{
"value": 90,
"color": "#0098ff"
},
{
"value": 110,
"color": "#00ffff"
},
{
"value": 130,
"color": "#00ff94"
},
{
"value": 150,
"color": "#00ff2a"
},
{
"value": 170,
"color": "#3fff00"
},
{
"value": 210,
"color": "#ffee00"
},
{
"value": 230,
"color": "#ff8300"
},
{
"value": 255.0,
"color": "#ff0000",
"legend": {
"label": "1.125"
}
}
],
"legend": {
"units": "Blue is low content,\nRed is high content",
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "ramp",
}, # ASTER Kaolin Group Index
{
# Included as a keyword for the layer
"label": "MgOH Group Composition",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B7/B8
Blue-cyan is magnesite-dolomite, amphibole, chlorite
Red is calcite, epidote, amphibole
Useful for mapping:
(1) exposed saprolite/saprock is often white mica or Al-smectite (warmer colours) whereas transported materials are often kaolin-rich (cooler colours);
            (2) clays developed over carbonates, especially Al-smectite (montmorillonite, beidellite) will produce middle to warmer colours.
(3) stratigraphic mapping based on different clay-types; and
(4) lithology-overprinting hydrothermal alteration, e.g. Si-rich and K-rich phengitic mica (warmer colours).
            Combine with
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.TimeSeriesAPIforDigitalPortals.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.TimeSeriesAPIforDigitalPortals.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.TimeSeriesAPIforDigitalPortals.exceptions import ApiException
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_object import InlineObject
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_object1 import InlineObject1
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_object2 import InlineObject2
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_object3 import InlineObject3
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response200 import InlineResponse200
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2001 import InlineResponse2001
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2002 import InlineResponse2002
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2003 import InlineResponse2003
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2004 import InlineResponse2004
class VendorApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_vendor_chart_iq_time_series_intraday_subsample_get_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (InlineResponse2003,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/vendor/chartIQ/timeSeries/intraday/subsample/get',
'operation_id': 'get_vendor_chart_iq_time_series_intraday_subsample_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'_from',
'type',
'quality',
'granularity',
'attributes',
],
'required': [
'id',
'_from',
],
'nullable': [
],
'enum': [
'type',
'quality',
'granularity',
],
'validation': [
'attributes',
]
},
root_map={
'validations': {
('attributes',): {
'max_items': 50,
},
},
'allowed_values': {
('type',): {
"TRADE": "trade",
"BID": "bid",
"ASK": "ask",
"YIELD": "yield"
},
('quality',): {
"RLT": "RLT",
"DLY": "DLY",
"BST": "BST"
},
('granularity',): {
"1S": "1s",
"5S": "5s",
"10S": "10s",
"30S": "30s",
"1M": "1m",
"5M": "5m",
"10M": "10m",
"15M": "15m",
"30M": "30m",
"1H": "1h"
},
},
'openapi_types': {
'id':
(str,),
'_from':
(str,),
'type':
(str,),
'quality':
(str,),
'granularity':
(str,),
'attributes':
([str],),
},
'attribute_map': {
'id': 'id',
'_from': 'from',
'type': 'type',
'quality': 'quality',
'granularity': 'granularity',
'attributes': '_attributes',
},
'location_map': {
'id': 'query',
'_from': 'query',
'type': 'query',
'quality': 'query',
'granularity': 'query',
'attributes': 'query',
},
'collection_format_map': {
'attributes': 'csv',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.post_vendor_chart_iq_time_series_eod_list_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (InlineResponse200,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/vendor/chartIQ/timeSeries/eod/list',
'operation_id': 'post_vendor_chart_iq_time_series_eod_list',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(InlineObject,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.post_vendor_chart_iq_time_series_eod_subsample_get_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (InlineResponse2001,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/vendor/chartIQ/timeSeries/eod/subsample/get',
'operation_id': 'post_vendor_chart_iq_time_series_eod_subsample_get',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(InlineObject1,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.post_vendor_chart_iq_time_series_eod_subsample_list_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (InlineResponse2002,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/vendor/chartIQ/timeSeries/eod/subsample/list',
'operation_id': 'post_vendor_chart_iq_time_series_eod_subsample_list',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(InlineObject2,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.post_vendor_chart_iq_time_series_intraday_subsample_list_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (InlineResponse2004,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/vendor/chartIQ/timeSeries/intraday/subsample/list',
'operation_id': 'post_vendor_chart_iq_time_series_intraday_subsample_list',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(InlineObject3,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_vendor_chart_iq_time_series_intraday_subsample_get(
self,
id,
_from,
**kwargs
) -> InlineResponse2003:
"""Single subsample intraday data for a notation. # noqa: E501
Single subsample intraday data for a notation. The subsample may exceed the entitled time range, but will be based only on ticks that are in the entitled range. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
id (str): Identifier of the notation.
_from (str): Date and time of the start point of the subsample (inclusive). `from` must be aligned to `granularity`. That is, the numerical value is an integral multiple of the time span value represented by `granularity`. The data accessible in the past is limited to a few weeks at most. Dates in the future are not allowed.
Keyword Args:
type (str): Type of the price as configured for the customer.. [optional] if omitted the server will use the default value of "trade"
quality (str): Quality of the price.. [optional] if omitted the server will use the default value of "DLY"
granularity (str): Subsample granularities suitable for intraday data.. [optional] if omitted the server will use the default value of "1h"
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2003
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['id'] = \
id
kwargs['_from'] = \
_from
return self.get_vendor_chart_iq_time_series_intraday_subsample_get_endpoint.call_with_http_info(**kwargs)
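    # Usage sketch (not from the original source): "<NOTATION_ID>" is a placeholder identifier and
    # `prices_api` stands for an instance of this API class built with an ApiClient; the timestamp
    # format is an assumption, check the service docs for the exact `_from` syntax.
    #   response = prices_api.get_vendor_chart_iq_time_series_intraday_subsample_get(
    #       id="<NOTATION_ID>",
    #       _from="2021-06-01T09:00:00Z",
    #       granularity="1h",
    #   )
    #   print(response)  # InlineResponse2003 object holding the subsample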
def get_vendor_chart_iq_time_series_intraday_subsample_get_with_http_info(
self,
id,
_from,
**kwargs
) -> typing.Tuple[InlineResponse2003, int, typing.MutableMapping]:
"""Single subsample intraday data for a notation. # noqa: E501
Single subsample intraday data for a notation. The subsample may exceed the entitled time range, but will be based only on ticks that are in the entitled range. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
id (str): Identifier of the notation.
_from (str): Date and time of the start point of the subsample (inclusive). `from` must be aligned to `granularity`. That is, the numerical value is an integral multiple of the time span value represented by `granularity`. The data accessible in the past is limited to a few weeks at most. Dates in the future are not allowed.
Keyword Args:
type (str): Type of the price as configured for the customer.. [optional] if omitted the server will use the default value of "trade"
quality (str): Quality of the price.. [optional] if omitted the server will use the default value of "DLY"
granularity (str): Subsample granularities suitable for intraday data.. [optional] if omitted the server will use the default value of "1h"
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, | |
\
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=32, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3b', dtype=tf.float32)
layers.append(net)
flattened = tf.reshape(net.output, [-1, net.output.shape[1]*net.output.shape[2]*net.output.shape[3]])
net = Layer.FullyConnected(flattened, outputSize=512, weightInit=Layer.XavierInit, wd=network._HParam['WeightDecay'], \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, \
activation=Layer.ReLU, \
name='FC1', dtype=tf.float32)
layers.append(net)
return net.output, layers
def LargeNetBody_Quant(network, images, preWeights, preBias):
def _outWrapper(net):
# Simulate quantization
a = net._outMin
b = net._outMax
s = (b - a) / 255.0
output = net.output
output = fake_quant_with_min_max_vars(net.output, a, b, num_bits=FAKEBITS, narrow_range=False)
# Simulate value degrade in approximate computing
# output -= 0.2 * (output - tf.reduce_min(output)) * tf.random_uniform(minval=0.0, maxval=1.0, shape=output.shape)
return output
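        # Worked example of the fake quantization above (illustrative numbers, not model values):
        # with a = 0.0, b = 4.0 and 8 bits, the step size is s = (4.0 - 0.0) / 255 ~= 0.01569, so an
        # activation of 1.0 is snapped to round(1.0 / s) * s ~= 1.0039; values are clamped to [a, b]
        # and rounded onto 256 evenly spaced levels. The `s` computed above is that same step size,
        # kept only for reference.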
layers = []
standardized = tf.identity(images * (1 / 255.0), name='images_standardized')
net = Layer.Conv2D(standardized, convChannels=32, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv1a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv1a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv1a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=32, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv1b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv1b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv1b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv2a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv2a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv2a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv2b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv2b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv2b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv3a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv3a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv3a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv3b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv3b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
fakeQuant=True, name='Conv3b', dtype=tf.float32)
layers.append(net)
flattened = tf.reshape(_outWrapper(net), [-1, net.output.shape[1]*net.output.shape[2]*net.output.shape[3]])
net = Layer.FullyConnected(flattened, outputSize=512, weightInit=Layer.ConstInit(preWeights['FC1']), wd=network._HParam['WeightDecay'], \
biasInit=Layer.ConstInit(preBias['FC1']), \
# bn=True, step=network._step, ifTest=network._ifTest, \
activation=Layer.ReLU, \
fakeQuant=True, name='FC1', dtype=tf.float32)
layers.append(net)
return net, layers
def LargeNetBody_Eval(network, images, preWeights, preBias, preMin, preMax):
def _outWrapper(net):
# Simulate quantization
a = net._outMin
b = net._outMax
s = (b - a) / 255.0
output = net.output
# output = fake_quant_with_min_max_vars(net.output, a, b, num_bits=FAKEBITS, narrow_range=False)
# Simulate value degrade in approximate computing
# output -= 0.2 * (output - tf.reduce_min(output)) * tf.random_uniform(minval=0.0, maxval=1.0, shape=output.shape)
return output
layers = []
standardized = tf.identity(images * (1 / 255.0), name='images_standardized')
net = Layer.Conv2D(standardized, convChannels=32, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv1a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv1a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1a', dtype=tf.float32)
net.setMinMax(preMin['Conv1a'], preMax['Conv1a'])
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=32, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv1b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv1b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1b', dtype=tf.float32)
net.setMinMax(preMin['Conv1b'], preMax['Conv1b'])
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv2a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv2a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2a', dtype=tf.float32)
net.setMinMax(preMin['Conv2a'], preMax['Conv2a'])
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=64, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv2b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv2b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2b', dtype=tf.float32)
net.setMinMax(preMin['Conv2b'], preMax['Conv2b'])
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv3a']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv3a']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3a', dtype=tf.float32)
net.setMinMax(preMin['Conv3a'], preMax['Conv3a'])
layers.append(net)
net = Layer.Conv2D(_outWrapper(net), convChannels=128, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.ConstInit(preWeights['Conv3b']), convPadding='SAME', \
biasInit=Layer.ConstInit(preBias['Conv3b']), \
# bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3b', dtype=tf.float32)
net.setMinMax(preMin['Conv3b'], preMax['Conv3b'])
layers.append(net)
flattened = tf.reshape(_outWrapper(net), [-1, net.output.shape[1]*net.output.shape[2]*net.output.shape[3]])
net = Layer.FullyConnected(flattened, outputSize=512, weightInit=Layer.ConstInit(preWeights['FC1']), wd=network._HParam['WeightDecay'], \
biasInit=Layer.ConstInit(preBias['FC1']), \
# bn=True, step=network._step, ifTest=network._ifTest, \
activation=Layer.ReLU, \
name='FC1', dtype=tf.float32)
net.setMinMax(preMin['FC1'], preMax['FC1'])
layers.append(net)
return net, layers
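# Sketch of the inputs expected by the two pretrained builders above (layer names are taken from the
# definitions; the weight/bias values are placeholders, presumably numpy arrays saved from training):
#   preWeights = {'Conv1a': w1a, 'Conv1b': w1b, 'Conv2a': w2a, ..., 'FC1': w_fc1}
#   preBias    = {'Conv1a': b1a, ..., 'FC1': b_fc1}
#   preMin, preMax = {'Conv1a': 0.0, ...}, {'Conv1a': 6.0, ...}   # recorded activation ranges
#   net, layers = LargeNetBody_Eval(network, images, preWeights, preBias, preMin, preMax)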
def AlexNetBody(network, images):
layers = []
standardized = tf.identity(images / 255.0, name='images_standardized')
net = Layer.Conv2D(standardized, convChannels=96, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=96, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=256, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=256, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv2b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=384, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=384, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv3b', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=256, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv4a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=256, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv4b', dtype=tf.float32)
layers.append(net)
flattened = tf.reshape(net.output, [-1, net.output.shape[1]*net.output.shape[2]*net.output.shape[3]])
net = Layer.FullyConnected(flattened, outputSize=4096, weightInit=Layer.XavierInit, wd=network._HParam['WeightDecay'], \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, \
activation=Layer.ReLU, \
name='FC1', dtype=tf.float32)
layers.append(net)
net = Layer.FullyConnected(net.output, outputSize=4096, weightInit=Layer.XavierInit, wd=network._HParam['WeightDecay'], \
bias=True, biasInit=Layer.ConstInit(0.0), \
#bn=True, step=network._step, ifTest=network._ifTest, \
activation=Layer.ReLU, \
name='FC2', dtype=tf.float32)
layers.append(net)
return net.output, layers
def AlexNetBNBody(network, images):
layers = []
standardized = tf.identity(images / 255.0, name='images_standardized')
net = Layer.Conv2D(standardized, convChannels=96, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, biasInit=Layer.ConstInit(0.0), \
bn=True, step=network._step, ifTest=network._ifTest, epsilon=1e-8, \
# pool=True, poolSize=[2, 2], poolStride=[2, 2], poolType=Layer.MaxPool, poolPadding='SAME', \
activation=Layer.ReLU, \
name='Conv1a', dtype=tf.float32)
layers.append(net)
net = Layer.Conv2D(net.output, convChannels=96, \
convKernel=[3, 3], convStride=[1, 1], convWD=network._HParam['WeightDecay'], \
convInit=Layer.XavierInit, convPadding='SAME', \
bias=True, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Dataverse data-types data model."""
from __future__ import absolute_import
from pyDataverse.utils import dict_to_json
from pyDataverse.utils import read_file_json
from pyDataverse.utils import write_file_json
"""
Data-structure to work with data and metadata of Dataverses, Datasets and
Datafiles - coming from different sources.
"""
class Dataverse(object):
"""Base class for Dataverse data model."""
"""Attributes required for Dataverse metadata json."""
__attr_required_metadata = [
'alias',
'name',
'dataverseContacts'
]
"""Attributes valid for Dataverse metadata json."""
__attr_valid_metadata = [
'alias',
'name',
'affiliation',
'description',
'dataverseContacts',
'dataverseType'
]
"""Attributes valid for Dataverse class."""
__attr_valid_class = [
# 'datasets',
# 'dataverses',
'pid'
] + __attr_valid_metadata
def __init__(self):
"""Init a Dataverse() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
"""
"""Misc"""
self.datasets = []
self.dataverses = []
self.pid = None
"""Metadata"""
self.name = None
self.alias = None
self.dataverseContacts = []
self.affiliation = None
self.description = None
self.dataverseType = None
def __str__(self):
"""Return name of Dataverse() class for users."""
return 'pyDataverse Dataverse() model class.'
def set(self, data):
"""Set class attributes with a flat dict.
Parameters
----------
data : dict
            Flat dict with data. Keys must be named the same as the class
            attribute the data should be mapped to.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.name
'Test pyDataverse'
"""
for key, val in data.items():
if key in self.__attr_valid_class:
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def import_metadata(self, filename, format='dv_up'):
"""Import Dataverse metadata from file.
This simply parses in data with valid attribute naming as keys.
        The data need not be complete; even attributes required for the
metadata json export can be missing.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format.
Examples
-------
Import metadata coming from json file::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> dv.import_metadata('tests/data/dataverse_min.json')
>>> dv.name
'Test pyDataverse'
"""
data = {}
if format == 'dv_up':
metadata = read_file_json(filename)
# get first level metadata and parse it automatically
for attr in self.__attr_valid_metadata:
if attr in metadata:
data[attr] = metadata[attr]
self.set(data)
elif format == 'dv_down':
metadata = read_file_json(filename)
self.set(data)
else:
# TODO: Exception
print('Data-format not right.')
def is_valid(self):
"""Check if set attributes are valid for Dataverse api metadata creation.
The attributes required are listed in `__attr_required_metadata`.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': '<NAME>',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
        >>> dv.is_valid()
        True
        >>> dv.name = None
        >>> dv.is_valid()
        False
"""
is_valid = True
for attr in self.__attr_required_metadata:
if not self.__getattribute__(attr):
is_valid = False
print('attribute \'{0}\' missing.'.format(attr))
return is_valid
def dict(self, format='dv_up'):
"""Create dicts in different data formats.
`dv_up`: Checks if data is valid for the different dict formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.dict()
>>> data['name']
'Test pyDataverse'
Todo
-------
Validate standards.
"""
data = {}
if format == 'dv_up':
if self.is_valid():
for attr in self.__attr_valid_metadata:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
                # TODO: check whether the required attributes are set, otherwise raise an Exception
return data
else:
print('dict can not be created. Data is not valid for format')
return None
elif format == 'all':
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
# TODO: Exception
print('Format not right for dict.')
return None
def json(self, format='dv_up'):
r"""Create json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.json()
>>> data
'{\n "name": "Test pyDataverse",\n "dataverseContacts": [\n {\n "contactEmail": "<EMAIL>"\n }\n ],\n "alias": "test-pyDataverse"\n}'
Todo
-------
Validate standards.
"""
if format == 'dv_up':
data = self.dict('dv_up')
if data:
return dict_to_json(data)
else:
return None
elif format == 'all':
data = self.dict('all')
if data:
return dict_to_json(data)
else:
return None
else:
# TODO Exception
print('data format not valid.')
def export_metadata(self, filename, format='dv_up'):
"""Export Dataverse metadata to Dataverse api upload json.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format for export. Available format is: `dv_up` with all
metadata for Dataverse api upload.
Examples
-------
Export Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.export_metadata('tests/data/dataverse_export.json')
"""
if format == 'dv_up':
return write_file_json(filename, self.dict())
else:
# TODO: Exception
print('Data-format not right.')
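# Illustrative round trip with the Dataverse model above (a sketch; the file paths are the ones used
# in the docstring examples):
#   dv = Dataverse()
#   dv.import_metadata('tests/data/dataverse_min.json')           # read Dataverse upload json
#   if dv.is_valid():
#       dv.export_metadata('tests/data/dataverse_export.json')    # write it back out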
class Dataset(object):
"""Base class for the Dataset data model."""
"""Attributes required for Dataset metadata json."""
__attr_required_metadata = [
'title',
'author',
'datasetContact',
'dsDescription',
'subject'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'].
"""
__attr_valid_metadata_datasetVersion = [
'license',
'termsOfUse',
'termsOfAccess'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'].
"""
__attr_valid_metadata_citation_dicts = [
'title',
'subtitle',
'alternativeTitle',
'alternativeURL',
'subject',
'notesText',
'productionDate',
'productionPlace',
'distributionDate',
'depositor',
'dateOfDeposit',
'kindOfData',
'seriesName',
'seriesInformation',
'relatedMaterial',
'relatedDatasets',
'otherReferences',
'dataSources',
'originOfSources',
'characteristicOfSources',
'accessToSources',
'kindOfData'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'][\'fields\'].
"""
__attr_valid_metadata_citation_arrays = {
'otherId': ['otherIdAgency', 'otherIdValue'],
'author': ['authorName', 'authorAffiliation', 'authorIdentifierScheme',
'authorIdentifier'],
'datasetContact': ['datasetContactName', 'datasetContactAffiliation',
'datasetContactEmail'],
'dsDescription': ['dsDescriptionValue', 'dsDescriptionDate'],
'keyword': ['keywordValue', 'keywordVocabulary',
'keywordVocabularyURI'],
'producer': ['producerName', 'producerAffiliation',
'producerAbbreviation', 'producerURL', 'producerLogoURL'],
'contributor': ['contributorType', 'contributorName'],
'grantNumber': ['grantNumberAgency', 'grantNumberValue'],
'topicClassification': ['topicClassValue', 'topicClassVocab'],
'publication': ['publicationCitation', 'publicationIDType',
'publicationIDNumber', 'publicationURL'],
'distributor': ['distributorName', 'distributorAffiliation',
'distributorAbbreviation', 'distributorURL',
'distributorLogoURL'],
'timePeriodCovered': ['timePeriodCoveredStart',
'timePeriodCoveredEnd'],
'dateOfCollection': ['dateOfCollectionStart', 'dateOfCollectionEnd'],
'software': ['softwareName', 'softwareVersion']
}
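    # For the array-type citation fields above, each class attribute is expected to hold a list of
    # flat dicts keyed by the listed sub-fields, e.g. (illustrative values only):
    #   ds.author = [{'authorName': 'Doe, Jane', 'authorAffiliation': 'Example University'}]
    #   ds.keyword = [{'keywordValue': 'survey data', 'keywordVocabulary': 'none'}]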
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'].
"""
__attr_valid_metadata_geospatial_dicts = [
'geographicUnit'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'][\'fields\'].
"""
__attr_valid_metadata_geospatial_arrays = {
'geographicCoverage': ['country', 'state', 'city',
'otherGeographicCoverage'],
'geographicBoundingBox': ['westLongitude', 'eastLongitude',
'northLongitude', 'southLongitude']
}
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'socialscience\'].
"""
__attr_valid_metadata_socialscience_dicts = [
'unitOfAnalysis',
'universe',
'timeMethod',
'dataCollector',
'collectorTraining',
'frequencyOfDataCollection',
'samplingProcedure',
'deviationsFromSampleDesign',
'collectionMode',
'researchInstrument',
'dataCollectionSituation',
'actionsToMinimizeLoss',
'controlOperations',
'weighting',
'cleaningOperations',
'datasetLevelErrorNotes',
'responseRate',
'samplingErrorEstimates',
'otherDataAppraisal',
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'].
"""
__attr_valid_metadata_journal_dicts = [
'journalArticleType'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'][\'fields\'].
"""
__attr_valid_metadata_journal_arrays = {
'journalVolumeIssue': ['journalVolume', 'journalIssue',
'journalPubDate']
}
"""Attributes valid for Dataset class."""
__attr_valid_class = [
'datafiles'
] + __attr_valid_metadata_datasetVersion \
+ __attr_valid_metadata_citation_dicts \
+ list(__attr_valid_metadata_citation_arrays.keys()) \
+ __attr_valid_metadata_geospatial_dicts \
+ list(__attr_valid_metadata_geospatial_arrays.keys()) \
+ __attr_valid_metadata_socialscience_dicts \
+ __attr_valid_metadata_journal_dicts \
        + list(__attr_valid_metadata_journal_arrays.keys())
def __init__(self):
"""Init a Dataset() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
"""
"""Misc"""
self.datafiles = []
"""Metadata: dataset"""
self.license = None
self.termsOfUse = None
self.termsOfAccess = None
"""Metadata: citation"""
self.citation_displayName = None
self.title = None
self.subtitle = None
self.alternativeTitle = None
self.alternativeURL = None
self.otherId = []
self.author = []
self.datasetContact = []
self.dsDescription = []
self.subject = []
self.keyword = []
self.topicClassification = []
self.publication = []
self.notesText = None
self.producer = []
self.productionDate = None
self.productionPlace = None
self.contributor = []
self.grantNumber = []
self.distributor = []
        self.distributionDate = None
"""
@package PyCUB
Created by <NAME>
Date : 21 FEV 2018
University of Kent, ECE paris
jkobject.com
PyCUB is a Project which goal is to understand the particular dynamics of the codon usage
bias.
you can find more about pycub here ../README.md
if you find you need to add anything please contact me @jkobject or directly make pull requests.
"""
import os
import json
import zipfile
import shutil
from ftplib import FTP
import gzip
import copy
import requests
from sklearn.preprocessing import normalize
try:
from urllib2 import urlopen as urlopen
except:
from urllib.request import urlopen as urlopen
from sklearn.neural_network import MLPRegressor
from joblib import Parallel, delayed
import multiprocessing
from functools32 import lru_cache
from rpy2.robjects.packages import importr
from ete2 import NCBITaxa
from rpy2 import robjects
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
import pandas as pd
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.sparse.csgraph import dijkstra
from scipy.stats import spearmanr
from sklearn import manifold as man
from sklearn.decomposition import PCA
from sklearn.linear_model import MultiTaskLassoCV, LassoCV
from sklearn import cluster
import espece as spe
import homoset as hset
import utils
import homology as h
from bokeh.plotting import *
from bokeh.models import *
from bokeh.io import save, show
from bokeh.layouts import column
import matplotlib.pyplot as plt
from Bio import SeqIO
# Lasso, cross Validation, Principal Component Analysis, T-SNE, spearman's rho,
# djikstra alg with fbonacci's boosting, multinomial distribution,
# multivariate normal approximation to multinomial, multiprocessing,
# parallel computing, entropy, AUC_entropy, pseudo phylodistance
# by cophenetic matrix of a dendogram, cosine similarity, hyperparams grid search,
# KMeans, MiniBatchKMeans, KModes, silhouette_score, calinski_harabaz_score, akaike information criterion,
# bayesian information criterion binary search, recurssive function,
# dynamic programming, endres distance, kulback leiber divergence, gaussian mixture clustering
# neural networks, Perceptron, DBscan
# python, js, REST, ftp, json, doxygen,gzip,
# CAI, tRNA, CUB, 3D conformation of DNA, CUF, fungi, animals, plants, GCcount, KaKs_Scores, hydrophob, synthcost, isoepoint
# HiC data, fasta, fast,
# biology, genomics, genetics, population genomics, comparative genomics, computational biology,
# bioinformatics, machine learning, statistics, statistical learning, informatics, computer science
# knowledge discovery, datascience, big data, cloud computing, scientific computing
import pdb
class PyCUB(object):
""" @package PyCUB is the main object of the project that allows the user to access most of the functions
When using it, please follow the documentation and examples on notebooks thought you can
still use it as you please and use some of the nice tricks provided here and in python
Attributes:
species: dictionary of Espece objects from the name of the species.
(see espece.py)
working_homoset: PyCUB.homoset object that stores a subset of the homologies
you want to work on
        all_homoset: PyCUB.homoset that stores all the homologies
        session: str the session name you want to use (will appear in the savings, for example)
        _is_saved: bool trivial system only boolean
        links: dict of all the links readily available in PyCUB.
            for the project of <NAME>, please use whatever datasets you may find useful
(you can also download from Ensembl)
coeffgenes: np.array regressing values for each attributes
scoregenes: the score of the regressor
scorespecies: the score of the regressor
coeffspecies: np.array regressing values for each attributes
rho_ent: float from the scoring of spearman's rho for entropy
pent: float from the scoring of spearman's rho for entropy
rho_cub: float from the scoring of spearman's rho for CUB
pcub: float from the scoring of spearman's rho for CUB
rho_cuf: float from the scoring of spearman's rho for CUF
pcuf: float from the scoring of spearman's rho for CUF
"""
links = {'yun': {
'homology1t500.zip': 'https://www.dropbox.com/s/fmh0ljf02twn4vw/homology1t500.zip?dl=1',
'homology501t1000.zip': 'https://www.dropbox.com/s/ld4ar5pnh0f1w1w/homology501t1000.zip?dl=1',
'homology1001t2000.zip': 'https://www.dropbox.com/s/he39xu9c0n2jw8n/homology1001t2000.zip?dl=1',
'homology2001t2500.zip': 'https://www.dropbox.com/s/8w73jbs3r0ugqb8/homology2001t2500.zip?dl=1',
'homology2501t3000.zip': 'https://www.dropbox.com/s/86d23iaetw3hmzy/homology2501t3000.zip?dl=1',
'homology3001t3500.zip': 'https://www.dropbox.com/s/mr1tefylq11l3ee/homology3001t3500.zip?dl=1',
'first50.zip': 'https://www.dropbox.com/s/m3vob12ztfqs8gh/first50.zip?dl=1'},
'mymeta': {
'Amino Acid Properties README.txt': 'https://www.dropbox.com/s/3tb2j69l0acirt0/\
Amino%20Acid%20Properties%20README.txt?dl=1',
'Amino Acid Properties.csv':
'https://www.dropbox.com/s/g157emzyid2qi83/Amino%20Acid%20Properties.csv?dl=1',
'cerevisae_prot_abundance.csv':
'https://www.dropbox.com/s/t77016m5fqzb2fc/cerevisae_prot_abundance.csv?dl=1',
'names_with_links.csv':
'https://www.dropbox.com/s/voj26r0onvvqvx2/names_with_links.csv?dl=1',
'order_name461.csv':
'https://www.dropbox.com/s/0708046ld1pcju4/order_name461.csv?dl=1',
'Yun_Species_Context':
'https://www.dropbox.com/s/rdse1rco04hmuwf/Yun_Species_Context.csv?dl=1',
'homolist.json':
'https://www.dropbox.com/s/5a3h8hps9eozd8g/homolist.json?dl=1'
},
'meta': {
'fungi':
'ftp://ftp.ensemblgenomes.org/pub/release-39/fungi/species_metadata_EnsemblFungi.json',
'bacteria':
'ftp://ftp.ensemblgenomes.org/pub/release-39/bacteria/species_metadata_EnsemblBacteria.json',
'plants':
'ftp://ftp.ensemblgenomes.org/pub/release-39/plants/species_metadata_EnsemblPlants.json'
}
}
species = {}
working_homoset = None
all_homoset = None
_is_saved = False
session = None
coeffgenes = None
scoregenes = None
scorespecies = None
coeffspecies = None
def __init__(self, species={}, _is_saved=False,
_is_loaded=False, working_homoset=None, all_homoset=None, session='session1'):
"""
will initialize the object with the different values you might have from another project
Args:
species: dictionary of Espece objects from the name of the species.
(see espece.py)
working_homoset : PyCUB.homoset object that stores a subset of the homologies
you want to work on
            all_homoset : PyCUB.homoset that stores all the homologies
            session : str the session name you want to use (will appear in the savings, for example)
_is_saved : bool trivial system only boolean
"""
self.species = species
self.working_homoset = working_homoset
self.all_homoset = all_homoset
self._is_saved = _is_saved
self._is_loaded = _is_loaded
self.session = session
self.homolist = None
print "working on session: " + self.session
if os.path.isdir('utils/save/' + session):
print 'you already have a session here (just a warning)'
else:
os.mkdir('utils/save/' + session)
# create a function to find all homologies from a species
# CpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpCpCpGpApApTpApTpApTpTpTpTpCpCpGpApApTpApTpApTpTp
# GbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbGbGbCbTbTbAbTbAbTbAbAbAbGbGbCbTbTbAbTbAbTbAbAbAb
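    # Typical workflow sketch (argument values shown are the documented defaults):
    #   cub = PyCUB(session='session1')
    #   cub.getHomologylist(species='saccharomyces_cerevisiae', kingdom='fungi')
    #   cub.get_data(From='ensembl', homonames='all', kingdom='fungi', by='entropy')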
def getHomologylist(self, species='saccharomyces_cerevisiae', kingdom='fungi'):
"""
A function to retrieve the homologies directly from a given species
(it is better to use
one of the key species for the different kingdoms (sacharomyces, HS, Arabidopsis..))
Args:
specie: str the name of the specie to get the homology from
kingdom: str the kingdom where we can find this specie
"""
location = 'ftp.ensemblgenomes.org' if kingdom != 'vertebrate' else 'ftp.ensembl.org'
release = 'release-40/' if kingdom != 'vertebrate' else 'release-93'
ftp = FTP(location)
ftp.login()
if kingdom == 'vertebrate':
kingdom = ''
ftp.cwd('pub/' + release + kingdom + '/fasta/')
data = []
name = []
ftp.retrlines('NLST', data.append)
for d in data:
if d == species:
ftp.cwd(d)
link = []
ftp.cwd('cds')
ftp.retrlines('NLST', link.append)
with open("utils/data/temp.fa.gz", "wb") as file:
for i in link:
if i[-9:] == "all.fa.gz":
ftp.retrbinary("RETR " + i, file.write)
with gzip.open("utils/data/temp.fa.gz", "rt") as handle:
for record in SeqIO.parse(handle, "fasta"):
name.append(record.name)
self.homolist = name
def get_data(self, From='yun', homonames=None, kingdom='fungi', sequence='cdna',
additional='type=orthologues', saveonfiles=False, normalized=True, setnans=False,
by="entropy", using="normal", tRNA=True, getCAI=True, first=20, inpar=True):
"""
Download the data from somewhere on the web (Ensembl, Yun(with links))
you can provide a lot of different values to scrape Ensembl's datasets
it will compute from ensembl to retrieve a similar dataset as what yun's
data is.
Args:
From: str flag 'yun' or 'ensembl':
homonames: list[str] what particular homologies you want to scrap if 'all' and you have used the
getHomologylist() function, will get the homologies from there
kingdom: str same for kingdoms
sequence: str the type of sequences you want to use
additional: str additional information about the scrapping
saveonfiles: bool save the unprocessed data before populating working homoset
normalized: bool if you want the values to be normalized by the length of the codons
(lengths are always saved)
setnans: bool if you want to save the nans as metadata
by: str flag 'entropy', 'entropyLocation' (entropy location), 'frequency'
using: str flag 'random' 'normal' 'permutation' 'full'
inpar: bool or int for parallel computing and number of core
tRNA: bool whether or not to compute tRNA data
getCAI: bool flag to true to retrieve the CAI as well
first: int the first most expressed genes to compute the CAI ref statistics
Raises:
AttributeError: "you can't compute codon frequency with Yun's data...", 'not the right From'
UnboundLocalError: "you have to load the homologies first"
AttributeError: 'not the right From'
http://rest.ensemblgenomes.org/
"""
if by == 'frequency':
print "you will have a larger dimensional matrix (59D)"
if type(inpar) is int:
num_cores = inpar
else:
num_cores = -1 if inpar else 1
if From == 'yun':
            if by == 'frequency':
raise AttributeError("you can't compute codon frequency with Yun's data...")
Parallel(n_jobs=8)(delayed(utils.getyun)(key, val) for key, val in
self.links['yun'].iteritems())
self.load(All=False if homonames is not None else True, filename=homonames,
From=From, by=by, inpar=inpar)
elif From == 'ensembl':
if homonames == 'all' or homonames is None:
if self.homolist is None and kingdom == 'fungi':
with open('utils/meta/homolist.json', "r") as f:
self.homolist = json.loads(f.read())
else:
if self.homolist is None:
raise UnboundLocalError("you have to load the homologies first")
print "using the loaded homolist from ensembl"
else:
self.homolist = homonames
self.all_homoset = hset.HomoSet(datatype=by)
print "doing all " + str(len(self.homolist)) + " homologies"
print ' '
homonamelist = []
getCAI = self.createRefCAI(first=first, kingdom=kingdom) if getCAI else None
if bool(inpar):
values = Parallel(n_jobs=num_cores)(delayed(utils.loadfromensembl)(
name, kingdom, sequence,
additional, saveonfiles,
normalized, setnans, i, by, using, getCAI) for i, name in | |
            for r in routine:
system.__dict__[routine_cli[r.lower()]].run(**kwargs)
else:
logger.error("System is not set up. Routines cannot continue.")
# Disable profiler and output results
if profile:
pr.disable()
if system.files.no_output:
nlines = 40
s = io.StringIO()
ps = pstats.Stats(pr, stream=sys.stdout).sort_stats('cumtime')
ps.print_stats(nlines)
logger.info(s.getvalue())
s.close()
else:
nlines = 999
with open(system.files.prof, 'w') as s:
ps = pstats.Stats(pr, stream=s).sort_stats('cumtime')
ps.print_stats(nlines)
ps.dump_stats(system.files.prof_raw)
logger.info('cProfile text data written to "%s".', system.files.prof)
logger.info('cProfile raw data written to "%s". View with tool `snakeviz`.', system.files.prof_raw)
if remove_pycapsule is True:
system.remove_pycapsule()
return system
def _find_cases(filename, path):
"""
Find valid cases using the provided names and path
Parameters
----------
filename : str
Test case file name
Returns
-------
list
A list of valid cases.
"""
logger.info('Working directory: "%s"', os.getcwd())
if len(filename) == 0:
logger.info('info: no input file. Use `andes run -h` for help.')
if isinstance(filename, str):
filename = [filename]
cases = []
for file in filename:
full_paths = os.path.join(path, file)
found = glob.glob(full_paths)
if len(found) == 0:
logger.error('error: file "%s" does not exist.', full_paths)
else:
cases += found
# remove folders and make cases unique
unique_cases = list(set(cases))
valid_cases = []
for case in unique_cases:
if os.path.isfile(case):
valid_cases.append(case)
if len(valid_cases) > 0:
valid_cases = sorted(valid_cases)
logger.debug('Found files: %s', pprint.pformat(valid_cases))
return valid_cases
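# Example (hypothetical file names):
#   _find_cases('kundur_*.xlsx', 'cases')   # -> sorted list of matching files, folders and duplicates removed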
def set_logger_level(lg, type_to_set, level):
"""
Set logging level for the given type of handler.
"""
for h in lg.handlers:
if isinstance(h, type_to_set):
h.setLevel(level)
def find_log_path(lg):
"""
Find the file paths of the FileHandlers.
"""
out = []
for h in lg.handlers:
if isinstance(h, logging.FileHandler):
out.append(h.baseFilename)
return out
def _run_mp_proc(cases, ncpu=NCPUS_PHYSICAL, **kwargs):
"""
Run multiprocessing with `Process`.
Return values from `run_case` are not preserved. Always return `True` when done.
"""
# start processes
jobs = []
for idx, file in enumerate(cases):
job = Process(name=f'Process {idx:d}', target=run_case, args=(file,), kwargs=kwargs)
jobs.append(job)
job.start()
start_msg = f'Process {idx:d} for "{file:s}" started.'
print(start_msg)
logger.debug(start_msg)
if (idx % ncpu == ncpu - 1) or (idx == len(cases) - 1):
sleep(0.1)
for job in jobs:
job.join()
jobs = []
return True
def _run_mp_pool(cases, ncpu=NCPUS_PHYSICAL, verbose=logging.INFO, **kwargs):
"""
Run multiprocessing jobs using Pool.
This function returns all System instances in a list, but requires longer computation time.
Parameters
----------
    ncpu : int, optional = os.cpu_count()
Number of cpu cores to use in parallel
mp_verbose : 10 - 50
Verbosity level during multiprocessing
verbose : 10, 20, 30, 40, 50
Verbosity level outside multiprocessing
"""
pool = Pool(ncpu)
print("Cases are processed in the following order:")
print('\n'.join([f'"{name}"' for name in cases]))
ret = pool.map(partial(run_case, verbose=verbose, remove_pycapsule=True, **kwargs), cases)
# fix address for in-place arrays
for ss in ret:
ss.fix_address()
return ret
def run(filename, input_path='', verbose=20, mp_verbose=30, ncpu=NCPUS_PHYSICAL, pool=False,
cli=False, codegen=False, shell=False, **kwargs):
"""
Entry point to run ANDES routines.
Parameters
----------
filename : str
file name (or pattern)
input_path : str, optional
input search path
verbose : int, 10 (DEBUG), 20 (INFO), 30 (WARNING), 40 (ERROR), 50 (CRITICAL)
Verbosity level. If ``config_logger`` is called prior to ``run``,
this option will be ignored.
mp_verbose : int
Verbosity level for multiprocessing tasks
ncpu : int, optional
Number of cpu cores to use in parallel
pool: bool, optional
Use Pool for multiprocessing to return a list of created Systems.
kwargs
Other supported keyword arguments
cli : bool, optional
If is running from command-line. If True, returns exit code instead of System
codegen : bool, optional
Run full code generation for System before loading case.
Only used for single test case.
shell : bool, optional
If True, enter IPython shell after routine.
Returns
-------
System or exit_code
        An instance of system (if `cli == False`) or an exit code otherwise.
"""
if is_interactive() and len(logger.handlers) == 0:
config_logger(verbose, file=False)
# put some args back to `kwargs`
kwargs['input_path'] = input_path
kwargs['verbose'] = verbose
cases = _find_cases(filename, input_path)
system = None
ex_code = 0
if len(filename) > 0 and len(cases) == 0:
ex_code = 1 # file specified but not found
t0, _ = elapsed()
if len(cases) == 1:
system = run_case(cases[0], codegen=codegen, **kwargs)
elif len(cases) > 1:
# suppress logging output during multiprocessing
logger.info('-> Processing %s jobs on %s CPUs.', len(cases), ncpu)
set_logger_level(logger, logging.StreamHandler, mp_verbose)
set_logger_level(logger, logging.FileHandler, logging.DEBUG)
kwargs['no_pbar'] = True
if pool is True:
system = _run_mp_pool(cases,
ncpu=ncpu,
mp_verbose=mp_verbose,
**kwargs)
else:
system = _run_mp_proc(cases,
ncpu=ncpu,
mp_verbose=mp_verbose,
**kwargs)
# restore command line output when all jobs are done
set_logger_level(logger, logging.StreamHandler, verbose)
log_files = find_log_path(logger)
if len(log_files) > 0:
log_paths = '\n'.join(log_files)
print(f'Log saved to "{log_paths}".')
t0, s0 = elapsed(t0)
if len(cases) == 1:
if system is not None:
ex_code += system.exit_code
else:
ex_code += 1
elif len(cases) > 1:
if isinstance(system, list):
for s in system:
ex_code += s.exit_code
if len(cases) == 1:
if ex_code == 0:
print(f'-> Single process finished in {s0}.')
else:
print(f'-> Single process exit with an error in {s0}.')
elif len(cases) > 1:
if ex_code == 0:
print(f'-> Multiprocessing finished in {s0}.')
else:
print(f'-> Multiprocessing exit with an error in {s0}.')
# IPython interactive shell
if shell is True:
try:
from IPython import embed
# load plotter before entering IPython
if system is None:
logger.warning("IPython: The System object has not been created.")
elif isinstance(system, System):
logger.info("IPython: Access System object in variable `system`.")
system.TDS.load_plotter()
elif isinstance(system, list):
logger.warning("IPython: System objects stored in list `system`.\n"
"Call `TDS.load_plotter()` on each for plotter.")
embed()
except ImportError:
logger.warning("IPython import error. Installed?")
if cli is True:
return ex_code
return system
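# Minimal usage sketch for run() ('my_case.xlsx' and 'batch_*.xlsx' are placeholder file names):
#   ss = run('my_case.xlsx', verbose=20)         # single case -> System instance
#   systems = run('batch_*.xlsx', pool=True)     # several cases -> list of System objects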
def plot(*args, **kwargs):
"""
Wrapper for the plot tool.
"""
from andes.plot import tdsplot
return tdsplot(*args, **kwargs)
def misc(edit_config='', save_config='', show_license=False, clean=True, recursive=False,
overwrite=None, version=False, **kwargs):
"""
Miscellaneous commands.
"""
if edit_conf(edit_config):
return
if show_license:
print_license()
return
if save_config != '':
save_conf(save_config, overwrite=overwrite, **kwargs)
return
if clean is True:
remove_output(recursive)
return
if demo is True:
demo(**kwargs)
return
if version is True:
versioninfo()
return
logger.info("info: no option specified. Use 'andes misc -h' for help.")
def prepare(quick=False, incremental=False, models=None,
precompile=False, nomp=False, **kwargs):
"""
Run code generation.
Parameters
----------
full : bool
True to run full prep with formatted equations.
Useful in interactive mode and during document generation.
ncpu : int
Number of cores to be used for parallel processing.
cli : bool
True to indicate running from CLI.
It will set `quick` to True if not `full`.
precompile : bool
True to compile model function calls after code generation.
Warnings
--------
The default behavior has changed since v1.0.8: when `cli` is `True` and
`full` is not `True`, quick code generation will be used.
Returns
-------
System object if `cli` is `False`; exit_code 0 otherwise.
"""
# use `quick` for cli if `full` is not enforced,
# because the LaTeX code gen is usually discarded in CLI.
cli = kwargs.get("cli", False)
full = kwargs.get("full", False)
ncpu = kwargs.get("ncpu", NCPUS_PHYSICAL)
if cli is True:
if not full:
quick = True
if full is True:
quick = False
# run code generation
system = System(options=kwargs, no_undill=True)
system.prepare(quick=quick, incremental=incremental, models=models,
nomp=nomp, ncpu=ncpu)
# compile model function calls
if precompile:
system.precompile(models, nomp=nomp, ncpu=ncpu)
if cli is True:
return 0
else:
return system
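# Usage sketch for prepare(), based on the keyword handling above:
#   system = prepare(full=True)                       # interactive: full code generation, returns the System
#   prepare(quick=True, incremental=True, cli=True)   # CLI-style: quick incremental pass, returns 0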
def selftest(quick=False, **kwargs):
"""
Run unit tests.
"""
# map verbosity level from logging to unittest
vmap = {1: 3, 10: 3, 20: 2, 30: 1, 40: 1, 50: 1}
verbose = vmap[kwargs.get('verbose', 20)]
# skip if quick
quick_skips = ('test_1_docs', 'test_codegen_inc')
try:
logger.handlers[0].setLevel(logging.WARNING)
sys.stdout = open(os.devnull, 'w') # suppress print statements
except IndexError: # logger not set up
pass
# discover test cases
test_directory = tests_root()
suite = unittest.TestLoader().discover(test_directory)
# remove codegen for quick mode
if quick is True:
for test_group in suite._tests:
for test_class in test_group._tests:
tests_keep = list()
for t in test_class._tests:
if t._testMethodName not in quick_skips:
tests_keep.append(t)
test_class._tests = tests_keep
unittest.TextTestRunner(verbosity=verbose).run(suite)
sys.stdout = sys.__stdout__
def doc(attribute=None, list_supported=False, config=False, **kwargs):
"""
Quick documentation from command-line.
"""
system = System()
if attribute is not None:
if attribute in system.__dict__ and hasattr(system.__dict__[attribute], 'doc'):
logger.info(system.__dict__[attribute].doc())
else:
logger.error('Model <%s> does not exist.', attribute)
elif list_supported is True:
logger.info(system.supported_models())
else:
logger.info('info: no option specified. Use \'andes doc -h\' for help.')
def demo(**kwargs):
"""
| |
"""Soil water movement and content module.
Model one-dimensional soil water movement (fluxes).
Includes groundwater (assumes constant water table depth).
@author <NAME>
"""
from collections import namedtuple
import json
import math
# Soil water characteristics.
# sat: saturation point
# fc: field capacity
# pwp: permanent wilting point
# psd: pore-size distribution
# porosity: soil porosity
# airentry: air-entry value
SWC = namedtuple('SWC', 'sat fc pwp psd porosity airentry')
# Soil texture.
# clay: percentage of clay
# sand: percentage of sand
# om: percentage of organic matter
Texture = namedtuple('Texture', ['clay', 'sand', 'om'])
# Soil water characteristics in the rooting zone.
# wc: soil water content (in mm)
# vwc: soil water content (volumetric)
# critical: soil water content threshold, below which
# plant water stress occurs
# sat: saturation point
# fc: field capacity
# pwp: permanent wilting point
RootZone = namedtuple('RootZone', 'wc vwc critical sat fc pwp')
# Actual evapotranspiration (ET).
# crop - actual transpiration (from crop)
# soil - actual evaporation (from soil)
ActualET = namedtuple('ActualET', 'crop soil')
# Water fluxes into a given soil layer.
# t: water uptake via plant transpiration
# e: water loss via soil evaporation
# influx: water entry (input) into the soil layer
# outflux: water exit (output) out of the soil layer
# netflux: difference between water entry and water exit
Fluxes = namedtuple('Fluxes', 't e influx outflux netflux')
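# Illustrative instantiation of the records above (the numbers are placeholders, not measured values):
#   swc = SWC(sat=0.45, fc=0.33, pwp=0.12, psd=0.28, porosity=0.46, airentry=2.5)
#   fluxes = Fluxes(t=0.0, e=0.0, influx=0.001, outflux=0.0005, netflux=0.0005)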
class SoilLayer(object):
"""Soil layer properties class.
The physical properties of a soil layer, dealing with
soil water content and fluxes.
ATTRIBUTES:
thick - thickness of the soil layer (m)
texture - sand, clay, and organic matter (%)
vwc - vol. water content (m3/m3)
wc - water content (mm)
accthick - cumulative thickness (m)
depth - depth of layer from soil surface (m)
swc - soil water characteristics (varying units)
ksat - saturated hydraulic conductivity (m/day)
k - hydraulic conductivity (m/day)
matric - matric head (m)
gravity - gravity head (m)
fluxes - Fluxes namedtuple for the various water flux components:
t - plant water uptake via transpiration (m/day)
e - loss of water via evaporation (m/day)
influx - influx: water entry into layer (m/day)
outflux - outflux: water exit out of layer (m/day)
netflux - net flux: difference between influx & outflux
(m/day)
METHODS:
initialize_layer - initialize all attributes
update_heads_k - update the matric head, gravity head, and
the unsaturated hydraulic conductivity
Getters:
tothead - total/sum of matric and gravity head (m)
Note:
Volumetric water content (vwc) can be given as a negative value.
Negative values are a special code to mean that the water content
is a fraction between SAT and FC or between FC and PWP. The codes
are along a scale from -3 to -1:
Scale:
-2.75 -2.25 -1.5
[-3 ....|..........|....-2 ...........|..........-1]
PWP FC SAT
so that if the given water content is -1, -2, or -3, it means the
water content should be set to saturation, field capacity, or
permanent wilting point, respectively. A value of -1.5 means the
water content will be set at halfway between SAT and FC.
Likewise, -2.25 and -2.75 mean the water content will be lower
than FC, where the former (-2.25) means the water content will be
set nearer to FC, but the latter (-2.75) closer to PWP.
Any negative value outside the range of -3 to -1 means the water
content will be set at FC.
"""
__accdepth = 0.0 # internal use: used to determine a layer's depth
def __init__(self):
"""Initialize the SoilLayer object."""
self.thick = 0.0
self.texture = Texture(0.0, 0.0, 0.0)
self.vwc = 0.0
self.wc = 0.0
self.accthick = 0.0
self.depth = 0.0
self.swc = SWC(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.ksat = 0.0
self.k = 0.0
self.matric = 0.0
self.gravity = 0.0
self.fluxes = Fluxes(0.0, 0.0, 0.0, 0.0, 0.0)
self.prev = None
self.next = None
def initialize_layer(self, prevlayer, nextlayer):
"""Initialize all attributes.
Note:
This function sets the water content to within the range of
SAT and FC or between FC and PWP, if the vol. water content
is given as negative value. See this class's docstring above.
Args:
prevlayer - the previous soil layer (above layer)
nextlayer - the next soil layer (below layer)
Returns:
None
"""
self.prev = prevlayer
self.next = nextlayer
# 1. set layer depth and cumulative thickness:
prevaccthick = self.prev.accthick if self.prev else 0.0
self.accthick = self.thick + prevaccthick
prevthick = self.prev.thick if self.prev else 0.0
d = 0.5 * (prevthick + self.thick)
self.depth = SoilLayer.__accdepth + d
SoilLayer.__accdepth += d
# 2. set soil water characteristics (Saxton & Rawls, 2008):
c, s, om = self.texture
s /= 100 # convert sand and clay from % to fraction, but om is %
c /= 100
# 2a. permanent wilting, field capacity, then saturation points:
n1 = -0.024 * s + 0.487 * c + 0.006 * om
n2 = 0.005 * (s*om) - 0.013 * (c * om) + 0.068 * (s * c) + 0.031
theta1500t = n1 + n2
theta1500 = theta1500t + (0.14 * theta1500t - 0.02)
n1 = -0.251 * s + 0.195 * c + 0.011 * om
n2 = 0.006 * (s*om) - 0.027 * (c * om) + 0.452 * (s * c) + 0.299
theta33t = n1 + n2
theta33 = theta33t + (1.283*theta33t**2 - 0.374*theta33t - 0.015)
n1 = 0.278 * s + 0.034 * c + 0.022 * om
n2 = - 0.018 * (s*om) - 0.027 * (c*om) - 0.584 * (s * c) + 0.078
theta_s33t = n1 + n2
theta_s33 = theta_s33t + 0.636 * theta_s33t - 0.107
theta0 = theta33 + theta_s33 - 0.097 * s + 0.043
# 2b. pore size distribution index (no unit):
b = math.log(1500) - math.log(33)
b /= math.log(theta33) - math.log(theta1500)
psd = 1 / b
# 2c. air-entry suction (kPa):
awc = theta0 - theta33
n1 = -21.674 * s - 27.932 * c - 81.975 * awc + 71.121 * (s * awc)
n2 = 8.294 * (c * awc) + 14.05 * (s * c) + 27.161
aet = n1 + n2
ae = max(0.0, aet + (0.02 * aet ** 2 - 0.113 * aet - 0.7))
# 2d. store calculated soil water characteristics (all in m3/m3)
# calibrations/adjust for Malaysian soils:
theta1500 = 1.528 * theta1500 * (1 - theta1500) # PWP
theta33 = 1.605 * theta33 * (1 - theta33) # FC
theta0 = 2.225 * theta0 * (1 - theta0) # SAT (= porosity)
self.swc = SWC(theta0, theta33, theta1500, psd, theta0, ae)
# 3. saturated hydraulic conductivity (convert mm/hour to m/day):
self.ksat = 1930 * awc ** (3 - psd) * 24 / 1000
# 4. check for special code:
if self.vwc < 0:
# vol. water content is a fraction between SAT, FC, and PWP:
vwc = -self.vwc # make a +ve
fc = self.swc.fc
if 1 <= vwc <= 2:
# water content is between SAT and FC
sat = self.swc.sat
vwc = sat - (vwc - 1) * (sat - fc) # interpolation
elif 2 < vwc <= 3:
# water content is between FC and PWP
pwp = self.swc.pwp
vwc = fc - (vwc - 2) * (fc - pwp) # interpolation
else:
# out of range, so just set to FC
vwc = fc
self.vwc = vwc # m3/m3
self.wc = self.vwc * self.thick * 1000 # mm
# 5. update the matric and gravity heads,
# then the hydraulic conductivity:
self.update_heads_k()
def update_heads_k(self):
"""Update the matric and gravity heads (m), then
the unsaturated hydraulic conductivity (m/day).
Update is based on current soil water content.
"""
fc = self.swc.fc
vwc = self.vwc # current soil water content
# matric suction, convert from kPa to m by dividing by 10
if vwc >= fc:
df = vwc - fc
hm = 33 - (33 - self.swc.airentry) * df / (self.swc.sat - fc)
hm /= 10
else:
b = 1 / self.swc.psd
a = math.exp(3.496508 + b * math.log(fc))
hm = (a * max(0.05,
'''create files contains estimated generalization errors for model
INPUT FILE
WORKING/transactions-subset2.pickle
OUTPUT FILES
WORKING/ege_week/YYYY-MM-DD/MODEL-TD/HP-FOLD.pickle dict all_results
WORKING/ege_month/YYYY-MM-DD/MODEL-TD/HP-FOLD.pickle dict all_results
'''
import collections
import cPickle as pickle
import datetime
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from DataframeAppender import DataframeAppender
from directory import directory
from Logger import Logger
import parse_command_line
def usage(msg=None):
if msg is not None:
print 'invocation error: ' + str(msg)
print 'usage: python ege_week.py YYYY-MM-DD <other options>'
print ' YYYY-MM-DD mid-point of week; analyze -3 to +3 days'
print ' --month optional; test on next month, not next week'
print ' --model {lr|rf} which model to run'
print ' --td <range> training_days'
print ' --hpd <range> required iff model is rf; max_depths to model'
print ' --hpw <range> required iff model is rf; weight functions to model'
print ' --hpx <form> required iff model is lr; transformation to x'
print ' --hpy <form> required iff model is lr; transformation to y'
print ' --test optional; if present, program runs in test mode'
print 'where'
print ' <form> is {lin|log}+ saying whether the variable is in natural or log units'
print ' <range> is start [stop [step]], just like Python\'s range(start,stop,step)'
sys.exit(1)
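# Example invocations (the dates and hyperparameter values are illustrative only):
#   python ege_week.py 2008-01-15 --model rf --td 7 60 7 --hpd 4 12 2 --hpw 1 2
#   python ege_week.py 2008-01-15 --model lr --td 30 --hpx log --hpy log --month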
DateRange = collections.namedtuple('DateRange', 'first last')
def make_DateRange(mid, half_range):
return DateRange(first=mid - datetime.timedelta(half_range),
last=mid + datetime.timedelta(half_range),
)
def make_predictors():
'''return tuple of (column name, transformation) pairs
The transformation says whether and how to convert the column to the log domain.
Include only features of the census and tax roll, not the assessment,
because previous work found that using features derived from the
assessment degraded estimated generalization errors.
NOTE: the columns in the x_array objects passed to scikit learn are in
this order.
'''
# an earlier version returned a dictionary, which invalidated the assumption
# about column order in x
result = ( # the columns in the x_arrays are in this order
('fraction.owner.occupied', None),
('FIREPLACE.NUMBER', 'log1p'),
('BEDROOMS', 'log1p'),
('BASEMENT.SQUARE.FEET', 'log1p'),
('LAND.SQUARE.FOOTAGE', 'log'),
('zip5.has.industry', None),
('census.tract.has.industry', None),
('census.tract.has.park', None),
('STORIES.NUMBER', 'log1p'),
('census.tract.has.school', None),
('TOTAL.BATHS.CALCULATED', 'log1p'),
('median.household.income', 'log'), # not log feature in earlier version
('LIVING.SQUARE.FEET', 'log'),
('has.pool', None),
('zip5.has.retail', None),
('census.tract.has.retail', None),
('is.new.construction', None),
('avg.commute', None),
('zip5.has.park', None),
('PARKING.SPACES', 'log1p'),
('zip5.has.school', None),
('TOTAL.ROOMS', 'log1p'),
('age', None),
('age2', None),
('effective.age', None),
('effective.age2', None),
)
return result
class CensusAdjacencies(object):
def __init__(self):
path = directory('working') + 'census_tract_adjacent.pickle'
f = open(path, 'rb')
self.adjacent = pickle.load(f)
f.close()
def adjacen(self, census_tract):
return self.adjacent.get(census_tract, None)
def make_control(argv):
'''Return control Bunch'''
print 'argv'
pprint(argv)
if len(argv) < 3:
usage('missing invocation options')
def make_sale_date(s):
year, month, day = s.split('-')
return datetime.date(int(year), int(month), int(day))
pcl = parse_command_line.ParseCommandLine(argv)
arg = Bunch(
base_name=argv[0].split('.')[0],
hpd=pcl.get_range('--hpd') if pcl.has_arg('--hpd') else None,
hpw=pcl.get_range('--hpw') if pcl.has_arg('--hpw') else None,
hpx=pcl.get_arg('--hpx') if pcl.has_arg('--hpx') else None,
hpy=pcl.get_arg('--hpy') if pcl.has_arg('--hpy') else None,
model=pcl.get_arg('--model'),
month=pcl.has_arg('--month'),
sale_date=make_sale_date(argv[1]),
td=pcl.get_range('--td'),
test=pcl.has_arg('--test'),
)
print 'arg'
print arg
# check for missing options
if arg.model is None:
usage('missing --model')
if arg.td is None:
usage('missing --td')
# validate combinations of invocation options
if arg.model == 'lr':
if arg.hpx is None or arg.hpy is None:
usage('model lr requires --hpx and --hpy')
elif arg.model == 'rf':
if arg.hpd is None or arg.hpw is None:
usage('model rf requires --hpd and --hpw')
else:
usage('bad --model: %s' % str(arg.model))
random_seed = 123
now = datetime.datetime.now()
predictors = make_predictors()
print 'number of predictors', len(predictors)
sale_date_range = make_DateRange(arg.sale_date, 15 if arg.month else 3)
log_file_name = arg.base_name + '.' + now.isoformat('T') + '.log'
# dir_out: WORKING/ege_[month|week]/<sale_date>/
dir_out = (directory('working') +
'ege_' +
('month' if arg.month else 'week') +
'/' + argv[1] + '/'
)
debug = False
test = arg.test
b = Bunch(
arg=arg,
census_adjacencies=CensusAdjacencies(),
date_column='python.sale_date',
debug=debug,
dir_out=dir_out,
n_folds=2 if test else 10,
n_rf_estimators=100 if test else 1000, # num trees in a random forest
path_in_old=directory('working') + 'transactions-subset2.pickle',
path_in=directory('working') + 'transactions-subset3-subset-train.csv',
path_log=directory('log') + log_file_name,
predictors=predictors,
price_column='SALE.AMOUNT',
random_seed=random_seed,
relevant_date_range=DateRange(first=datetime.date(2003, 1, 1), last=datetime.date(2009, 3, 31)),
sale_date_range=sale_date_range,
start_time=now,
test=test,
use_old_input=True,
)
return b
def elapsed_time(start_time):
return datetime.datetime.now() - start_time
def x(mode, df, predictors):
'''return 2D np.array, with df x values possibly transformed to log
RETURNS array: np.array 2D
'''
def transform(v, mode, transformation):
if mode is None:
return v
if mode == 'linear' or mode == 'lin':
return v
if mode == 'log':
if transformation is None:
return v
if transformation == 'log':
return np.log(v)
if transformation == 'log1p':
return np.log1p(v)
raise RuntimeError('bad transformation: ' + str(transformation))
raise RuntimeError('bad mode:' + str(mode))
array = np.empty(shape=(df.shape[0], len(predictors)),
dtype=np.float64).T
# build up in transposed form
index = 0
for predictor_name, transformation in predictors:
v = transform(df[predictor_name].values, mode, transformation)
array[index] = v
index += 1
return array.T
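# Example (hypothetical one-column data frame, natural log requested):
#   x('log', pd.DataFrame({'LIVING.SQUARE.FEET': [1000.0, 2000.0]}),
#     (('LIVING.SQUARE.FEET', 'log'),))
#   -> array([[6.9078], [7.6009]])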
def y(mode, df, price_column):
'''return np.array 1D with transformed price column from df'''
df2 = df.copy(deep=True)
if mode == 'log':
df2[price_column] = pd.Series(np.log(df[price_column]), index=df.index)
array = np.array(df2[price_column].as_matrix(), np.float64)
return array
def mask_in_date_range(df, date_range):
df_date = df['sale.python_date']
return (df_date >= date_range.first) & (df_date <= date_range.last)
def samples_in_date_range(df, date_range):
'return new df'
return df[mask_in_date_range(df, date_range)]
def add_age(df, sale_date):
'Return new df with extra columns for age and effective age'
column_names = df.columns.tolist()
if 'age' in column_names:
print column_names
print 'age in column_names'
pdb.set_trace()
assert('age' not in column_names)
assert('age2' not in column_names)
assert('effective.age' not in column_names)
assert('effective.age2' not in column_names)
sale_year = df['sale.year']
def age(column_name):
'age from sale_date to specified column'
age_in_years = sale_year - df[column_name].values
return pd.Series(age_in_years, index=df.index)
result = df.copy(deep=True)
result['age'] = age('YEAR.BUILT')
result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
result['age2'] = result['age'] * result['age']
result['effective.age2'] = result['effective.age'] * result['effective.age']
return result
def squeeze(obj, verbose=False):
'replace np.array float64 with np.array float32'
if isinstance(obj, dict):
return {k: squeeze(v) for k, v in obj.iteritems()}
if isinstance(obj, np.ndarray) and obj.dtype == np.float64:
return np.array(obj, dtype=np.float32)
return obj
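# Example: squeeze({'estimate': np.array([1.0])}) returns the dict with its
# float64 array downcast to float32; non-float64 values pass through unchanged.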
def make_weights(query, train_df, hpw, control):
'return numpy.array of weights for each sample'
if hpw == 1:
return np.ones(len(train_df))
else:
print 'bad hpw: %s' % hpw
def sweep_hp_lr(train_df, validate_df, control):
'sweep hyperparameters, fitting and predicting for each combination'
def x_matrix(df, transform):
augmented = add_age(df, control.arg.sale_date)
return x(transform, augmented, control.predictors)
def y_vector(df, transform):
return y(transform, df, control.price_column)
verbose = True
LR = linear_model.LinearRegression
results = {}
for hpx in control.arg.hpx:
for hpy in control.arg.hpy:
if verbose:
print 'sweep_hp_lr hpx %s hpy %s' % (hpx, hpy)
model = LR(fit_intercept=True,
normalize=True,
copy_X=False,
)
train_x = x_matrix(train_df, hpx)
train_y = y_vector(train_df, hpy)
model.fit(train_x, train_y)
estimates = model.predict(x_matrix(validate_df, hpx))
actuals = y_vector(validate_df, hpy)
attributes = {
'coef_': model.coef_,
'intercept_': model.intercept_
}
results[('y_transform', hpy), ('x_transform', hpx)] = squeeze({
'estimate': estimates,
'actual': actuals,
'attributes': attributes
})
return results
def sweep_hp_rf(train_df, validate_df, control):
'fit a model and validate a model for each hyperparameter'
def x_matrix(df):
augmented = add_age(df, control.arg.sale_date)
return x(None, augmented, control.predictors)
def y_vector(df):
return y(None, df, control.price_column)
verbose = True
RFR = ensemble.RandomForestRegressor
train_x = x_matrix(train_df)
train_y = y_vector(train_df)
results = {}
for hpd in control.arg.hpd:
for hpw in control.arg.hpw:
for validate_row_index in xrange(len(validate_df)):
if verbose:
print 'sweep_hp_rf hpd %d hpw %d validate_row_index %d of %d' % (
hpd, hpw, validate_row_index, len(validate_df))
validate_row = validate_df[validate_row_index: validate_row_index + 1]
model = RFR(n_estimators=control.n_rf_estimators, # number of trees
random_state=control.random_seed,
max_depth=hpd)
weights = make_weights(validate_row, train_df, hpw, control)
model.fit(train_x, train_y, weights)
estimate = squeeze(model.predict(x_matrix(validate_row))[0])
actual = squeeze(y_vector(validate_row)[0])
# Don't keep some attributes
# oob attributes are not produced because we didn't ask for them
# estimators_ contains a fitted model for each estimate
attributes = {
'feature_importances_': model.feature_importances_,
}
results[('max_depth', hpd), ('weight_scheme_index', hpw)] = squeeze({
'estimate': estimate,
'actual': actual,
'attributes': attributes,
})
return results
def cross_validate(df, control):
'produce estimated generalization errors'
verbose = True
results = {}
fold_number = -1
sale_dates_mask = mask_in_date_range(df, control.sale_date_range)
skf = cross_validation.StratifiedKFold(sale_dates_mask, control.n_folds)
for train_indices, validate_indices in skf:
fold_number += 1
fold_train_all = df.iloc[train_indices].copy(deep=True)
fold_validate_all = df.iloc[validate_indices].copy(deep=True)
for td in control.arg.td:
if verbose:
print 'cross_validate fold %d of %d training_days %d' % (
fold_number, control.n_folds, td)
fold_train = samples_in_date_range(
fold_train_all,
DateRange(first=control.arg.sale_date - datetime.timedelta(td),
last=control.arg.sale_date - datetime.timedelta(1))
)
fold_validate = samples_in_date_range(
fold_validate_all,
control.sale_date_range
)
if control.arg.model == 'lr':
d = sweep_hp_lr(fold_train, fold_validate, control)
elif control.arg.model == 'rf':
d = sweep_hp_rf(fold_train, fold_validate, control)
# d = cross_validate_rf(fold_train, fold_validate, control)
else:
if array.ndim == 2:
kernel = gaussian(array.shape[1], reso)
kernel /= np.sum(kernel)
for i in range(array.shape[0]):
array[i] = fftconvolve(array[i], kernel, mode='same')
elif array.ndim == 1:
kernel = gaussian(array.size, reso)
kernel /= np.sum(kernel)
array = fftconvolve(array, kernel, mode='same')
else:
my_logger.error(f'\n\tArray dimension must be 1 or 2. Here I have array.ndim={array.ndim}.')
return array
def formatting_numbers(value, error_high, error_low, std=None, label=None):
"""Format a physical value and its uncertainties. Round the uncertainties
to the first significant digit, and do the same for the physical value.
Parameters
----------
value: float
The physical value.
error_high: float
Upper uncertainty.
error_low: float
Lower uncertainty
std: float, optional
The RMS of the physical parameter (default: None).
label: str, optional
The name of the physical parameter to output (default: None).
Returns
-------
text: tuple
The formatted output strings inside a tuple.
Examples
--------
>>> formatting_numbers(3., 0.789, 0.500, std=0.45, label='test')
('test', '3.0', '0.8', '0.5', '0.5')
>>> formatting_numbers(3., 0.07, 0.008, std=0.03, label='test')
('test', '3.000', '0.07', '0.008', '0.03')
>>> formatting_numbers(3240., 0.2, 0.4, std=0.3)
('3240.0', '0.2', '0.4', '0.3')
>>> formatting_numbers(3240., 230, 420, std=330)
('3240', '230', '420', '330')
>>> formatting_numbers(0, 0.008, 0.04, std=0.03)
('0.000', '0.008', '0.040', '0.030')
>>> formatting_numbers(-55, 0.008, 0.04, std=0.03)
('-55.000', '0.008', '0.04', '0.03')
"""
str_std = ""
out = []
if label is not None:
out.append(label)
power10 = min(int(floor(np.log10(np.abs(error_high)))), int(floor(np.log10(np.abs(error_low)))))
if np.isclose(0.0, float("%.*f" % (abs(power10), value))):
str_value = "%.*f" % (abs(power10), 0)
str_error_high = "%.*f" % (abs(power10), error_high)
str_error_low = "%.*f" % (abs(power10), error_low)
if std is not None:
str_std = "%.*f" % (abs(power10), std)
elif power10 > 0:
str_value = f"{value:.0f}"
str_error_high = f"{error_high:.0f}"
str_error_low = f"{error_low:.0f}"
if std is not None:
str_std = f"{std:.0f}"
else:
if int(floor(np.log10(np.abs(error_high)))) == int(floor(np.log10(np.abs(error_low)))):
str_value = "%.*f" % (abs(power10), value)
str_error_high = f"{error_high:.1g}"
str_error_low = f"{error_low:.1g}"
if std is not None:
str_std = f"{std:.1g}"
elif int(floor(np.log10(np.abs(error_high)))) > int(floor(np.log10(np.abs(error_low)))):
str_value = "%.*f" % (abs(power10), value)
str_error_high = f"{error_high:.2g}"
str_error_low = f"{error_low:.1g}"
if std is not None:
str_std = f"{std:.2g}"
else:
str_value = "%.*f" % (abs(power10), value)
str_error_high = f"{error_high:.1g}"
str_error_low = f"{error_low:.2g}"
if std is not None:
str_std = f"{std:.2g}"
out += [str_value, str_error_high]
# if not np.isclose(error_high, error_low):
out += [str_error_low]
if std is not None:
out += [str_std]
out = tuple(out)
return out
def pixel_rotation(x, y, theta, x0=0, y0=0):
"""Rotate a 2D vector (x,y) of an angle theta clockwise.
Parameters
----------
x: float
x coordinate
y: float
y coordinate
theta: float
angle in radians
x0: float, optional
x position of the center of rotation (default: 0)
y0: float, optional
y position of the center of rotation (default: 0)
Returns
-------
u: float
rotated x coordinate
v: float
rotated y coordinate
Examples
--------
>>> pixel_rotation(0, 0, 45)
(0.0, 0.0)
>>> u, v = pixel_rotation(1, 0, np.pi/4)
.. doctest::
:hide:
>>> assert np.isclose(u, 1/np.sqrt(2))
>>> assert np.isclose(v, -1/np.sqrt(2))
>>> u, v = pixel_rotation(1, 2, -np.pi/2, x0=1, y0=0)
>>> assert np.isclose(u, -2)
>>> assert np.isclose(v, 0)
"""
u = np.cos(theta) * (x - x0) + np.sin(theta) * (y - y0)
v = -np.sin(theta) * (x - x0) + np.cos(theta) * (y - y0)
return u, v
def detect_peaks(image):
"""
Takes an image and detects the peaks using the local maximum filter.
Returns a boolean mask of the peaks (i.e. 1 when
the pixel's value is the neighborhood maximum, 0 otherwise).
Only positive peaks are detected (take absolute value or negative value of the
image to detect the negative ones).
Parameters
----------
image: array_like
The image 2D array.
Returns
-------
detected_peaks: array_like
Boolean mask of the peaks.
Examples
--------
>>> im = np.zeros((50,50))
>>> im[4,6] = 2
>>> im[10,20] = -3
>>> im[49,49] = 1
>>> detected_peaks = detect_peaks(im)
.. doctest::
:hide:
>>> assert detected_peaks[4,6]
>>> assert not detected_peaks[10,20]
>>> assert detected_peaks[49,49]
"""
# define an 8-connected neighborhood
neighborhood = generate_binary_structure(2, 2)
# apply the local maximum filter; all pixel of maximal value
# in their neighborhood are set to 1
local_max = maximum_filter(image, footprint=neighborhood) == image
# local_max is a mask that contains the peaks we are
# looking for, but also the background.
# In order to isolate the peaks we must remove the background from the mask.
# we create the mask of the background
background = (image == 0)
# a little technicality: we must erode the background in order to
# successfully subtract it from local_max, otherwise a line will
# appear along the background border (artifact of the local maximum filter)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=50)
# we obtain the final mask, containing only peaks,
# by removing the background from the local_max mask (xor operation)
detected_peaks = local_max ^ eroded_background
return detected_peaks
def clean_target_spikes(data, saturation):
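"""Clamp saturated pixels and iteratively smooth out saturation spikes.
Pixels above ``saturation`` are clamped; then pixels whose horizontal gradient
exceeds 80% of the maximum gradient (or falls below 80% of the minimum) are
replaced by their left or right neighbour, repeating until the number of
saturated pixels stops decreasing.
Parameters
----------
data: array_like
The image 2D array.
saturation: float
The saturation threshold used for clamping.
Returns
-------
data: array_like
The cleaned image 2D array.
"""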
saturated_pixels = np.where(data > saturation)
data[saturated_pixels] = saturation
NY, NX = data.shape
delta = len(saturated_pixels[0])
while delta > 0:
delta = len(saturated_pixels[0])
grady, gradx = np.gradient(data)
for iy in range(1, NY - 1):
for ix in range(1, NX - 1):
# if grady[iy,ix] > 0.8*np.max(grady) :
# data[iy,ix] = data[iy-1,ix]
# if grady[iy,ix] < 0.8*np.min(grady) :
# data[iy,ix] = data[iy+1,ix]
if gradx[iy, ix] > 0.8 * np.max(gradx):
data[iy, ix] = data[iy, ix - 1]
if gradx[iy, ix] < 0.8 * np.min(gradx):
data[iy, ix] = data[iy, ix + 1]
saturated_pixels = np.where(data >= saturation)
delta = delta - len(saturated_pixels[0])
return data
def plot_image_simple(ax, data, scale="lin", title="", units="Image units", cmap=None,
target_pixcoords=None, vmin=None, vmax=None, aspect=None, cax=None):
"""Simple function to plot a spectrum with error bars and labels.
Parameters
----------
ax: Axes
Axes instance to make the plot
data: array_like
The image data 2D array.
scale: str
Scaling of the image (choose between: lin, log or log10, symlog) (default: lin)
title: str
Title of the image (default: "")
units: str
Units of the image to be written in the color bar label (default: "Image units")
cmap: colormap
Color map label (default: None)
target_pixcoords: array_like, optional
2D array giving the (x,y) coordinates of the targets on the image: add a scatter plot (default: None)
vmin: float
Minimum value of the image (default: None)
vmax: float
Maximum value of the image (default: None)
aspect: str
Aspect keyword to be passed to imshow (default: None)
cax: Axes, optional
Color bar axes if necessary (default: None).
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from spectractor.extractor.images import Image
>>> from spectractor import parameters
>>> from spectractor.tools import plot_image_simple
>>> f, ax = plt.subplots(1,1)
>>> im = Image('tests/data/reduc_20170605_028.fits', config="./config/ctio.ini")
>>> plot_image_simple(ax, im.data, scale="symlog", units="ADU", target_pixcoords=(815,580),
... title="tests/data/reduc_20170605_028.fits")
>>> if parameters.DISPLAY: plt.show()
"""
if scale == "log" or scale == "log10":
# replace the zero and negative pixels with the smallest positive value first
zeros = np.where(data <= 0)
min_noz = np.min(data[np.where(data > 0)])
data[zeros] = min_noz
# apply log
# data = np.log10(data)
if scale == "log10" or scale == "log":
norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
elif scale == "symlog":
norm = matplotlib.colors.SymLogNorm(vmin=vmin, vmax=vmax, linthresh=10, base=10)
else:
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
im = ax.imshow(data, origin='lower', cmap=cmap, norm=norm, aspect=aspect)
ax.grid(color='silver', ls='solid')
ax.grid(True)
ax.set_xlabel(parameters.PLOT_XLABEL)
ax.set_ylabel(parameters.PLOT_YLABEL)
cb = plt.colorbar(im, ax=ax, cax=cax)
if scale == "lin":
cb.formatter.set_powerlimits((0, 0))
cb.locator = MaxNLocator(7, prune=None)
cb.update_ticks()
cb.set_label('%s (%s scale)' % (units, scale)) # ,fontsize=16)
if title != "":
ax.set_title(title)
if target_pixcoords is not None:
ax.scatter(target_pixcoords[0], target_pixcoords[1], marker='o', s=100, edgecolors='k', facecolors='none',
label='Target', linewidth=2)
def plot_spectrum_simple(ax, lambdas, data, data_err=None, xlim=None, color='r', linestyle='none', lw=2, label='',
title='', units=''):
"""Simple function to plot a spectrum with error bars and labels.
Parameters
----------
ax: Axes
Axes instance to make the plot.
lambdas: array
The wavelengths array.
data: array
The spectrum data array.
data_err: array, optional
The spectrum uncertainty array (default: None).
xlim: list, optional
List of minimum and maximum abscissas (default: None).
color: str, optional
String for the color of the spectrum (default: 'r').
linestyle: str, optional
String for the linestyle of the spectrum (default: 'none').
lw: int, optional
Integer for line width (default: 2).
label: str, optional
String label for the plot legend (default: '').
title: str, optional
String label for the plot title (default: '').
units: str, optional
String label for the units of the spectrum (default: '').
from __future__ import absolute_import
from __future__ import print_function
import math
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
import veriloggen.types.axi as axi
from veriloggen.fsm.fsm import FSM
from veriloggen.optimizer import try_optimize as optimize
from .ttypes import _MutexFunction
from .ram import RAM, MultibankRAM, to_multibank_ram
from .fifo import FIFO
class AXIStreamIn(axi.AxiStreamIn, _MutexFunction):
""" AXI Stream Interface for Input """
__intrinsics__ = ('read',
'dma_read', 'dma_read_async',
'dma_wait_read')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False,
op_sel_width=8, req_fifo_addrwidth=3, fsm_as_module=False):
axi.AxiStreamIn.__init__(self, m, name, clk, rst, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
noio)
self.addrwidth = addrwidth
self.op_sel_width = op_sel_width
self.req_fifo_addrwidth = req_fifo_addrwidth
self.fsm_as_module = fsm_as_module
self.mutex = None
# Read
self.read_req_fifo = FIFO(self.m, '_'.join(['', self.name, 'read_req_fifo']),
self.clk, self.rst,
datawidth=self.op_sel_width + self.addrwidth * 3 + 1,
addrwidth=self.req_fifo_addrwidth,
sync=False)
self.read_op_sel_fifo = self.m.Wire('_'.join(['', self.name,
'read_op_sel_fifo']),
self.op_sel_width)
self.read_local_addr_fifo = self.m.Wire('_'.join(['', self.name,
'read_local_addr_fifo']),
self.addrwidth)
self.read_local_stride_fifo = self.m.Wire('_'.join(['', self.name,
'read_local_stride_fifo']),
self.addrwidth)
self.read_local_size_fifo = self.m.Wire('_'.join(['', self.name,
'read_local_size_fifo']),
self.addrwidth + 1)
read_unpack_values = self.unpack_read_req(self.read_req_fifo.rdata)
self.read_op_sel_fifo.assign(read_unpack_values[0])
self.read_local_addr_fifo.assign(read_unpack_values[1])
self.read_local_stride_fifo.assign(read_unpack_values[2])
self.read_local_size_fifo.assign(read_unpack_values[3])
self.read_op_sel_buf = self.m.Reg('_'.join(['', self.name,
'read_op_sel_buf']),
self.op_sel_width, initval=0)
self.read_local_addr_buf = self.m.Reg('_'.join(['', self.name,
'read_local_addr_buf']),
self.addrwidth, initval=0)
self.read_local_stride_buf = self.m.Reg('_'.join(['', self.name,
'read_local_stride_buf']),
self.addrwidth, initval=0)
self.read_local_size_buf = self.m.Reg('_'.join(['', self.name,
'read_local_size_buf']),
self.addrwidth + 1, initval=0)
self.read_data_idle = self.m.Reg(
'_'.join(['', self.name, 'read_data_idle']), initval=1)
self.read_idle = self.m.Wire('_'.join(['', self.name, 'read_idle']))
self.read_idle.assign(vtypes.Ands(self.read_req_fifo.empty, self.read_data_idle))
self.read_op_id_map = OrderedDict()
self.read_op_id_count = 1
self.read_ops = []
self.read_data_fsm = None
self.read_data_narrow_fsm = None
self.read_data_wide_fsm = None
def read(self, fsm):
# state 0
self.seq.If(fsm.here, self.read_data_idle)(
self.read_data_idle(0)
)
fsm.If(self.read_data_idle).goto_next()
# state 1
rcond = fsm.here
tdata = self.m.TmpReg(self.datawidth, initval=0,
signed=True, prefix='axistreamin_tdata')
if self.tdata.tlast is not None:
tlast = self.m.TmpReg(initval=0,
prefix='axistreamin_tlast')
else:
tlast = None
_ = self.read_data(cond=rcond)
fsm.If(self.tdata.tvalid)(
tdata(self.tdata.tdata),
)
if self.tdata.tlast is not None:
fsm.If(self.tdata.tvalid)(
tlast(self.tdata.tlast),
)
self.seq.If(fsm.here, self.tdata.tvalid)(
self.read_data_idle(1)
)
fsm.If(self.tdata.tvalid).goto_next()
return tdata, tlast
def dma_read(self, fsm, ram, local_addr, size,
local_stride=1, port=0):
self._dma_read(fsm, ram, local_addr, size,
local_stride, port)
self.dma_wait_read(fsm)
def dma_read_async(self, fsm, ram, local_addr, size,
local_stride=1, port=0):
self._dma_read(fsm, ram, local_addr, size,
local_stride, port)
def dma_wait_read(self, fsm):
fsm.If(self.read_idle).goto_next()
def _dma_read(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if isinstance(ram, (tuple, list)):
ram = to_multibank_ram(ram)
if not isinstance(ram, (RAM, MultibankRAM)):
raise TypeError('RAM object is required.')
if ram_method is None:
ram_method = getattr(ram, 'write_burst')
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
ram_datawidth = (ram.packed_datawidth if 'packed' in ram_method_name else
ram.rams[0].packed_datawidth if 'block' in ram_method_name else
ram.datawidth)
if not isinstance(self.datawidth, int):
raise TypeError("axi.datawidth must be int, not '%s'" %
str(type(self.datawidth)))
if not isinstance(ram_datawidth, int):
raise TypeError("ram_datawidth must be int, not '%s'" %
str(type(ram_datawidth)))
start = fsm.here
self._set_read_request(ram, port, ram_method, ram_datawidth,
start, local_addr, size, local_stride)
self._synthesize_read_data_fsm(ram, port, ram_method, ram_datawidth)
fsm.If(vtypes.Not(self.read_req_fifo.almost_full)).goto_next()
def _set_read_request(self, ram, port, ram_method, ram_datawidth,
start, local_addr, size, local_stride):
local_size = size
op_id = self._get_read_op_id(ram, port, ram_method)
enq_cond = vtypes.Ands(start,
vtypes.Not(self.read_req_fifo.almost_full))
_ = self.read_req_fifo.enq_rtl(self.pack_read_req(op_id,
local_addr,
local_stride,
local_size),
cond=enq_cond)
def _synthesize_read_data_fsm(self, ram, port, ram_method, ram_datawidth):
if self.datawidth == ram_datawidth:
return self._synthesize_read_data_fsm_same(ram, port, ram_method, ram_datawidth)
if self.datawidth < ram_datawidth:
return self._synthesize_read_data_fsm_narrow(ram, port, ram_method, ram_datawidth)
return self._synthesize_read_data_fsm_wide(ram, port, ram_method, ram_datawidth)
def _synthesize_read_data_fsm_same(self, ram, port, ram_method, ram_datawidth):
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
self.read_ops.append(op_id)
# Data FSM
if self.read_data_fsm is not None:
""" new op """
data_fsm = self.read_data_fsm
data_fsm.set_index(0)
else:
data_fsm = FSM(self.m, '_'.join(['', self.name, 'read_data_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_data_fsm = data_fsm
# Data state 0
cond = vtypes.Ands(self.read_data_idle,
vtypes.Not(self.read_req_fifo.empty),
self.read_op_sel_fifo == op_id)
self.seq.If(data_fsm.here, cond)(
self.read_data_idle(0),
self.read_op_sel_buf(self.read_op_sel_fifo),
self.read_local_addr_buf(self.read_local_addr_fifo),
self.read_local_stride_buf(self.read_local_stride_fifo),
self.read_local_size_buf(self.read_local_size_fifo),
)
deq_cond = vtypes.Ands(data_fsm.here, cond)
_ = self.read_req_fifo.deq_rtl(cond=deq_cond)
data_fsm.If(cond).goto_next()
# Data state 1
ram_cond = vtypes.Ands(data_fsm.here, self.read_op_sel_buf == op_id)
ram_method(self.read_local_addr_buf, self.read_local_stride_buf,
self.read_local_size_buf, 1,
self.tdata.tdata, self.tdata.tvalid, False,
port=port, cond=ram_cond)
data_fsm.goto_next()
# Data state 2
_ = self.read_data(cond=data_fsm)
self.seq.If(data_fsm.here, self.tdata.tvalid)(
self.read_local_size_buf.dec()
)
data_fsm.If(self.tdata.tvalid, self.read_local_size_buf <= 1).goto_init()
self.seq.If(data_fsm.here, self.tdata.tvalid, self.read_local_size_buf <= 1)(
self.read_data_idle(1)
)
def _synthesize_read_data_fsm_narrow(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth < ram.datawidth """
if ram_datawidth % self.datawidth != 0:
raise ValueError(
'ram_datawidth must be multiple number of axi.datawidth')
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
pack_size = ram_datawidth // self.datawidth
log_pack_size = int(math.log(pack_size, 2))
if op_id in self.read_ops:
""" already synthesized op """
return
self.read_ops.append(op_id)
# Data FSM
if self.read_data_narrow_fsm is not None:
""" new op """
data_fsm = self.read_data_narrow_fsm
data_fsm.set_index(0)
else:
data_fsm = FSM(self.m, '_'.join(['', self.name, 'read_data_narrow_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_data_narrow_fsm = data_fsm
# Data state 0
cond = vtypes.Ands(self.read_data_idle,
vtypes.Not(self.read_req_fifo.empty),
self.read_op_sel_fifo == op_id)
self.seq.If(data_fsm.here, cond)(
self.read_data_idle(0),
self.read_op_sel_buf(self.read_op_sel_fifo),
self.read_local_addr_buf(self.read_local_addr_fifo),
self.read_local_stride_buf(self.read_local_stride_fifo),
self.read_local_size_buf(self.read_local_size_fifo),
)
deq_cond = vtypes.Ands(data_fsm.here, cond)
_ = self.read_req_fifo.deq_rtl(cond=deq_cond)
data_fsm.If(cond).goto_next()
# Data state 1
ram_cond = vtypes.Ands(data_fsm.here, self.read_op_sel_buf == op_id)
wdata = self.m.TmpReg(ram_datawidth, initval=0,
prefix='_'.join(['', self.name, 'read_narrow_wdata']))
wvalid = self.m.TmpReg(initval=0, prefix='_'.join(['', self.name, 'read_narrow_wvalid']))
count = self.m.TmpReg(log_pack_size, initval=0,
prefix='_'.join(['', self.name, 'read_narrow_count']))
ram_method(self.read_local_addr_buf, self.read_local_stride_buf,
self.read_local_size_buf, 1,
wdata, wvalid, False,
port=port, cond=ram_cond)
data_fsm(
count(0),
wvalid(0)
)
data_fsm.goto_next()
# Data state 2
_ = self.read_data(cond=data_fsm)
cond = self.read_op_sel_buf == op_id
data_fsm.If(cond)(
wvalid(0)
)
data_fsm.If(cond, self.tdata.tvalid, count < pack_size - 1)(
count.inc(),
wdata(vtypes.Cat(self.tdata.tdata, wdata[self.datawidth:])),
wvalid(0),
)
data_fsm.If(cond, self.tdata.tvalid, count == pack_size - 1)(
count(0),
wdata(vtypes.Cat(self.tdata.tdata, wdata[self.datawidth:])),
wvalid(1)
)
self.seq.If(data_fsm.here, cond, self.tdata.tvalid, count == pack_size - 1)(
self.read_local_size_buf.dec()
)
data_fsm.If(cond, self.tdata.tvalid, self.read_local_size_buf <= 1,
count == pack_size - 1).goto_init()
self.seq.If(data_fsm.here, cond, self.tdata.tvalid, self.read_local_size_buf <= 1,
count == pack_size - 1)(
self.read_data_idle(1)
)
def _synthesize_read_data_fsm_wide(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth > ram.datawidth """
if self.datawidth % ram_datawidth != 0:
raise ValueError(
'axi.datawidth must be multiple number of ram_datawidth')
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
pack_size = self.datawidth // ram_datawidth
log_pack_size = int(math.log(pack_size, 2))
if op_id in self.read_ops:
""" already synthesized op """
return
self.read_ops.append(op_id)
# Data FSM
if self.read_data_wide_fsm is not None:
""" new op """
data_fsm = self.read_data_wide_fsm
data_fsm.set_index(0)
else:
data_fsm = FSM(self.m, '_'.join(['', self.name, 'read_data_wide_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_data_wide_fsm = data_fsm
# Data state 0
cond = vtypes.Ands(self.read_data_idle,
vtypes.Not(self.read_req_fifo.empty),
self.read_op_sel_fifo == op_id)
self.seq.If(data_fsm.here, cond)(
self.read_data_idle(0),
self.read_op_sel_buf(self.read_op_sel_fifo),
self.read_local_addr_buf(self.read_local_addr_fifo),
self.read_local_stride_buf(self.read_local_stride_fifo),
self.read_local_size_buf(self.read_local_size_fifo),
)
deq_cond = vtypes.Ands(data_fsm.here, cond)
_ = self.read_req_fifo.deq_rtl(cond=deq_cond)
data_fsm.If(cond).goto_next()
# Data state 1
ram_cond = vtypes.Ands(data_fsm.here, self.read_op_sel_buf == op_id)
wdata = self.m.TmpReg(self.datawidth, initval=0,
prefix='_'.join(['', self.name, 'read_wide_wdata']))
wvalid = self.m.TmpReg(initval=0, prefix='_'.join(['', self.name, 'read_wide_wvalid']))
count = self.m.TmpReg(log_pack_size, initval=0,
prefix='_'.join(['', self.name, 'read_wide_count']))
_wdata = wdata[:ram_datawidth]
ram_method(self.read_local_addr_buf, self.read_local_stride_buf,
self.read_local_size_buf, 1,
_wdata, wvalid, False,
port=port, cond=ram_cond)
data_fsm(
count(0),
wvalid(0)
)
data_fsm.goto_next()
# Data state 2
cond = self.read_op_sel_buf == op_id
rcond = vtypes.Ands(data_fsm.here, cond, count == 0)
_ = self.read_data(cond=rcond)
data_fsm.If(cond)(
wvalid(0)
)
data_fsm.If(cond, self.tdata.tvalid, count == 0)(
count.inc(),
wdata(self.tdata.tdata),
wvalid(1),
)
self.seq.If(data_fsm.here, cond, self.tdata.tvalid, count == 0)(
self.read_local_size_buf.dec()
)
data_fsm.If(cond, count > 0)(
count.inc(),
wdata(wdata >> ram_datawidth),
wvalid(1),
)
self.seq.If(data_fsm.here, cond, count > 0)(
self.read_local_size_buf.dec()
)
data_fsm.If(cond, count == pack_size - 1)(
count(0)
)
data_fsm.If(self.read_local_size_buf <= 1,
cond, count > 0).goto_init()
data_fsm.If(self.read_local_size_buf <= 1,
cond, self.tdata.tvalid, count == 0).goto_init()
self.seq.If(data_fsm.here, self.read_local_size_buf <= 1,
cond, count > 0)(
self.read_data_idle(1)
)
self.seq.If(data_fsm.here, self.read_local_size_buf <= 1,
cond, self.tdata.tvalid, count == 0)(
self.read_data_idle(1)
)
def _set_flag(self, fsm, prefix='axistreamin_flag'):
flag = self.m.TmpWire(prefix=prefix)
flag.assign(fsm.here)
return flag
def _get_read_op_id(self, ram, port, ram_method):
ram_id = ram._id()
port = vtypes.to_int(port)
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
op = (ram_id, port, ram_method_name)
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
def pack_read_req(self, op_sel, local_addr, local_stride, local_size):
_op_sel = self.m.TmpWire(self.op_sel_width, prefix='pack_read_req_op_sel')
_local_addr = self.m.TmpWire(self.addrwidth, prefix='pack_read_req_local_addr')
_local_stride = self.m.TmpWire(self.addrwidth, prefix='pack_read_req_local_stride')
_local_size = self.m.TmpWire(self.addrwidth + 1, prefix='pack_read_req_local_size')
_op_sel.assign(op_sel)
_local_addr.assign(local_addr)
_local_stride.assign(local_stride)
_local_size.assign(local_size)
packed = self.m.TmpWire(self.op_sel_width + self.addrwidth * 3 + 1,
prefix='pack_read_req_packed')
packed.assign(vtypes.Cat(_op_sel, _local_addr, _local_stride, _local_size))
return packed
def unpack_read_req(self, v):
op_sel = v[self.addrwidth * 3 + 1:self.addrwidth * 3 + 1 + self.op_sel_width]
local_addr = v[self.addrwidth * 2 + 1:self.addrwidth * 2 + 1 + self.addrwidth]
local_stride = v[self.addrwidth + 1:self.addrwidth + 1 + self.addrwidth]
local_size = v[0:self.addrwidth + 1]
_op_sel = self.m.TmpWire(self.op_sel_width, prefix='unpack_read_req_op_sel')
_local_addr = self.m.TmpWire(self.addrwidth, prefix='unpack_read_req_local_addr')
_local_stride = self.m.TmpWire(self.addrwidth, prefix='unpack_read_req_local_stride')
_local_size = self.m.TmpWire(self.addrwidth + 1, prefix='unpack_read_req_local_size')
_op_sel.assign(op_sel)
_local_addr.assign(local_addr)
_local_stride.assign(local_stride)
_local_size.assign(local_size)
return _op_sel, _local_addr, _local_stride, _local_size
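# Layout of the packed read-request word produced by pack_read_req and consumed
# by unpack_read_req (a sketch assuming the default op_sel_width=8 and
# addrwidth=32, i.e. an 8 + 3*32 + 1 = 105-bit word):
#   [104:97] op_sel | [96:65] local_addr | [64:33] local_stride | [32:0] local_size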
class AXIStreamInFifo(AXIStreamIn):
""" AXI Stream Interface to FIFO for Input """
__intrinsics__ = ('read',
'dma_read', 'dma_read_async',
'dma_wait_read')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False,
op_sel_width=8, req_fifo_addrwidth=3, fsm_as_module=False):
axi.AxiStreamIn.__init__(self, m, name, clk, rst, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
noio)
self.addrwidth = addrwidth
self.op_sel_width = op_sel_width
from typing import List, Dict, Tuple, Set, Callable
from copy import copy
import numpy
from nltk import ngrams
from parsimonious.grammar import Grammar
from parsimonious.expressions import Expression, OneOf, Sequence, Literal
from allennlp.semparse.contexts.atis_tables import * # pylint: disable=wildcard-import,unused-wildcard-import
from allennlp.semparse.contexts.atis_sql_table_context import AtisSqlTableContext, KEYWORDS
from allennlp.semparse.contexts.sql_context_utils import SqlVisitor, format_action, initialize_valid_actions
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
def get_strings_from_utterance(tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
"""
Based on the current utterance, return a dictionary where the keys are the strings in
the database that map to lists of the token indices that they are linked to.
"""
string_linking_scores: Dict[str, List[int]] = defaultdict(list)
for index, token in enumerate(tokenized_utterance):
for string in ATIS_TRIGGER_DICT.get(token.text.lower(), []):
string_linking_scores[string].append(index)
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for index, bigram in enumerate(bigrams):
for string in ATIS_TRIGGER_DICT.get(' '.join(bigram).lower(), []):
string_linking_scores[string].extend([index,
index + 1])
trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for index, trigram in enumerate(trigrams):
for string in ATIS_TRIGGER_DICT.get(' '.join(trigram).lower(), []):
string_linking_scores[string].extend([index,
index + 1,
index + 2])
return string_linking_scores
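# A minimal usage sketch (the utterance is hypothetical; the returned keys depend
# entirely on the contents of ATIS_TRIGGER_DICT):
#   tokens = WordTokenizer().tokenize("show me flights to boston")
#   links = get_strings_from_utterance(tokens)
#   # links maps each triggered database string to the indices of the unigram,
#   # bigram and trigram tokens that triggered it, e.g. possibly {'BOSTON': [4]}.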
class AtisWorld():
"""
World representation for the Atis SQL domain. This class has a ``SqlTableContext`` which holds the base
grammar, it then augments this grammar by constraining each column to the values that are allowed in it.
Parameters
----------
utterances: ``List[str]``
A list of utterances in the interaction, the last element in this list is the
current utterance that we are interested in.
tokenizer: ``Tokenizer``, optional (default=``WordTokenizer()``)
We use this tokenizer to tokenize the utterances.
"""
database_file = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/atis/atis.db"
sql_table_context = None
def __init__(self,
utterances: List[str],
tokenizer: Tokenizer = None) -> None:
if AtisWorld.sql_table_context is None:
AtisWorld.sql_table_context = AtisSqlTableContext(ALL_TABLES,
TABLES_WITH_STRINGS,
AtisWorld.database_file)
self.utterances: List[str] = utterances
self.tokenizer = tokenizer if tokenizer else WordTokenizer()
self.tokenized_utterances = [self.tokenizer.tokenize(utterance) for utterance in self.utterances]
self.linked_entities = self._get_linked_entities()
self.dates = self._get_dates()
entities, linking_scores = self._flatten_entities()
# This has shape (num_entities, num_utterance_tokens).
self.linking_scores: numpy.ndarray = linking_scores
self.entities: List[str] = entities
self.grammar: Grammar = self._update_grammar()
self.valid_actions = initialize_valid_actions(self.grammar,
KEYWORDS)
def _update_grammar(self):
"""
We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also
has the new entities that are extracted from the utterance. Stitching together the expressions
to form the grammar is a little tedious here, but it is worth it because we don't have to create
a new grammar from scratch. Creating a new grammar is expensive because we have many production
rules that have all database values in the column on the right hand side. We update the expressions
bottom up, since the higher level expressions may refer to the lower level ones. For example, the
ternary expression will refer to the start and end times.
"""
# This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
# contains ``Expression`` objects that have tuples containing the members of that expression.
# We have to create new sub-expression objects so that the original grammar is not mutated.
new_grammar = copy(AtisWorld.sql_table_context.grammar)
numbers = self._get_numeric_database_values('number')
number_literals = [Literal(number) for number in numbers]
new_grammar['number'] = OneOf(*number_literals, name='number')
self._update_expression_reference(new_grammar, 'pos_value', 'number')
time_range_start = self._get_numeric_database_values('time_range_start')
time_range_start_literals = [Literal(time) for time in time_range_start]
new_grammar['time_range_start'] = OneOf(*time_range_start_literals, name='time_range_start')
time_range_end = self._get_numeric_database_values('time_range_end')
time_range_end_literals = [Literal(time) for time in time_range_end]
new_grammar['time_range_end'] = OneOf(*time_range_end_literals, name='time_range_end')
ternary_expressions = [self._get_sequence_with_spacing(new_grammar,
[new_grammar['col_ref'],
Literal('BETWEEN'),
new_grammar['time_range_start'],
Literal(f'AND'),
new_grammar['time_range_end']]),
self._get_sequence_with_spacing(new_grammar,
[new_grammar['col_ref'],
Literal('NOT'),
Literal('BETWEEN'),
new_grammar['time_range_start'],
Literal(f'AND'),
new_grammar['time_range_end']]),
self._get_sequence_with_spacing(new_grammar,
[new_grammar['col_ref'],
Literal('not'),
Literal('BETWEEN'),
new_grammar['time_range_start'],
Literal(f'AND'),
new_grammar['time_range_end']])]
new_grammar['ternaryexpr'] = OneOf(*ternary_expressions, name='ternaryexpr')
self._update_expression_reference(new_grammar, 'condition', 'ternaryexpr')
if self.dates:
new_binary_expressions = []
year_binary_expression = self._get_sequence_with_spacing(new_grammar,
[Literal('date_day'),
Literal('.'),
Literal('year'),
new_grammar['binaryop'],
Literal(f'{self.dates[0].year}')])
new_binary_expressions.append(year_binary_expression)
for date in self.dates:
month_binary_expression = self._get_sequence_with_spacing(new_grammar,
[Literal('date_day'),
Literal('.'),
Literal('month_number'),
new_grammar['binaryop'],
Literal(f'{date.month}')])
day_binary_expression = self._get_sequence_with_spacing(new_grammar,
[Literal('date_day'),
Literal('.'),
Literal('day_number'),
new_grammar['binaryop'],
Literal(f'{date.day}')])
new_binary_expressions.extend([month_binary_expression,
day_binary_expression])
new_binary_expressions = new_binary_expressions + list(new_grammar['biexpr'].members)
new_grammar['biexpr'] = OneOf(*new_binary_expressions, name='biexpr')
self._update_expression_reference(new_grammar, 'condition', 'biexpr')
return new_grammar
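# A toy sketch of the rule-replacement pattern used in _update_grammar above
# (a standalone example, not the ATIS grammar; the rule name 'value' is made up):
#   g = copy(Grammar('value = "0"'))
#   g['value'] = OneOf(Literal("0"), Literal("1"), name='value')
# Any expression that referred to the old 'value' rule must then be re-pointed at
# the new one, which is what _update_expression_reference below takes care of.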
def _get_numeric_database_values(self,
nonterminal: str) -> List[str]:
return sorted([value[1] for key, value in self.linked_entities['number'].items()
if value[0] == nonterminal], reverse=True)
def _update_expression_reference(self, # pylint: disable=no-self-use
grammar: Grammar,
parent_expression_nonterminal: str,
child_expression_nonterminal: str) -> None:
"""
When we add a new expression, there may be other expressions that refer to
it, and we need to update those to point to the new expression.
"""
grammar[parent_expression_nonterminal].members = \
[member if member.name != child_expression_nonterminal
else grammar[child_expression_nonterminal]
for member in grammar[parent_expression_nonterminal].members]
def _get_sequence_with_spacing(self, # pylint: disable=no-self-use
new_grammar,
expressions: List[Expression],
name: str = '') -> Sequence:
"""
This is a helper method for generating sequences, since we often want a list of expressions
with whitespaces between them.
"""
expressions = [subexpression
for expression in expressions
for subexpression in (expression, new_grammar['ws'])]
return Sequence(*expressions, name=name)
def get_valid_actions(self) -> Dict[str, List[str]]:
return self.valid_actions
def add_to_number_linking_scores(self,
all_numbers: Set[str],
number_linking_scores: Dict[str, Tuple[str, str, List[int]]],
get_number_linking_dict: Callable[[str, List[Token]],
Dict[str, List[int]]],
current_tokenized_utterance: List[Token],
nonterminal: str) -> None:
"""
This is a helper method for adding different types of numbers (eg. starting time ranges) as entities.
We first go through all utterances in the interaction and find the numbers of a certain type and add
them to the set ``all_numbers``, which is initialized with default values. We want to add all numbers
that occur in the interaction, and not just the current turn because the query could contain numbers
that were triggered before the current turn. For each entity, we then check if it is triggered by tokens
in the current utterance and construct the linking score.
"""
number_linking_dict: Dict[str, List[int]] = {}
for utterance, tokenized_utterance in zip(self.utterances, self.tokenized_utterances):
number_linking_dict = get_number_linking_dict(utterance, tokenized_utterance)
all_numbers.update(number_linking_dict.keys())
all_numbers_list: List[str] = sorted(all_numbers, reverse=True)
for number in all_numbers_list:
entity_linking = [0 for token in current_tokenized_utterance]
# ``number_linking_dict`` is for the last utterance here. If the number was triggered
# before the last utterance, then it will have linking scores of 0's.
for token_index in number_linking_dict.get(number, []):
if token_index < len(entity_linking):
entity_linking[token_index] = 1
action = format_action(nonterminal, number, is_number=True, keywords_to_uppercase=KEYWORDS)
number_linking_scores[action] = (nonterminal, number, entity_linking)
def _get_linked_entities(self) -> Dict[str, Dict[str, Tuple[str, str, List[int]]]]:
"""
This method gets entities from the current utterance finds which tokens they are linked to.
The entities are divided into two main groups, ``numbers`` and ``strings``. We rely on these
entities later for updating the valid actions and the grammar.
"""
current_tokenized_utterance = [] if not self.tokenized_utterances \
else self.tokenized_utterances[-1]
# We generate a dictionary where the key is the type eg. ``number`` or ``string``.
# The value is another dictionary where the key is the action and the value is a tuple
# of the nonterminal, the string value and the linking score.
entity_linking_scores: Dict[str, Dict[str, Tuple[str, str, List[int]]]] = {}
number_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
string_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
# Get time range start
self.add_to_number_linking_scores({'0'},
number_linking_scores,
get_time_range_start_from_utterance,
current_tokenized_utterance,
'time_range_start')
self.add_to_number_linking_scores({"1200"},
number_linking_scores,
get_time_range_end_from_utterance,
current_tokenized_utterance,
'time_range_end')
self.add_to_number_linking_scores({'0', '1'},
number_linking_scores,
get_numbers_from_utterance,
current_tokenized_utterance,
'number')
# Add string linking dict.
string_linking_dict: Dict[str, List[int]] = {}
for tokenized_utterance in self.tokenized_utterances:
string_linking_dict = get_strings_from_utterance(tokenized_utterance)
strings_list = AtisWorld.sql_table_context.strings_list
# We construct the linking scores for strings from the ``string_linking_dict`` here.
for string in strings_list:
entity_linking = [0 for token in current_tokenized_utterance]
# string_linking_dict has the strings and linking scores from the last utterance.
# If the string is not in the last utterance, then the linking scores will be all 0.
for token_index in string_linking_dict.get(string[1], []):
entity_linking[token_index] = 1
action = string[0]
string_linking_scores[action] = (action.split(' -> ')[0], string[1], entity_linking)
entity_linking_scores['number'] = number_linking_scores
entity_linking_scores['string'] = string_linking_scores
return entity_linking_scores
def _get_dates(self):
dates = []
for tokenized_utterance in self.tokenized_utterances:
dates.extend(get_date_from_utterance(tokenized_utterance))
return dates
def get_action_sequence(self, query: str) -> List[str]:
sql_visitor = SqlVisitor(self.grammar, keywords_to_uppercase=KEYWORDS)
if query:
action_sequence = sql_visitor.parse(query)
return action_sequence
return []
def all_possible_actions(self) -> List[str]:
"""
Return a sorted list of strings representing all possible actions
of the form: nonterminal -> [right_hand_side]
"""
all_actions = set()
for _, action_list in self.valid_actions.items():
for action in action_list:
all_actions.add(action)
return sorted(all_actions)
def _flatten_entities(self) -> Tuple[List[str], numpy.ndarray]:
"""
When we first get the entities and the linking scores in ``_get_linked_entities``
we represent them as dictionaries for easier updates to the grammar and valid actions.
In this method, we flatten them for the model so that the entities are represented as
a list, and the linking scores as a 2D numpy array.
import sys, subprocess, os, re, time, getopt, shlex
import lldb
from functools import wraps
from ctypes import c_ulonglong as uint64_t
from ctypes import c_void_p as voidptr_t
import code
import core
from core import caching
from core.standard import *
from core.configuration import *
from core.kernelcore import *
from utils import *
from core.lazytarget import *
MODULE_NAME=__name__
""" Kernel Debugging macros for lldb.
Please make sure you read the README COMPLETELY BEFORE reading anything below.
It is very critical that you read the coding guidelines in Section E of the README file.
"""
# End Utility functions
# Debugging specific utility functions
#decorators. Not to be called directly.
def static_var(var_name, initial_value):
def _set_var(obj):
setattr(obj, var_name, initial_value)
return obj
return _set_var
def header(initial_value):
def _set_header(obj):
setattr(obj, 'header', initial_value)
return obj
return _set_header
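# A minimal usage sketch of the two decorators above (the function and its body
# are illustrative only):
#   @static_var('cache', {})
#   @header("{0: <20s} {1: <10s}".format("name", "value"))
#   def ShowSomething(cmd_args=None):
#       ShowSomething.cache['last'] = cmd_args
#       print ShowSomething.header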
# holds type declarations done by xnu.
#DONOTTOUCHME: Exclusive use of lldb_type_summary only.
lldb_summary_definitions = {}
def lldb_type_summary(types_list):
""" A function decorator to register a summary for a type in lldb.
params: types_list - [] an array of types for which you wish to register a summary callback function. (ex. ['task *', 'task_t'])
returns: Nothing. This is a decorator.
"""
def _get_summary(obj):
def _internal_summary_function(lldbval, internal_dict):
out_string= ""
if internal_dict != None and len(obj.header) > 0 :
out_string += "\n" + obj.header +"\n"
out_string += obj( core.value(lldbval) )
return out_string
myglobals = globals()
summary_function_name = "LLDBSummary" + obj.__name__
myglobals[summary_function_name] = _internal_summary_function
summary_function = myglobals[summary_function_name]
summary_function.__doc__ = obj.__doc__
global lldb_summary_definitions
for single_type in types_list:
if config['showTypeSummary']:
if single_type in lldb_summary_definitions.keys():
lldb.debugger.HandleCommand("type summary delete --category kernel \""+ single_type + "\"")
lldb.debugger.HandleCommand("type summary add \""+ single_type +"\" --category kernel --python-function " + MODULE_NAME + "." + summary_function_name)
lldb_summary_definitions[single_type] = obj
return obj
return _get_summary
#global cache of documentation for lldb commands exported by this module
#DONOTTOUCHME: Exclusive use of lldb_command only.
lldb_command_documentation = {}
def lldb_command(cmd_name, option_string = ''):
""" A function decorator to define a command with namd 'cmd_name' in the lldb scope to call python function.
params: cmd_name - str : name of command to be set in lldb prompt.
option_string - str: getopt like option string. Only CAPITAL LETTER options allowed.
see README on Customizing command options.
"""
if option_string != option_string.upper():
raise RuntimeError("Cannot setup command with lowercase option args. %s" % option_string)
def _cmd(obj):
def _internal_command_function(debugger, command, result, internal_dict):
global config, lldb_run_command_state
stream = CommandOutput(result)
# need to avoid printing on stdout if called from lldb_run_command.
if 'active' in lldb_run_command_state and lldb_run_command_state['active']:
debuglog('Running %s from lldb_run_command' % command)
else:
result.SetImmediateOutputFile(sys.__stdout__)
command_args = shlex.split(command)
lldb.debugger.HandleCommand('type category disable kernel' )
def_verbose_level = config['verbosity']
try:
stream.setOptions(command_args, option_string)
if stream.verbose_level != 0:
config['verbosity'] += stream.verbose_level
with RedirectStdStreams(stdout=stream) :
if option_string:
obj(cmd_args=stream.target_cmd_args, cmd_options=stream.target_cmd_options)
else:
obj(cmd_args=stream.target_cmd_args)
except KeyboardInterrupt:
print "Execution interrupted by user"
except ArgumentError as arg_error:
if str(arg_error) != "HELP":
print "Argument Error: " + str(arg_error)
print "{0:s}:\n {1:s}".format(cmd_name, obj.__doc__.strip())
return False
except Exception as exc:
if not config['debug']:
print """
************ LLDB found an exception ************
There has been an uncaught exception. A possible cause could be that remote connection has been disconnected.
However, it is recommended that you report the exception to lldb/kernel debugging team about it.
************ Please run 'xnudebug debug enable' to start collecting logs. ************
"""
raise
if config['showTypeSummary']:
lldb.debugger.HandleCommand('type category enable kernel' )
if stream.pluginRequired :
plugin = LoadXNUPlugin(stream.pluginName)
if plugin == None :
print "Could not load plugins."+stream.pluginName
return
plugin.plugin_init(kern, config, lldb, kern.IsDebuggerConnected())
return_data = plugin.plugin_execute(cmd_name, result.GetOutput())
ProcessXNUPluginResult(return_data)
plugin.plugin_cleanup()
#restore the verbose level after command is complete
config['verbosity'] = def_verbose_level
return
myglobals = globals()
command_function_name = obj.__name__+"Command"
myglobals[command_function_name] = _internal_command_function
command_function = myglobals[command_function_name]
if not obj.__doc__ :
print "ERROR: Cannot register command({:s}) without documentation".format(cmd_name)
return obj
command_function.__doc__ = obj.__doc__
global lldb_command_documentation
if cmd_name in lldb_command_documentation:
lldb.debugger.HandleCommand("command script delete "+cmd_name)
lldb_command_documentation[cmd_name] = (obj.__name__, obj.__doc__.lstrip(), option_string)
lldb.debugger.HandleCommand("command script add -f " + MODULE_NAME + "." + command_function_name + " " + cmd_name)
return obj
return _cmd
def lldb_alias(alias_name, cmd_line):
""" define an alias in the lldb command line.
        A programmatic way of registering an alias. This basically does
(lldb)command alias alias_name "cmd_line"
ex.
lldb_alias('readphys16', 'readphys 16')
"""
alias_name = alias_name.strip()
cmd_line = cmd_line.strip()
lldb.debugger.HandleCommand("command alias " + alias_name + " "+ cmd_line)
def SetupLLDBTypeSummaries(reset=False):
global lldb_summary_definitions, MODULE_NAME
if reset == True:
lldb.debugger.HandleCommand("type category delete kernel ")
for single_type in lldb_summary_definitions.keys():
summary_function = lldb_summary_definitions[single_type]
lldb_cmd = "type summary add \""+ single_type +"\" --category kernel --python-function " + MODULE_NAME + ".LLDBSummary" + summary_function.__name__
debuglog(lldb_cmd)
lldb.debugger.HandleCommand(lldb_cmd)
if config['showTypeSummary']:
lldb.debugger.HandleCommand("type category enable kernel")
else:
lldb.debugger.HandleCommand("type category disable kernel")
return
def LoadXNUPlugin(name):
""" Try to load a plugin from the plugins directory.
"""
retval = None
name=name.strip()
try:
module_obj = __import__('plugins.'+name, globals(), locals(), [], -1)
module_obj = module_obj.__dict__[name]
defs = dir(module_obj)
if 'plugin_init' in defs and 'plugin_execute' in defs and 'plugin_cleanup' in defs:
retval = module_obj
else:
print "Plugin is not correctly implemented. Please read documentation on implementing plugins"
except:
print "plugin not found :"+name
return retval
def ProcessXNUPluginResult(result_data):
""" Look at the returned data from plugin and see if anymore actions are required or not
params: result_data - list of format (status, out_string, more_commands)
"""
ret_status = result_data[0]
ret_string = result_data[1]
ret_commands = result_data[2]
if ret_status == False:
print "Plugin failed: " + ret_string
return
print ret_string
    if len(ret_commands) > 0:
for cmd in ret_commands:
print "Running command on behalf of plugin:" + cmd
lldb.debugger.HandleCommand(cmd)
return
# holds tests registered with xnu.
#DONOTTOUCHME: Exclusive use of xnudebug_test only
lldb_command_tests = {}
def xnudebug_test(test_name):
""" A function decoratore to register a test with the framework. Each test is supposed to be of format
def Test<name>(kernel_target, config, lldb_obj, isConnected )
        NOTE: The test name should start with "Test", else an exception will be raised.
"""
def _test(obj):
global lldb_command_tests
if obj.__name__.find("Test") != 0 :
print "Test name ", obj.__name__ , " should start with Test"
raise ValueError
lldb_command_tests[test_name] = (test_name, obj.__name__, obj, obj.__doc__)
return obj
return _test
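# Illustrative sketch (not part of xnu): a hypothetical test registered through the
# decorator above. Real tests inspect kernel state via the passed-in kernel_target.
@xnudebug_test('example_noop_test')
def TestExampleNoop(kernel_target, config, lldb_obj, isConnected):
    """ Placeholder test showing the required Test<name> signature; always passes. """
    return True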
# End Debugging specific utility functions
# Kernel Debugging specific classes and accessor methods
# global access object for target kernel
def GetObjectAtIndexFromArray(array_base, index):
""" Subscript indexing for arrays that are represented in C as pointers.
for ex. int *arr = malloc(20*sizeof(int));
now to get 3rd int from 'arr' you'd do
arr[2] in C
GetObjectAtIndexFromArray(arr_val,2)
params:
array_base : core.value - representing a pointer type (ex. base of type 'ipc_entry *')
index : int - 0 based index into the array
returns:
core.value : core.value of the same type as array_base_val but pointing to index'th element
"""
array_base_val = array_base.GetSBValue()
base_address = array_base_val.GetValueAsUnsigned()
size = array_base_val.GetType().GetPointeeType().GetByteSize()
obj_address = base_address + (index * size)
obj = kern.GetValueFromAddress(obj_address, array_base_val.GetType().GetName())
return Cast(obj, array_base_val.GetType())
kern = None
def GetLLDBThreadForKernelThread(thread_obj):
""" Get a reference to lldb.SBThread representation for kernel thread.
params:
thread_obj : core.cvalue - thread object of type thread_t
returns
lldb.SBThread - lldb thread object for getting backtrace/registers etc.
"""
tid = unsigned(thread_obj.thread_id)
lldb_process = LazyTarget.GetProcess()
sbthread = lldb_process.GetThreadByID(tid)
if not sbthread.IsValid():
        # in case lldb doesn't know about this thread, create one
if hasattr(lldb_process, "CreateOSPluginThread"):
debuglog("creating os plugin thread on the fly for {0:d} 0x{1:x}".format(tid, thread_obj))
lldb_process.CreateOSPluginThread(tid, unsigned(thread_obj))
else:
raise RuntimeError("LLDB process does not support CreateOSPluginThread.")
sbthread = lldb_process.GetThreadByID(tid)
if not sbthread.IsValid():
raise RuntimeError("Unable to find lldb thread for tid={0:d} thread = {1:#018x} (#16049947: have you put 'settings set target.load-script-from-symbol-file true' in your .lldbinit?)".format(tid, thread_obj))
return sbthread
def GetThreadBackTrace(thread_obj, verbosity = vHUMAN, prefix = ""):
""" Get a string to display back trace for a thread.
params:
thread_obj - core.cvalue : a thread object of type thread_t.
verbosity - int : either of vHUMAN, vSCRIPT or vDETAIL to describe the verbosity of output
prefix - str : a string prefix added before the line for each frame.
returns:
str - a multi line string showing each frame in backtrace.
"""
is_continuation = not bool(unsigned(thread_obj.kernel_stack))
thread_val = GetLLDBThreadForKernelThread(thread_obj)
out_string = ""
kernel_stack = unsigned(thread_obj.kernel_stack)
reserved_stack = unsigned(thread_obj.reserved_stack)
if not is_continuation:
if kernel_stack and reserved_stack:
out_string += prefix + "reserved_stack = {:#018x}\n".format(reserved_stack)
| |
        W = self.filters.w
K0, kernel_depth, kernel_h, kernel_w = self.filters.shape
n, K1, out_nm_h, out_nm_w = top_grad.shape
# Partial derivatives of filters:
# A naive explanation:
# for i in range(K1):
# f = top_grad[:,i]
# # convolute with inp:
# # (n, oh, ow) conv (n, kd*kh*kw, oh, ow) => (K, kd, kh, kw):
# for kw in range(kernel_w):
# for kh in range(kernel_h):
# for kd in range(kernel_depth):
# # self.filters_grad[i, kd, kh, kw] += 1/n *
        #             sum_(p,q,s){ X[p, kd, kh + q*strip_h, kw + s*strip_w] * f[q,s] } # pay attention to indices
#
# Parameters:
        #   i: output channel index, ranging over K1 (the number of filters)
        #   p: input batch sample index, ranging over the batch size n
# q: out_nm_h index
# s: out_nm_w index
# Rearrange above loop:
self.filters.grad[:] = 0.0
for k in range(K1):
f = top_grad[:, k]
for kd in range(kernel_depth):
for kh in range(kernel_h):
for kw in range(kernel_w):
uid = (kd*kernel_h+kh)*kernel_w+kw
self.filters.grad[k, kd, kh, kw] += np.sum(self.X_col[:,uid] * f.reshape(-1, out_nm_h * out_nm_w))
# partial derivatives of inp
        # Opposite to the forward pass: the input gradient is computed by convolving the top gradient with the flipped filter
# (n, channel, in_nm_h, in_nm_w) <= (n, K , oh, ow) conv flipped(filter)
self.inp.grad[:] = 0.0
for k in range(self.channel):
for i in range(self.in_nm_h):
for j in range(self.in_nm_w):
# grad (n, K, oh, ow) conlv flipped(f) (K, kernel_depth, kernel_h, kernel_w)
self.conlv(self.inp.grad, top_grad, W, (k,i,j))
# partial derivatives of bias
for d in range(K1):
f = top_grad[:, d]
self.bias.grad[d] = np.sum(f)
return self.inp.grad, self.filters.grad, self.bias.grad
# Transposed convolution
def conlv(self, target, grad, convs, index):
'''
Transposed Convolution
:param target: np.array, destination
:param grad: np.array, top_diff
:param convs: np.array, original convolution
:param index: tuple, destination index
:return:
'''
K0, kernel_depth, kernel_h, kernel_w = convs.shape
k,i,j = index
for h in range(self.out_nm_h):
for w in range(self.out_nm_w):
if i-h*self.strip+self.pad < 0 or i-h*self.strip+self.pad >= kernel_h or \
j-w*self.strip+self.pad < 0 or j-w*self.strip+self.pad >= kernel_w:
continue
try:
target[:,k,i,j] += np.matmul(grad[:,:,h,w], convs[:, k, i-h*self.strip+self.pad, j-w*self.strip+self.pad])
except Exception as e:
raise(e)
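# Hedged sketch (not part of the original module): a central finite-difference
# gradient is the usual way to sanity-check a hand-written bp() such as the one
# above. 'f' is any scalar-valued function of a float numpy array x; compare the
# result against the analytic gradient with np.allclose.
def _finite_difference_grad(f, x, eps=1e-5):
    import numpy as np
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2.0 * eps)
        it.iternext()
    return grad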
class FullyCnnLayer(Layer):
"""
    See a naive implementation inspired by my solutions submitted to <NAME>'s Coursera deep learning course in 2014 and 2015, where I passed
    the server tests with 100% scores!
    Logistic regression implementation with L1, L2 regularization experiments, solution provided:
    Fully connected neural network implementation, solution provided:
"""
pass
class AtrousConvLayer(Layer):
"""
    AtrousConv (also called dilated convolution) corresponds to the backpropagation algorithm with respect to filters, or the
    gradient of them, being applied to the input in a forward process.
"""
pass
class BatchNorm(Layer):
def __init__(self, frazed=False, gamma=1, beta=0, bn_params={}, mode='trainning'):
# input vol instance
self.inp = None
self.LAYER_TYPE = 'batch_norm'
# Not used for the moment TODO
self.frazed = frazed # Mask RCNN implementation for details
# parameters
self.spatial_size = None
self.gamma = gamma
self.beta = beta
        # The default values are borrowed from cs231n; I didn't find out why they are good choices, maybe they are not.
self.epilon = bn_params.get('epilon', 1e-5)
self.stat_momentum = bn_params.get('stat_momentum', 0.9)
# parameters used inside forward operation, needed to be persistent
self.running_mean = bn_params.get('running_mean', None)
self.running_var = bn_params.get('running_var' , None)
# learnable parameters
        # To be learnable per feature, W (gamma) and bias (beta) must have the same spatial shape as the gradient,
        # since partial gamma_j (a scalar) = sum_i { partial J over partial y_ij * partial y_ij over partial gamma_j },
        # i.e. y_ij depends only on the j-th component of gamma. Hence a single scalar gamma would not make sense here.
self._param_initialized = False
self.W = Vol(1, (1,), init_gen=np.array([self.gamma,])) if np.isscalar(self.gamma) \
else Vol(1, (1,), init_gen=None) # params
self.bias = Vol(1, (1,), init_gen=np.array([self.beta,])) if np.isscalar(self.beta) \
else Vol(1, (1,), init_gen=None) # params
# bn_params
# algorithm
        # Besides the sample mean and variance suggested in the original paper, cs231n also suggests the torch7 batch norm implementation
# algorithm "RunningAverages"
self.algorithm = bn_params.get("algorithm", "SampleStat")
self.supported_algorithms = [
"SampleStat",
"RunningAverages" # see Torch7 batch norm implementation
]
self.mode = mode
# output Vol instance
self.out = None
super(BatchNorm, self).__init__(None, None, 1, 1, name='BatchNorm')
def forward1(self, inp):
self.inp = inp
X = inp.w
n = inp.batch_size
spatial_size = inp.spatial_size
self.spatial_size = spatial_size
if self.algorithm not in self.supported_algorithms:
raise UnSupportedAlgorithm("Does not support %s. We only support %s" % (self.algorithm, self.supported_algorithms))
if self._param_initialized is False:
identity = np.ones(spatial_size)
self.W.reset_spatial_size(spatial_size, fill=identity * self.gamma)
self.bias.reset_spatial_size(spatial_size, fill=identity * self.beta)
self._param_initialized = True
W = self.W.w
bias = self.bias.w
if self.mode == 'validation':
shifted = X - self.running_mean
std = np.sqrt(self.running_var + self.epilon)
normalized = shifted * 1.0 / std
affine = normalized * W + bias
self.out = Vol(n, spatial_size, init_gen=affine)
return self.out
        if self.mode != 'trainning':
            raise ValueError("Invalid forward batchnorm mode <%s>" % self.mode)
# stat computation
miu = np.mean(X, axis=0)
shifted = X - miu
variance = np.sum(shifted**2, axis=0) / n
std = np.sqrt(variance + self.epilon)
normalized = shifted * 1.0 / std
affine = normalized * W + bias
self.normalized = normalized
self.shifted = shifted
self.variance = variance
        if self.algorithm == "SampleStat":
self.out = Vol(n, inp.spatial_size, init_gen=affine)
return self.out
# see cs231n implementation for reference
        if self.algorithm == "RunningAverages":
"""
The running_mean and running_var are used in inference mode where we have no idea of what statistics should be used from input.
Hence we turn to running batch of data to gather statistic information.
"""
            if self.running_mean is None:
                self.running_mean = np.zeros(spatial_size, dtype=X.dtype)
            if self.running_var is None:
                self.running_var = np.zeros(spatial_size, dtype=X.dtype)
            self.running_mean = self.stat_momentum * self.running_mean + (1 - self.stat_momentum) * miu
            self.running_var = self.stat_momentum * self.running_var + (1 - self.stat_momentum) * variance
self.out = Vol(n, spatial_size, init_gen=affine)
return self.out
else:
raise NotImplementedError("%s not implemented yet!" % self.algorithm)
def bp(self, top_layer):
grad = top_layer.inp.grad
spatial_size = self.spatial_size or grad.spatial_size
if self._param_initialized is False:
identity = np.ones(spatial_size)
self.W.reset_spatial_size(spatial_size, fill=identity * self.gamma)
self.bias.reset_spatial_size(spatial_size, fill=identity * self.beta)
self._param_initialized = True
W = self.W.w
N = grad.shape[0]
        # bias gradient: the derivative of the shift parameter beta
db = np.sum(grad, axis=0) # the derivative of beta
self.bias.grad[:] = db[:]
        # dW: the derivative of the scale parameter gamma
dW = np.sum(self.normalized * grad, axis=0) # the derivative of gamma
self.W.grad[:] = dW[:]
# partial derivatives of inp
dx_bar = grad * W
inverse_std = 1.0 / np.sqrt(self.variance + self.epilon)
dvar = np.sum(dx_bar * self.shifted, axis=0) * -0.5 * inverse_std**3
dmiu_1 = dx_bar * inverse_std
dmiu_2 = 2 * dvar * np.ones(grad.shape) * self.shifted / N
dx1 = dmiu_1 + dmiu_2
dmiu = -1 * np.sum(dx1, axis=0)
dx2 = dmiu * 1.0 / N
self.inp.grad[:] = dx1 + dx2
return self.inp.grad, self.W.grad, self.bias.grad
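# Minimal standalone reference (a sketch mirroring the "SampleStat" path above,
# not part of the original module) of batch-norm forward/backward in plain numpy.
# It can be used to cross-check the derivation in BatchNorm.bp: dX corresponds to
# inp.grad, dgamma to W.grad and dbeta to bias.grad.
def _batchnorm_reference(X, gamma, beta, dout, eps=1e-5):
    import numpy as np
    N = X.shape[0]
    mu = X.mean(axis=0)
    var = X.var(axis=0)
    std = np.sqrt(var + eps)
    xhat = (X - mu) / std
    out = gamma * xhat + beta
    # backward pass
    dbeta = dout.sum(axis=0)
    dgamma = (xhat * dout).sum(axis=0)
    dxhat = dout * gamma
    dvar = np.sum(dxhat * (X - mu), axis=0) * -0.5 * std**-3
    dmu = -np.sum(dxhat / std, axis=0) + dvar * np.mean(-2.0 * (X - mu), axis=0)
    dX = dxhat / std + dvar * 2.0 * (X - mu) / N + dmu / N
    return out, dX, dgamma, dbeta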
class ReluActivation(Layer):
def __init__(self):
# input Vol instance
self.inp = None
self.LAYER_TYPE = 'relu'
# output Vol instance
self.out = None
super(ReluActivation, self).__init__(None, None, 1, 1, name='ReluActivation')
def forward1(self, inp):
self.inp = inp
X = inp.w
out = np.maximum(0, X)
self.out = Vol(inp.batch_size, inp.spatial_size, init_gen=out)
return self.out
def bp(self, top_layer):
# bias_grad, partial derivatives of biases, no parameters in this layer
db = None
# dW, no parameters in this layer
dW = None
# partial derivatives of inp
top_grad = top_layer.inp.grad
out = top_layer.inp.w
self.inp.grad[:] = top_grad * (np.maximum(0, out) > 0)
return self.inp.grad, dW, db
class MaxPooling(Layer):pass
class UpSampling(Layer):
"""
    Upsampling resamples and interpolates the input up to a higher resolution. The terminology comes from Signal
    Processing. In a convolutional neural network, since max pooling is not invertible, upsampling is an approximation of
    the reverse operation of max pooling, and is commonly used by the Feature Pyramid Network (FPN) backbone.
    FPN and ResNet50 (101, 152, ...) form the foundation of the state of the art in network architectures for feature extraction
    in object detection. FPN builds different scales of the same feature map and composes two stages of layer
    stacking: bottom-up and top-down. It is in the top-down stage that we need `upsampling` from the smaller-resolution feature map:
P_i = Add(Upsampling(P_{i+1}), Conv2D()(Ci)) 2<= i < 5
P_5 = Conv2D()(C_5)
    There are several implementations for that
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class ClusterAlterRule(pulumi.CustomResource):
annotations: pulumi.Output[dict]
"""
The cluster alert rule annotations (map)
"""
cluster_id: pulumi.Output[str]
"""
The cluster id where create cluster alert rule (string)
"""
event_rule: pulumi.Output[dict]
"""
The cluster alert rule event rule. ConflictsWith: `"metric_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
* `eventType` (`str`) - Event type. Supported values : `"Warning" | "Normal"`. Default: `Warning` (string)
* `resourceKind` (`str`) - Resource kind. Supported values : `"DaemonSet" | "Deployment" | "Node" | "Pod" | "StatefulSet"` (string)
"""
group_id: pulumi.Output[str]
"""
The cluster alert rule alert group ID (string)
"""
group_interval_seconds: pulumi.Output[float]
"""
The cluster alert rule group interval seconds. Default: `180` (int)
"""
group_wait_seconds: pulumi.Output[float]
"""
The cluster alert rule group wait seconds. Default: `180` (int)
"""
inherited: pulumi.Output[bool]
"""
The cluster alert rule inherited. Default: `true` (bool)
"""
labels: pulumi.Output[dict]
"""
The cluster alert rule labels (map)
"""
metric_rule: pulumi.Output[dict]
"""
The cluster alert rule metric rule. ConflictsWith: `"event_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
* `comparison` (`str`) - Metric rule comparison. Supported values : `"equal" | "greater-or-equal" | "greater-than" | "less-or-equal" | "less-than" | "not-equal" | "has-value"`. Default: `equal` (string)
* `description` (`str`) - Metric rule description (string)
* `duration` (`str`) - Metric rule duration (string)
* `expression` (`str`) - Metric rule expression (string)
* `thresholdValue` (`float`) - Metric rule threshold value (float64)
"""
name: pulumi.Output[str]
"""
The cluster alert rule name (string)
"""
node_rule: pulumi.Output[dict]
"""
The cluster alert rule node rule. ConflictsWith: `"event_rule", "metric_rule", "system_service_rule"`` (list Maxitems:1)
* `condition` (`str`) - System service rule condition. Supported values : `"controller-manager" | "etcd" | "scheduler"`. Default: `scheduler` (string)
* `cpuThreshold` (`float`) - Node rule cpu threshold. Default: `70` (int)
* `memThreshold` (`float`) - Node rule mem threshold. Default: `70` (int)
* `nodeId` (`str`) - Node ID (string)
* `selector` (`dict`) - Node rule selector (map)
"""
repeat_interval_seconds: pulumi.Output[float]
"""
The cluster alert rule wait seconds. Default: `3600` (int)
"""
severity: pulumi.Output[str]
"""
The cluster alert rule severity. Supported values : `"critical" | "info" | "warning"`. Default: `critical` (string)
"""
system_service_rule: pulumi.Output[dict]
"""
The cluster alert rule system service rule. ConflictsWith: `"event_rule", "metric_rule", "node_rule"`` (list Maxitems:1)
* `condition` (`str`) - System service rule condition. Supported values : `"controller-manager" | "etcd" | "scheduler"`. Default: `scheduler` (string)
"""
def __init__(__self__, resource_name, opts=None, annotations=None, cluster_id=None, event_rule=None, group_id=None, group_interval_seconds=None, group_wait_seconds=None, inherited=None, labels=None, metric_rule=None, name=None, node_rule=None, repeat_interval_seconds=None, severity=None, system_service_rule=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Rancher v2 Cluster Alert Rule resource. This can be used to create Cluster Alert Rule for Rancher v2 environments and retrieve their information.
> This content is derived from https://github.com/terraform-providers/terraform-provider-rancher2/blob/master/website/docs/r/clusterAlertRule.html.markdown.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] annotations: The cluster alert rule annotations (map)
:param pulumi.Input[str] cluster_id: The cluster id where create cluster alert rule (string)
:param pulumi.Input[dict] event_rule: The cluster alert rule event rule. ConflictsWith: `"metric_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[str] group_id: The cluster alert rule alert group ID (string)
:param pulumi.Input[float] group_interval_seconds: The cluster alert rule group interval seconds. Default: `180` (int)
:param pulumi.Input[float] group_wait_seconds: The cluster alert rule group wait seconds. Default: `180` (int)
:param pulumi.Input[bool] inherited: The cluster alert rule inherited. Default: `true` (bool)
:param pulumi.Input[dict] labels: The cluster alert rule labels (map)
:param pulumi.Input[dict] metric_rule: The cluster alert rule metric rule. ConflictsWith: `"event_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[str] name: The cluster alert rule name (string)
:param pulumi.Input[dict] node_rule: The cluster alert rule node rule. ConflictsWith: `"event_rule", "metric_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[float] repeat_interval_seconds: The cluster alert rule wait seconds. Default: `3600` (int)
:param pulumi.Input[str] severity: The cluster alert rule severity. Supported values : `"critical" | "info" | "warning"`. Default: `critical` (string)
:param pulumi.Input[dict] system_service_rule: The cluster alert rule system service rule. ConflictsWith: `"event_rule", "metric_rule", "node_rule"`` (list Maxitems:1)
The **event_rule** object supports the following:
* `eventType` (`pulumi.Input[str]`) - Event type. Supported values : `"Warning" | "Normal"`. Default: `Warning` (string)
* `resourceKind` (`pulumi.Input[str]`) - Resource kind. Supported values : `"DaemonSet" | "Deployment" | "Node" | "Pod" | "StatefulSet"` (string)
The **metric_rule** object supports the following:
* `comparison` (`pulumi.Input[str]`) - Metric rule comparison. Supported values : `"equal" | "greater-or-equal" | "greater-than" | "less-or-equal" | "less-than" | "not-equal" | "has-value"`. Default: `equal` (string)
* `description` (`pulumi.Input[str]`) - Metric rule description (string)
* `duration` (`pulumi.Input[str]`) - Metric rule duration (string)
* `expression` (`pulumi.Input[str]`) - Metric rule expression (string)
* `thresholdValue` (`pulumi.Input[float]`) - Metric rule threshold value (float64)
The **node_rule** object supports the following:
* `condition` (`pulumi.Input[str]`) - System service rule condition. Supported values : `"controller-manager" | "etcd" | "scheduler"`. Default: `scheduler` (string)
* `cpuThreshold` (`pulumi.Input[float]`) - Node rule cpu threshold. Default: `70` (int)
* `memThreshold` (`pulumi.Input[float]`) - Node rule mem threshold. Default: `70` (int)
* `nodeId` (`pulumi.Input[str]`) - Node ID (string)
* `selector` (`pulumi.Input[dict]`) - Node rule selector (map)
The **system_service_rule** object supports the following:
* `condition` (`pulumi.Input[str]`) - System service rule condition. Supported values : `"controller-manager" | "etcd" | "scheduler"`. Default: `scheduler` (string)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['annotations'] = annotations
if cluster_id is None:
raise TypeError("Missing required property 'cluster_id'")
__props__['cluster_id'] = cluster_id
__props__['event_rule'] = event_rule
if group_id is None:
raise TypeError("Missing required property 'group_id'")
__props__['group_id'] = group_id
__props__['group_interval_seconds'] = group_interval_seconds
__props__['group_wait_seconds'] = group_wait_seconds
__props__['inherited'] = inherited
__props__['labels'] = labels
__props__['metric_rule'] = metric_rule
__props__['name'] = name
__props__['node_rule'] = node_rule
__props__['repeat_interval_seconds'] = repeat_interval_seconds
__props__['severity'] = severity
__props__['system_service_rule'] = system_service_rule
super(ClusterAlterRule, __self__).__init__(
'rancher2:index/clusterAlterRule:ClusterAlterRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, annotations=None, cluster_id=None, event_rule=None, group_id=None, group_interval_seconds=None, group_wait_seconds=None, inherited=None, labels=None, metric_rule=None, name=None, node_rule=None, repeat_interval_seconds=None, severity=None, system_service_rule=None):
"""
Get an existing ClusterAlterRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] annotations: The cluster alert rule annotations (map)
:param pulumi.Input[str] cluster_id: The cluster id where create cluster alert rule (string)
:param pulumi.Input[dict] event_rule: The cluster alert rule event rule. ConflictsWith: `"metric_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[str] group_id: The cluster alert rule alert group ID (string)
:param pulumi.Input[float] group_interval_seconds: The cluster alert rule group interval seconds. Default: `180` (int)
:param pulumi.Input[float] group_wait_seconds: The cluster alert rule group wait seconds. Default: `180` (int)
:param pulumi.Input[bool] inherited: The cluster alert rule inherited. Default: `true` (bool)
:param pulumi.Input[dict] labels: The cluster alert rule labels (map)
:param pulumi.Input[dict] metric_rule: The cluster alert rule metric rule. ConflictsWith: `"event_rule", "node_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[str] name: The cluster alert rule name (string)
:param pulumi.Input[dict] node_rule: The cluster alert rule node rule. ConflictsWith: `"event_rule", "metric_rule", "system_service_rule"`` (list Maxitems:1)
:param pulumi.Input[float] repeat_interval_seconds: The cluster alert rule wait seconds. Default: `3600` (int)
:param pulumi.Input[str] severity: The cluster alert rule severity. Supported values : `"critical" | "info" | "warning"`. Default: `critical` (string)
:param pulumi.Input[dict] system_service_rule: The cluster alert rule system service rule. ConflictsWith: `"event_rule", "metric_rule", "node_rule"`` (list Maxitems:1)
The **event_rule** object supports the following:
* `eventType` (`pulumi.Input[str]`) - Event type. Supported values : `"Warning" | "Normal"`. Default: `Warning` (string)
* `resourceKind` (`pulumi.Input[str]`) - Resource kind. Supported values : | |
%s was not entered correctly. Possible'\
+'material types include "iso", "trans_iso", or "ortho." In'\
+'addition, mat_constants must then be of length 3,6, or 10'\
+'respectively. Refer to documentation for more clarification.')
# Store material constants such that:
self.E1 = saved_mat_const[0]
self.E2 = saved_mat_const[1]
self.E3 = saved_mat_const[2]
self.nu_23 = saved_mat_const[3]
self.nu_13 = saved_mat_const[4]
self.nu_12 = saved_mat_const[5]
self.G_23 = saved_mat_const[6]
self.G_13 = saved_mat_const[7]
self.G_12 = saved_mat_const[8]
self.rho = saved_mat_const[9]
self.t = mat_t
# Initialize the compliance matrix in the local fiber 123 CSYS:
self.Smat = np.array([[1./self.E1,-self.nu_12/self.E1,-self.nu_13/self.E1,0.,0.,0.],\
[-self.nu_12/self.E1,1./self.E2,-self.nu_23/self.E2,0.,0.,0.],\
[-self.nu_13/self.E1,-self.nu_23/self.E2,1./self.E3,0.,0.,0.],\
[0.,0.,0.,1./self.G_23,0.,0.],\
[0.,0.,0.,0.,1./self.G_13,0.],\
[0.,0.,0.,0.,0.,1./self.G_12]])
# Rotate the compliance matrix to the local x-sect csys if the material
# is to be used for cross-sectional analysis:
self.Smat = self.returnComplMat(th)
# Solve for the material stiffness matrix
self.Cmat = np.linalg.inv(self.Smat)
def printSummary(self,**kwargs):
"""Prints a tabulated summary of the material.
This method prints out basic information about the
material, including the type, the material constants, material
thickness, as well as the tabulated stiffness or compliance
        matrices if requested.
:Args:
        - `compliance (bool)`: A boolean input to signify if the compliance
matrix should be printed.
        - `stiffness (bool)`: A boolean input to signify if the stiffness matrix
should be printed.
:Returns:
- String print out containing the material name, as well as material
constants and other defining material attributes. If requested
            this includes the material stiffness and compliance matrices.
"""
# Print Name
print(self.name)
# Print string summary attribute
print(self.summary)
# Print compliance matrix if requested
if kwargs.pop('compliance',False):
print('COMPLIANCE MATRIX')
print('xyz cross-section CSYS:')
print(tabulate(self.Smat,tablefmt="fancy_grid"))
# Print Stiffness matrix if requested
if kwargs.pop('stiffness',False):
print('STIFFNESS MATRIX')
print('xyz cross-section CSYS:')
print(tabulate(np.around(self.Cmat,decimals=4),tablefmt="fancy_grid"))
def returnComplMat(self,th,**kwargs):
"""Returns the material 6x6 compliance matrix.
        Mainly intended as a private method although kept public; it
        facilitates the transformation of the compliance matrix to another
        coordinate system.
:Args:
- `th (1x3 Array[float])`: The angles about which the material can be
rotated when it is initialized. In degrees.
:Returns:
- `Sp`: The transformed compliance matrix.
"""
# Method to return the compliance matrix
rh = RotationHelper()
Sp = rh.transformCompl(self.Smat,th)
return Sp
class MicroMechanics:
"""An class which calculates properties using micromechanical models.
This method while not currently implemented can be used to calculate
smeared material properties given the isotropic matrix and transversely
isotropic fiber mechanical properties.
"""
def genCompProp(Vf,E1f,E2f,nu12f,G12f,E_m,nu_m,rhof,rhom,**kwargs):
"""Calculates the smeared properties of a composite.
Given the fiber and matrix material information, this method assists
with calculating the smeared mechanical properties of a composite.
The code assumes the fibers are transversely isotropic, and the matrix
is isotropic.
        This class is currently in beta form and is largely unsupported. The
        methods and formulas have been tested; however, the class has not been
        used or integrated with any other piece of the module.
:Args:
- `Vf (float)`: The fiber volume fraction
- `E1f (float)`: The fiber stiffness in the 1-direction
- `E2f (float)`: The fiber stiffness in the 2-direction
- `nu12f (float)`: The in-plane fiber poisson ratio
- `G12f (float)`: The in-plane fiber shear modulus
- `E_m (float)`: The matrix stiffness
- `nu_m (float)`: The matrix poisson ratio
- `rhof (float)`: The fiber density
- `rhom (float)`: The matrix density
- `thermal (1x3 Array[float])`: Coefficients of thermal expansion
- `moisture (1x3 Array[float])`: Coeffiecients of moisture expansion
:Returns:
- An array containing the transversely isotropic material properties
of the smeared material.
"""
#thermal = [a1f, a2f, a_m]
thermal = kwargs.pop('thermal', [0,0,0])
#moisture = [b1f, b2f, b_m]
moisture = kwargs.pop('moisture', [0,0,0])
#G_m:
G_m = E_m/(2.*(1.+nu_m))
#E1:
E1 = E1f*Vf+E_m*(1-Vf)
#E2:
E2 = 1/((1-np.sqrt(Vf))/E_m+np.sqrt(Vf)/(E2f*np.sqrt(Vf)+(1-np.sqrt(Vf))*E_m))
#Nu_12:
nu_12 = nu12f*Vf+nu_m*(1-Vf)
#Nu_23 = ???
#TODO: Implement micro-mechanical model
nu_23 = .458
#G_12
G_12 = G_m*(((G_m+G12f)-Vf*(G_m-G12f))/((G_m+G12f)+Vf*(G_m-G12f)))
#Comp Density
rho = rhof*Vf+rhom*(1-Vf)
#TODO: Add thermal properties to the output set
if not thermal==[0,0,0]:
a1f = thermal[0]
a2f = thermal[1]
a_m = thermal[2]
#Alpha_1
a1 = (E1f*a1f*Vf+E_m*a_m*(1-Vf))/(E1f*Vf+E_m*(1-Vf))
#Alpha_2
a2 = (a2f-(E_m/E1)*Vf*(a_m-a1f)*(1-Vf))*Vf+(a_m+(E1f/E1)*nu_m*(a_m-a1f)*Vf)*(1-Vf)
if not moisture==[0,0,0]:
b1f = moisture[0]
b2f = moisture[1]
b_m = moisture[2]
#Beta_1
b1 = (E1f*b1f*Vf+E_m*b_m*(1-Vf))/(E1f*Vf+E_m*(1-Vf))
#Beta_2
b2 = (b2f-(E_m/E1)*Vf*(b_m-b1f)*(1-Vf))*Vf+(b_m+(E1f/E1)*nu_m*(b_m-b1f)*Vf)*(1-Vf)
return [E1, E2, nu_12, nu_23, G_12, rho]
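# Hedged usage sketch (not part of the original module; all numbers are
# illustrative placeholders, not datasheet values): exercising the rule-of-mixtures
# relations above for a hypothetical 60% fiber-volume carbon/epoxy ply, with
# moduli in MPa and densities in tonne/mm^3. genCompProp is called here as a plain
# function on the class (Python 3 behaviour).
def _example_smeared_ply_properties():
    return MicroMechanics.genCompProp(Vf=0.6, E1f=230e3, E2f=15e3, nu12f=0.2,
                                      G12f=27e3, E_m=3.5e3, nu_m=0.35,
                                      rhof=1.8e-9, rhom=1.2e-9)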
# 2-D CQUADX class, can be used for cross-sectional analysis
class CQUADX:
""" Creates a linear, 2D 4 node quadrilateral element object.
The main purpose of this class is to assist in the cross-sectional
analysis of a beam, however it COULD be modified to serve as an element for
2D plate or laminate FE analysis.
:Attributes:
- `type (str)`: A string designating it a CQUADX element.
- `xsect (bool)`: States whether the element is to be used in cross-
sectional analysis.
- `th (1x3 Array[float])`: Array containing the Euler-angles expressing how
the element constitutive relations should be rotated from the
material fiber frame to the global CSYS. In degrees.
- `EID (int)`: An integer identifier for the CQUADX element.
    - `MID (int)`: An integer referencing the material ID used for the
constitutive relations.
- `NIDs (1x4 Array[int])`: Contains the integer node identifiers for the
node objects used to create the element.
- `nodes (1x4 Array[obj])`: Contains the properly ordered nodes objects
used to create the element.
- `xs (1x4 np.array[float])`: Array containing the x-coordinates of the
nodes used in the element
- `ys (1x4 np.array[float])`: Array containing the y-coordinates of the
nodes used in the element
- `rho (float)`: Density of the material used in the element.
- `mass (float)`: Mass per unit length (or thickness) of the element.
- `U (12x1 np.array[float])`: This column vector contains the CQUADXs
3 DOF (x-y-z) displacements in the local xsect CSYS due to cross-
section warping effects.
- `Eps (6x4 np.array[float])`: A matrix containing the 3D strain state
within the CQUADX element.
- `Sig (6x4 np.array[float])`: A matrix containing the 3D stress state
within the CQUADX element.
:Methods:
- `x`: Calculates the local xsect x-coordinate provided the desired master
coordinates eta and xi.
- `y`: Calculates the local xsect y-coordinate provided the desired master
coordinates eta and xi.
- `J`: Calculates the jacobian of the element provided the desired master
coordinates eta and xi.
- `resetResults`: Initializes the displacement (U), strain (Eps), and
stress (Sig) attributes of the element.
- `getDeformed`: Provided an analysis has been conducted, this method
returns 3 2x2 np.array[float] containing the element warped
displacements in the local xsect CSYS.
- `getStressState`: Provided an analysis has been conducted, this method
returns 3 2x2 np.array[float] containing the element stress at four
points. The 3D stress state is processed to return the Von-Mises
or Maximum Principal stress state.
- `printSummary`: Prints out a tabulated form of the element ID, as well
as the node ID's referenced by the element.
"""
def __init__(self,EID,nodes,MID,matLib,**kwargs):
""" Initializes the element.
:Args:
- `EID (int)`: An integer identifier for the CQUADX element.
- `nodes (1x4 Array[obj])`: Contains the properly ordered nodes objects
used to create the element.
        - `MID (int)`: An integer referencing the material ID used for the
constitutive relations.
- `matLib (obj)`: A material library object containing a dictionary
with the material corresponding to the provided MID.
- `xsect (bool)`: A boolean to determine whether this quad element is
to be used for cross-sectional analysis. Defualt value is True.
- `th (1x3 Array[float])`: Array containing the Euler-angles expressing
how the element constitutive relations should be rotated from
the material fiber frame to the global CSYS. In degrees.
:Returns:
- None
.. Note:: The reference coordinate system for cross-sectional analysis is a
        local coordinate system in which the x and y axes are planar with the
element, and the z-axis is perpendicular to the plane of the element.
"""
# Initialize type
self.type = 'CQUADX'
# Used for xsect analysis?
xsect = kwargs.pop('xsect', True)
self.xsect = xsect
# Initialize Euler-angles for material orientation in the xsect | |
theta: m_tau1 m_tau2 c_tau m_delta1 m_delta2 c_delta
:param lam:
wavelength in angstrom
:param flux_i:
intrinsic flux of sed (units don't matter)
:param logmstar:
log M* of galaxies
:param logsfr:
log SFR of galaxies
:param nebular:
if True nebular flux has an attenuation that is scaled from the
continuum attenuation.
'''
assert theta.shape[0] == 6, print(theta)
logmstar = np.atleast_1d(logmstar)
logsfr = np.atleast_1d(logsfr)
zerosfr = (logsfr == -999.)
_logmstar = logmstar[~zerosfr]
_logsfr = logsfr[~zerosfr]
tauV = np.clip(theta[0] * (_logmstar - 10.) + theta[1] * _logsfr + theta[2],
1e-3, None)
delta = theta[3] * (_logmstar - 10.) + theta[4] * _logsfr + theta[5]
# Kriek & Conroy (2013)
E_b = -1.9 * delta + 0.85
# Narayanan+(2018)
# E_b = -0.46 * delta + 0.69
    # randomly sample the inclination angle from 0 - pi/2
if incl is None:
incl = np.random.uniform(0., 0.5*np.pi, size=_logmstar.shape[0])
sec_incl = 1./np.cos(incl)
#Eq. 14 of Somerville+(1999)
A_V = -2.5 * np.log10((1.0 - np.exp(-tauV * sec_incl)) / (tauV * sec_incl))
assert np.all(np.isfinite(A_V))
dlam = 350. # width of bump from Noll+(2009)
lam0 = 2175. # wavelength of bump
k_V_calzetti = 4.87789
# bump
D_bump = E_b[:,None] * ((lam * dlam)**2 / ((lam**2 - lam0**2)**2 + (lam *
dlam)**2))
# calzetti is already normalized to k_V
A_lambda = A_V[:,None] * (calzetti_absorption(lam) + D_bump / k_V_calzetti) * \
(lam / 5500.)**delta[:,None]
_T_lam = 10.0**(-0.4 * A_lambda)
T_lam = np.ones((len(logmstar), len(lam)))
T_lam[~zerosfr] = _T_lam
return flux_i * T_lam
def DEM_slab_noll_msfr(theta, lam, flux_i, logmstar, logsfr, nebular=True):
''' Dust empirical model that combines the slab model with Noll+(2009)
A(lambda) = -2.5 log10( (1 - exp(-tauV sec(i))) / (tauV sec(i)) ) x
(k'(lambda) + D(lambda, E_b))/k_V x
(lambda / lambda_V)^delta
tauV = m_tau1 (log M* - 10.) + m_tau2 logSFR + c_tau
delta = m_delta1 (log M* - 10.) + m_delta2 logSFR + c_delta -2.2 < delta < 0.4
E_b = m_E delta + c_E
:param theta:
9 free parameter of the slab + Noll+(2009) model
theta: m_tau1 m_tau2 c_tau m_delta1 m_delta2 c_delta m_E c_E f_nebular
:param lam:
wavelength in angstrom
:param flux_i:
intrinsic flux of sed (units don't matter)
:param logmstar:
log M* of galaxies
:param logsfr:
log SFR of galaxies
:param nebular:
if True nebular flux has an attenuation that is scaled from the
continuum attenuation.
'''
assert theta.shape[0] == 9, print(theta)
if isinstance(logsfr, float):
if logsfr == -999.: raise ValueError
else:
if -999. in logsfr: raise ValueError
logmstar = np.atleast_1d(logmstar)
logsfr = np.atleast_1d(logsfr)
tauV = np.clip(theta[0] * (logmstar - 10.) + theta[1] * logsfr + theta[2],
1e-3, None)
delta = theta[3] * (logmstar - 10.) + theta[4] * logsfr + theta[5]
E_b = theta[6] * delta + theta[7]
    # randomly sample the inclination angle from 0 - pi/2
incl = np.random.uniform(0., 0.5*np.pi, size=logmstar.shape[0])
sec_incl = 1./np.cos(incl)
#Eq. 14 of Somerville+(1999)
A_V = -2.5 * np.log10((1.0 - np.exp(-tauV * sec_incl)) / (tauV * sec_incl))
assert np.all(np.isfinite(A_V)), print(tauV, logmstar, logsfr)
dlam = 350. # width of bump from Noll+(2009)
lam0 = 2175. # wavelength of bump
k_V_calzetti = 4.87789
# bump
D_bump = E_b[:,None] * ((lam * dlam)**2 / ((lam**2 - lam0**2)**2 + (lam *
dlam)**2))
# calzetti is already normalized to k_V
A_lambda = A_V[:,None] * (calzetti_absorption(lam) + D_bump / k_V_calzetti) * \
(lam / 5500.)**delta[:,None]
if not nebular: factor = 1.
else: factor = theta[8]
T_lam = 10.0**(-0.4 * A_lambda * factor)
return flux_i * T_lam
def DEM_slab_noll_m(theta, lam, flux_i, logmstar, logsfr, nebular=True):
''' Dust empirical model that combines the slab model with Noll+(2009)
A(lambda) = -2.5 log10( (1 - exp(-tauV sec(i))) / (tauV sec(i)) ) x
(k'(lambda) + D(lambda, E_b))/k_V x
(lambda / lambda_V)^delta
tauV = m_tau (log M* - 10.) + c_tau
delta = m_delta (log M* - 10.) + c_delta -2.2 < delta < 0.4
E_b = m_E delta + c_E
:param theta:
7 free parameter of the slab + Noll+(2009) model
theta: m_tau c_tau m_delta c_delta m_E c_E f_nebular
:param lam:
wavelength in angstrom
:param flux_i:
intrinsic flux of sed (units don't matter)
:param logmstar:
logmstar of galaxy
:param logsfr:
logSFR of galaxy (not used in this DEM
:param nebular:
if True nebular flux has an attenuation that is scaled from the
continuum attenuation.
'''
assert theta.shape[0] == 7, print(theta)
logmstar = np.atleast_1d(logmstar)
tauV = np.clip(theta[0] * (logmstar - 10.) + theta[1], 1e-3, None)
delta = theta[2] * (logmstar - 10.) + theta[3]
E_b = theta[4] * delta + theta[5]
    # randomly sample the inclination angle from 0 - pi/2
incl = np.random.uniform(0., 0.5*np.pi, size=logmstar.shape[0])
sec_incl = 1./np.cos(incl)
#Eq. 14 of Somerville+(1999)
A_V = -2.5 * np.log10((1.0 - np.exp(-tauV * sec_incl)) / (tauV * sec_incl))
    assert np.all(np.isfinite(A_V)), print(tauV, logmstar, logsfr)
dlam = 350. # width of bump from Noll+(2009)
lam0 = 2175. # wavelength of bump
k_V_calzetti = 4.87789
# bump
    D_bump = E_b[:,None] * (lam * dlam)**2 / ((lam**2 - lam0**2)**2 + (lam * dlam)**2)
    A_lambda = A_V[:,None] * (calzetti_absorption(lam) + D_bump / k_V_calzetti) * \
            (lam / 5500.)**delta[:,None]
if not nebular: factor = 1.
else: factor = theta[6]
T_lam = 10.0**(-0.4 * A_lambda * factor)
return flux_i * T_lam
def DEM_slabcalzetti(theta, lam, flux_i, logmstar, logsfr, nebular=True):
''' Dust Empirical Model that uses the slab model with tauV(theta, mstar)
parameterization with inclinations randomly sampled
A(lambda) = -2.5 log10( (1 - exp(-tauV sec(i))) / (tauV sec(i)) ) x
(k'(lambda) / k_V)
:param theta:
parameter of the DEM model that specifies the M* dep. V-band optical depth (slope, offset)
as well as the nebular flux attenuatoin fraction
:param lam:
wavelength in angstrom
:param flux_i:
intrinsic flux of sed (units don't matter)
:param nebular:
if True nebular flux has an attenuation that is scaled from the
continuum attenuation.
notes
-----
* slab model to apply dust attenuation Eq.14 (Somerville+1999)
'''
logmstar = np.atleast_1d(logmstar)
tauV = np.clip(theta[0] * (logmstar - 10.) + theta[1], 1e-3, None)
    # randomly sample the inclination angle from 0 - pi/2
incl = np.random.uniform(0., 0.5*np.pi, size=logmstar.shape[0])
sec_incl = 1./np.cos(incl)
#cosis = 1.0 - np.cos(np.random.uniform(low=0, high=0.5*np.pi, size=mstar.shape[0]))
if not nebular: factor = 1.
else: factor = theta[2]
#Eq. 14 of Somerville+(1999)
A_V = -2.5 * np.log10((1.0 - np.exp(-tauV * sec_incl)) / (tauV * sec_incl))
    assert np.all(np.isfinite(A_V)), print(tauV, logmstar, logsfr)
# minimum attenuation from Romeel's paper (which one?)
A_V = np.clip(A_V, 0.1, None)
    T_lam = 10.0**(A_V[:,None] * -0.4 * calzetti_absorption(lam) * factor)
return flux_i * T_lam
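# Small reference helper (a sketch, not part of the original module): Eq. 14 of
# Somerville+(1999) as used repeatedly above, written out for a single V-band
# optical depth tauV and inclination incl (radians).
def _slab_A_V(tauV, incl):
    import numpy as np
    sec_i = 1.0 / np.cos(incl)
    return -2.5 * np.log10((1.0 - np.exp(-tauV * sec_i)) / (tauV * sec_i))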
def DEM_slab_noll_simple(theta, lam, flux_i, logmstar, logsfr, nebular=True):
''' simplified version of the Dust empirical model that combines the slab
model with Noll+(2009). This is to better understand the distance metrics
A(lambda) = -2.5 log10( (1 - exp(-tauV sec(i))) / (tauV sec(i)) ) x
(k'(lambda) + D(lambda, E_b))/k_V x
(lambda / lambda_V)^delta
tauV = c_tau
delta = c_delta -2.2 < delta < 0.4
E_b = constant
:param theta:
2 free parameter of the simplified slab + Noll+(2009) model
theta: c_tau c_delta f_nebular
:param lam:
wavelength in angstrom
:param flux_i:
intrinsic flux of sed (units don't matter)
:param logmstar:
log M* of galaxies
:param logsfr:
log SFR of galaxies
:param nebular:
if True nebular flux has an attenuation that is scaled from | |
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import os
import posixpath
import xml.etree.ElementTree as ET
import defcon
from mutatorMath.objects.error import MutatorError
from mutatorMath.objects.location import Location
from mutatorMath.objects.mutator import Mutator
from mutatorMath.ufo.instance import InstanceWriter
"""
Read and write mutator math designspace files.
A DesignSpaceDocumentWriter object can be instructed to write a properly formed
description of a designspace for UFO fonts.
A DesignSpaceDocumentReader object can then execute such a designspace document
and generate the UFOs described.
"""
def initializeLogger(proposedLogPath):
logging.basicConfig(filename=proposedLogPath, level=logging.INFO, format='%(asctime)s %(message)s')
def _indent(elem, whitespace=" ", level=0):
# taken from http://effbot.org/zone/element-lib.htm#prettyprint
i = "\n" + level * whitespace
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + whitespace
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, whitespace, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class DesignSpaceDocumentWriter(object):
"""
Writer for a design space description file.
* path: path for the document
* toolVersion: version of this tool
"""
_whiteSpace = " "
def __init__(self, path, toolVersion=3, verbose=False):
self.path = path
self.toolVersion = toolVersion
self.verbose = verbose
self.root = ET.Element("designspace")
self.root.attrib['format'] = "%d"%toolVersion
self.root.append(ET.Element("axes"))
self.root.append(ET.Element("sources"))
self.root.append(ET.Element("instances"))
self.logger = None
if verbose:
self.logger = logging.getLogger("mutatorMath")
self.currentInstance = None
def save(self, pretty=True):
""" Save the xml. Make pretty if necessary. """
self.endInstance()
if pretty:
_indent(self.root, whitespace=self._whiteSpace)
tree = ET.ElementTree(self.root)
tree.write(self.path, encoding="utf-8", method='xml', xml_declaration=True)
if self.logger:
self.logger.info("Writing %s", self.path)
def _makeLocationElement(self, locationObject, name=None):
""" Convert Location object to an locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib['name'] = name
for dimensionName, dimensionValue in locationObject.items():
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = dimensionName
if type(dimensionValue)==tuple:
dimElement.attrib['xvalue'] = "%f"%dimensionValue[0]
dimElement.attrib['yvalue'] = "%f"%dimensionValue[1]
else:
dimElement.attrib['xvalue'] = "%f"%dimensionValue
locElement.append(dimElement)
return locElement
def _posixPathRelativeToDocument(self, otherPath):
relative = os.path.relpath(otherPath, os.path.dirname(self.path))
return posixpath.join(*relative.split(os.path.sep))
def addSource(self,
path,
name,
location,
copyLib=False,
copyGroups=False,
copyInfo=False,
copyFeatures=False,
muteKerning=False,
muteInfo=False,
mutedGlyphNames=None,
familyName=None,
styleName=None,
):
"""
Add a new UFO source to the document.
* path: path to this UFO, will be written as a relative path to the document path.
* name: reference name for this source
* location: name of the location for this UFO
* copyLib: copy the contents of this source to instances
* copyGroups: copy the groups of this source to instances
* copyInfo: copy the non-numerical fields from this source.info to instances.
* copyFeatures: copy the feature text from this source to instances
* muteKerning: mute the kerning data from this source
* muteInfo: mute the font info data from this source
* familyName: family name for this UFO (to be able to work on the names without reading the whole UFO)
* styleName: style name for this UFO (to be able to work on the names without reading the whole UFO)
Note: no separate flag for mute font: the source is just not added.
"""
sourceElement = ET.Element("source")
sourceElement.attrib['filename'] = self._posixPathRelativeToDocument(path)
sourceElement.attrib['name'] = name
if copyLib:
libElement = ET.Element('lib')
libElement.attrib['copy'] = "1"
sourceElement.append(libElement)
if copyGroups:
groupsElement = ET.Element('groups')
groupsElement.attrib['copy'] = "1"
sourceElement.append(groupsElement)
if copyFeatures:
featuresElement = ET.Element('features')
featuresElement.attrib['copy'] = "1"
sourceElement.append(featuresElement)
if copyInfo or muteInfo:
# copy info:
infoElement = ET.Element('info')
if copyInfo:
infoElement.attrib['copy'] = "1"
if muteInfo:
infoElement.attrib['mute'] = "1"
sourceElement.append(infoElement)
if muteKerning:
# add kerning element to the source
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = '1'
sourceElement.append(kerningElement)
if mutedGlyphNames:
# add muted glyphnames to the source
for name in mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = '1'
sourceElement.append(glyphElement)
if familyName is not None:
sourceElement.attrib['familyname'] = familyName
if styleName is not None:
sourceElement.attrib['stylename'] = styleName
locationElement = self._makeLocationElement(location)
sourceElement.append(locationElement)
self.root.findall('.sources')[0].append(sourceElement)
def startInstance(self, name=None,
location=None,
familyName=None,
styleName=None,
fileName=None,
postScriptFontName=None,
styleMapFamilyName=None,
styleMapStyleName=None,
):
""" Start a new instance.
Instances can need a lot of configuration.
So this method starts a new instance element. Use endInstance() to finish it.
* name: the name of this instance
* familyName: name for the font.info.familyName field. Required.
* styleName: name fot the font.info.styleName field. Required.
* fileName: filename for the instance UFO file. Required.
* postScriptFontName: name for the font.info.postScriptFontName field. Optional.
* styleMapFamilyName: name for the font.info.styleMapFamilyName field. Optional.
* styleMapStyleName: name for the font.info.styleMapStyleName field. Optional.
"""
if self.currentInstance is not None:
# We still have the previous one open
self.endInstance()
instanceElement = ET.Element('instance')
if name is not None:
instanceElement.attrib['name'] = name
if location is not None:
locationElement = self._makeLocationElement(location)
instanceElement.append(locationElement)
if familyName is not None:
instanceElement.attrib['familyname'] = familyName
if styleName is not None:
instanceElement.attrib['stylename'] = styleName
if fileName is not None:
instanceElement.attrib['filename'] = self._posixPathRelativeToDocument(fileName)
if postScriptFontName is not None:
instanceElement.attrib['postscriptfontname'] = postScriptFontName
if styleMapFamilyName is not None:
instanceElement.attrib['stylemapfamilyname'] = styleMapFamilyName
if styleMapStyleName is not None:
instanceElement.attrib['stylemapstylename'] = styleMapStyleName
self.currentInstance = instanceElement
def endInstance(self):
"""
Finalise the instance definition started by startInstance().
"""
if self.currentInstance is None:
return
        self.root.findall('.instances')[0].append(self.currentInstance)
self.currentInstance = None
def writeGlyph(self,
name,
unicodes=None,
location=None,
masters=None,
note=None,
mute=False,
):
""" Add a new glyph to the current instance.
* name: the glyph name. Required.
* unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters.
* location: a design space location for this glyph if it needs to be different from the instance location.
* masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance.
* note: a note for this glyph
* mute: if this glyph is muted. None of the other attributes matter if this one is true.
"""
if self.currentInstance is None:
return
glyphElement = ET.Element('glyph')
if mute:
glyphElement.attrib['mute'] = "1"
if unicodes is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in unicodes])
if location is not None:
locationElement = self._makeLocationElement(location)
glyphElement.append(locationElement)
if name is not None:
glyphElement.attrib['name'] = name
if note is not None:
noteElement = ET.Element('note')
noteElement.text = note
glyphElement.append(noteElement)
if masters is not None:
mastersElement = ET.Element("masters")
for glyphName, masterName, location in masters:
masterElement = ET.Element("master")
if glyphName is not None:
masterElement.attrib['glyphname'] = glyphName
masterElement.attrib['source'] = masterName
if location is not None:
locationElement = self._makeLocationElement(location)
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
if self.currentInstance.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
self.currentInstance.append(glyphsElement)
else:
glyphsElement = self.currentInstance.findall('.glyphs')[0]
glyphsElement.append(glyphElement)
def writeInfo(self, location=None, masters=None):
""" Write font into the current instance.
Note: the masters attribute is ignored at the moment.
"""
if self.currentInstance is None:
return
infoElement = ET.Element("info")
if location is not None:
locationElement = self._makeLocationElement(location)
infoElement.append(locationElement)
self.currentInstance.append(infoElement)
def writeKerning(self, location=None, masters=None):
""" Write kerning into the current instance.
Note: the masters attribute is ignored at the moment.
"""
if self.currentInstance is None:
return
kerningElement = ET.Element("kerning")
if location is not None:
locationElement = self._makeLocationElement(location)
kerningElement.append(locationElement)
self.currentInstance.append(kerningElement)
def writeWarp(self, warpDict):
""" Write a list of (in, out) values for a warpmap """
warpElement = ET.Element("warp")
axisNames = sorted(warpDict.keys())
for name in axisNames:
axisElement = ET.Element("axis")
axisElement.attrib['name'] = name
for a, b in warpDict[name]:
warpPt = ET.Element("map")
warpPt.attrib['input'] = str(a)
warpPt.attrib['output'] = str(b)
axisElement.append(warpPt)
warpElement.append(axisElement)
self.root.append(warpElement)
def addAxis(self, tag, name, minimum, maximum, default, warpMap=None):
""" Write an axis element.
This will be added to the <axes> element.
"""
axisElement = ET.Element("axis")
axisElement.attrib['name'] = name
axisElement.attrib['tag'] = tag
axisElement.attrib['minimum'] = str(minimum)
axisElement.attrib['maximum'] = str(maximum)
axisElement.attrib['default'] = str(default)
if warpMap is not None:
for a, b in warpMap:
warpPt = ET.Element("map")
warpPt.attrib['input'] = str(a)
warpPt.attrib['output'] = str(b)
axisElement.append(warpPt)
self.root.findall('.axes')[0].append(axisElement)
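# Hedged usage sketch (not part of the original module): the calls below show how
# the instance-writing API above is typically driven. The writer class name,
# constructor arguments and file paths are assumptions for illustration only.
#
#   doc = DesignSpaceDocumentWriter("MyFamily.designspace")      # assumed constructor
#   doc.addAxis(tag="wght", name="weight", minimum=300, maximum=700, default=400)
#   doc.startInstance(name="medium",
#                     location=dict(weight=500),
#                     familyName="MyFamily",
#                     styleName="Medium",
#                     fileName="instances/MyFamily-Medium.ufo")
#   doc.writeInfo(location=dict(weight=500))
#   doc.writeKerning(location=dict(weight=500))
#   doc.writeGlyph("agrave", unicodes=[0xE0], note="check diacritic placement")
#   doc.endInstance()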
class DesignSpaceDocumentReader(object):
""" Read a designspace description.
Build Instance objects, generate them.
* documentPath: path of the document to read
* ufoVersion: target UFO version
* roundGeometry: apply rounding to all geometry
"""
_fontClass = defcon.Font
_glyphClass = defcon.Glyph
_libClass = defcon.Lib
_glyphContourClass = defcon.Contour
_glyphPointClass = defcon.Point
_glyphComponentClass = defcon.Component
_glyphAnchorClass = defcon.Anchor
_kerningClass = defcon.Kerning
_groupsClass = defcon.Groups
_infoClass = defcon.Info
_featuresClass = defcon.Features
_instanceWriterClass = InstanceWriter
_tempFontLibGlyphMuteKey = "_mutatorMath.temp.mutedGlyphNames"
_tempFontLocationKey = "_mutatorMath.temp.fontLocation"
def __init__(self, documentPath,
ufoVersion,
roundGeometry=False,
verbose=False,
logPath=None,
progressFunc=None
):
self.path = documentPath
self.ufoVersion = ufoVersion
self.roundGeometry = roundGeometry
self.documentFormatVersion = 0
my favorite recipe.'
assert len(r1.slugs) == len(r2.slugs) == 1
assert r1.slugs[0] != r2.slugs[0]
assert r1.slugs[0].slug == r2.slugs[
0].slug == 'rocky-mountain-river-ipa'
def test_copy_multiple_slugs(self):
r = model.Recipe(
type='MASH',
name='Rocky Mountain River IPA',
gallons=5,
boil_minutes=60,
notes=u'This is my favorite recipe.'
)
model.RecipeSlug(slug='secondary-slug', recipe=r)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeSlug.query.count() == 4
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.type == r2.type == 'MASH'
assert r1.name == r2.name == 'Rocky Mountain River IPA'
assert r1.gallons == r2.gallons == 5
assert r1.boil_minutes == r2.boil_minutes == 60
assert r1.notes == r2.notes == u'This is my favorite recipe.'
assert len(r1.slugs) == len(r2.slugs) == 2
assert r1.slugs[0] != r2.slugs[0]
assert r1.slugs[0].slug == r2.slugs[
0].slug == 'rocky-mountain-river-ipa'
assert r1.slugs[1] != r2.slugs[1]
assert r1.slugs[1].slug == r2.slugs[1].slug == 'secondary-slug'
def test_simple_copy_with_overrides(self):
model.Recipe(
type='MASH',
name='Rocky Mountain River IPA',
gallons=5,
boil_minutes=60,
notes=u'This is my favorite recipe.'
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate({
'type': 'EXTRACT',
'name': '<NAME>',
'gallons': 10,
'boil_minutes': 90,
'notes': u'This is a duplicate.'
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeSlug.query.count() == 2
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r2.type == 'EXTRACT'
assert r2.name == '<NAME>'
assert r2.gallons == 10
assert r2.boil_minutes == 90
assert r2.notes == u'This is a duplicate.'
def test_author_copy(self):
model.Recipe(
name='Rocky Mountain River IPA',
author=model.User()
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.User.query.count() == 1
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.author == r2.author == model.User.get(1)
def test_author_copy_with_overrides(self):
model.Recipe(
name='Rocky Mountain River IPA',
author=model.User()
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate({
'author': model.User()
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.User.query.count() == 2
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.author and r2.author
assert r1.author != r2.author
def test_style_copy(self):
model.Recipe(
name='Rocky Mountain River IPA',
style=model.Style(name=u'American IPA')
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.Style.query.count() == 1
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.style == r2.style == model.Style.get(1)
def test_style_copy_with_overrides(self):
model.Recipe(
name='Rocky Mountain River IPA',
style=model.Style(name=u'American IPA')
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate({
'style': model.Style(name=u'Baltic Porter')
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.Style.query.count() == 2
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.style and r2.style
assert r1.style != r2.style
def test_slugs_copy(self):
model.Recipe(
name='Rocky Mountain River IPA',
slugs=[
model.RecipeSlug(slug=u'rocky-mountain-river-ipa'),
model.RecipeSlug(slug=u'my-favorite-ipa')
]
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeSlug.query.count() == 4
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.slugs) == len(r2.slugs) == 2
assert r1.slugs[0] != r2.slugs[0]
assert r1.slugs[0].slug == r2.slugs[
0].slug == 'rocky-mountain-river-ipa'
assert r1.slugs[1] != r2.slugs[1]
assert r1.slugs[1].slug == r2.slugs[1].slug == 'my-favorite-ipa'
def test_slugs_copy_with_overrides(self):
model.Recipe(
name='Rocky Mountain River IPA',
slugs=[
model.RecipeSlug(slug=u'rocky-mountain-river-ipa'),
model.RecipeSlug(slug=u'my-favorite-ipa')
]
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate({
'slugs': [model.RecipeSlug(slug='custom-slug')]
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeSlug.query.count() == 3
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.slugs) == 2
assert len(r2.slugs) == 1
assert r1.slugs[0].slug == 'rocky-mountain-river-ipa'
assert r1.slugs[1].slug == 'my-favorite-ipa'
assert r2.slugs[0].slug == 'custom-slug'
def test_fermentation_steps_copy(self):
model.Recipe(
name='Rocky Mountain River IPA',
fermentation_steps=[
model.FermentationStep(
step='PRIMARY',
days=14,
fahrenheit=65
),
model.FermentationStep(
step='SECONDARY',
days=90,
fahrenheit=45
)
]
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.FermentationStep.query.count() == 4
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.fermentation_steps) == len(r2.fermentation_steps) == 2
assert r1.fermentation_steps[
0].step == r2.fermentation_steps[0].step == 'PRIMARY'
assert r1.fermentation_steps[
0].days == r2.fermentation_steps[0].days == 14
assert r1.fermentation_steps[
0].fahrenheit == r2.fermentation_steps[0].fahrenheit == 65
assert r1.fermentation_steps[
1].step == r2.fermentation_steps[1].step == 'SECONDARY'
assert r1.fermentation_steps[
1].days == r2.fermentation_steps[1].days == 90
assert r1.fermentation_steps[
1].fahrenheit == r2.fermentation_steps[1].fahrenheit == 45
def test_fermentation_steps_copy_with_override(self):
model.Recipe(
name='Rocky Mountain River IPA',
fermentation_steps=[
model.FermentationStep(
step='PRIMARY',
days=14,
fahrenheit=65
),
model.FermentationStep(
step='SECONDARY',
days=90,
fahrenheit=45
)
]
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate({
'fermentation_steps': [model.FermentationStep(
step='PRIMARY',
days=21,
fahrenheit=75
)]
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.FermentationStep.query.count() == 3
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.fermentation_steps) == 2
assert len(r2.fermentation_steps) == 1
assert r2.fermentation_steps[0].step == 'PRIMARY'
assert r2.fermentation_steps[0].days == 21
assert r2.fermentation_steps[0].fahrenheit == 75
def test_additions_copy(self):
recipe = model.Recipe(name=u'Sample Recipe')
grain = model.Fermentable()
primary_hop = model.Hop()
bittering_hop = model.Hop()
yeast = model.Yeast()
recipe.additions = [
model.RecipeAddition(
use='MASH',
fermentable=grain
),
model.RecipeAddition(
use='MASH',
hop=primary_hop
),
model.RecipeAddition(
use='FIRST WORT',
hop=primary_hop
),
model.RecipeAddition(
use='BOIL',
hop=primary_hop,
),
model.RecipeAddition(
use='POST-BOIL',
hop=primary_hop
),
model.RecipeAddition(
use='FLAME OUT',
hop=bittering_hop
),
model.RecipeAddition(
use='PRIMARY',
yeast=yeast
),
model.RecipeAddition(
use='SECONDARY',
yeast=yeast
)
]
model.commit()
assert model.Recipe.query.count() == 1
assert model.RecipeAddition.query.count() == 8
assert model.Fermentable.query.count() == 1
assert model.Hop.query.count() == 2
assert model.Yeast.query.count() == 1
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeAddition.query.count() == 16
assert model.Fermentable.query.count() == 1
assert model.Hop.query.count() == 2
assert model.Yeast.query.count() == 1
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.additions) == len(r2.additions) == 8
for f in model.Fermentable.query.all():
assert f in [a.ingredient for a in r1.additions]
assert f in [a.ingredient for a in r2.additions]
assert len(set([a.recipe for a in f.additions])) == 2
for h in model.Hop.query.all():
assert h in [a.ingredient for a in r1.additions]
assert h in [a.ingredient for a in r2.additions]
assert len(set([a.recipe for a in h.additions])) == 2
for y in model.Yeast.query.all():
assert y in [a.ingredient for a in r1.additions]
assert y in [a.ingredient for a in r2.additions]
assert len(set([a.recipe for a in y.additions])) == 2
def test_additions_copy_with_overrides(self):
recipe = model.Recipe(name=u'Sample Recipe')
grain = model.Fermentable()
primary_hop = model.Hop()
bittering_hop = model.Hop()
yeast = model.Yeast()
recipe.additions = [
model.RecipeAddition(
use='MASH',
fermentable=grain
),
model.RecipeAddition(
use='MASH',
hop=primary_hop
),
model.RecipeAddition(
use='FIRST WORT',
hop=primary_hop
),
model.RecipeAddition(
use='BOIL',
hop=primary_hop,
),
model.RecipeAddition(
use='POST-BOIL',
hop=primary_hop
),
model.RecipeAddition(
use='FLAME OUT',
hop=bittering_hop
),
model.RecipeAddition(
use='PRIMARY',
yeast=yeast
),
model.RecipeAddition(
use='SECONDARY',
yeast=yeast
)
]
model.commit()
assert model.Recipe.query.count() == 1
assert model.RecipeAddition.query.count() == 8
assert model.Fermentable.query.count() == 1
assert model.Hop.query.count() == 2
assert model.Yeast.query.count() == 1
recipe = model.Recipe.query.first()
recipe.duplicate({
'additions': [model.RecipeAddition(
use='MASH',
fermentable=model.Fermentable.query.first()
)]
})
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeAddition.query.count() == 9
assert model.Fermentable.query.count() == 1
assert model.Hop.query.count() == 2
assert model.Yeast.query.count() == 1
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert len(r1.additions) == 8
assert len(r2.additions) == 1
assert r2.additions[0].fermentable == model.Fermentable.query.first()
class TestDrafts(TestModel):
def test_draft_creation(self):
model.Recipe(
type='MASH',
name='Rocky Mountain River IPA',
gallons=5,
boil_minutes=60,
notes=u'This is my favorite recipe.',
state=u'PUBLISHED'
)
model.commit()
model.Recipe.query.first().draft()
model.commit()
assert model.Recipe.query.count() == 2
source = model.Recipe.query.filter(
model.Recipe.published_version == null()
).first() # noqa
draft = model.Recipe.query.filter(
model.Recipe.published_version != null()
).first() # noqa
assert source != draft
assert source.type == draft.type == 'MASH'
assert source.name == draft.name == 'Rocky Mountain River IPA'
assert source.state != draft.state
assert draft.state == 'DRAFT'
assert draft.published_version == source
assert source.current_draft == draft
def test_draft_merge(self):
source = model.Recipe(
type='MASH',
name='Rocky Mountain River IPA',
gallons=5,
boil_minutes=60,
notes=u'This is my favorite recipe.',
state=u'PUBLISHED'
)
source.flush()
primary_key = source.id
creation_date = source.creation_date
model.commit()
# Make a new draft of the recipe
model.Recipe.query.first().draft()
model.commit()
assert model.Recipe.query.count() == 2
# Make a change to the draft
draft = model.Recipe.query.filter(
model.Recipe.state == 'DRAFT').first()
draft.name = 'Simcoe IPA'
draft.slugs = [model.RecipeSlug(name='simcoe-ipa')]
draft.gallons = 10
draft.boil_minutes = 90
draft.notes = u'This is a modified recipe'
model.commit()
# Merge the draft back into its origin recipe.
draft = model.Recipe.query.filter(
model.Recipe.state == 'DRAFT').first()
draft.publish()
model.commit()
# Make sure the remaining version is the (newly saved) draft
assert model.Recipe.query.count() == 1
assert model.RecipeSlug.query.count() == 1
published = model.Recipe.query.first()
assert published.id == primary_key
assert published.name == 'Simcoe IPA'
assert published.state == 'PUBLISHED' # not modified
assert published.creation_date == creation_date # not modified
assert len(published.slugs) == 1
assert published.slugs[0].slug == 'simcoe-ipa'
assert published.gallons == 10
assert published.boil_minutes == 90
assert published.notes == u'This is a modified recipe'
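    # Taken together, the two tests above document the draft lifecycle: draft()
    # clones a PUBLISHED recipe into a DRAFT row linked back through
    # published_version, and publish() merges the draft's changes into the original
    # row, preserving its primary key and creation_date while the draft is removed.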
def test_draft_merge_style(self):
model.Recipe(
name='Rocky Mountain River IPA',
style=model.Style(name='American IPA'),
state=u'PUBLISHED'
)
model.commit()
# Make a new draft of the recipe
model.Recipe.query.first().draft()
model.commit()
assert model.Recipe.query.count() == 2
# Change the style of the draft
draft = model.Recipe.query.filter(
model.Recipe.state == 'DRAFT').first()
draft.style = model.Style(name='Baltic Porter')
model.commit()
# Merge the draft back into its origin recipe.
draft = model.Recipe.query.filter(
model.Recipe.state == 'DRAFT').first()
draft.publish()
model.commit()
# Make sure the remaining version is the (newly saved) draft
assert model.Recipe.query.count() == 1
assert model.Style.query.count() == 2
published = model.Recipe.query.first()
assert published.style == model.Style.get_by(name='Baltic Porter')
def test_draft_merge_fermentation_steps(self):
model.Recipe(
name='Rocky Mountain River IPA',
fermentation_steps=[
model.FermentationStep(
step='PRIMARY',
days=14,
fahrenheit=65
),
model.FermentationStep(
step='SECONDARY',
days=90,
fahrenheit=45
)
],
state=u'PUBLISHED'
)
model.commit()
<reponame>mike-sino/ns2.35-with-docker
#!/usr/bin/python
"""This module contains objects that help generate wireless networks to be fed
into ns2. It supports generating random topologies, shortest path routing via
Dijkstra's algorithm and outputting the network data in ns2 format."""
import sys
import math
import random
class Node( object ):
"""This class represents a single wireless node. The important
attributes are the x and y coordinates. The others are designed to be
internal attributes used for routing."""
__slots__ = ('x','y','neighbours', 'shortestPathLength', 'shortestPaths', 'routes')
def __init__( self, x, y ):
'''Creates a new Node located at the specified x and y coordinates.'''
self.x = x
self.y = y
self.neighbours = None
self.shortestPathLength = -1
self.shortestPaths = None
# This maps destinations to lists of paths
self.routes = {}
#~ def __cmp__( self, other ):
#~ if other == None:
#~ return 1
#~ val = cmp( self.x, other.x )
#~ if ( val != 0 ): return val
#~ return cmp( self.y, other.y )
#~ def __hash__( self ):
#~ return hash( self.x ) ^ hash( self.y )
def distance( self, other ):
"""Returns the distance from this node to another node."""
diffX = self.x - other.x
diffY = self.y - other.y
return math.sqrt( diffX*diffX + diffY*diffY )
class Path( object ):
"""Represents a path of connected nodes. The node instances can be
directly accessed via the self.path list."""
__slots__ = ('path','visited')
def __init__( self, source ):
self.path = [ source ]
self.visited = { source: 1 }
#~ self.neighbours = None
def __cmp__( self, other ):
if other == None:
return 1
return cmp( self.path, other.path )
def __len__( self ):
return len( self.path )
def append( self, node ):
assert( not self.visited.has_key( node ) )
self.visited[node] = 1
self.path.append( node )
def isNodeDisjoint( self, other ):
'''Returns True if this path does not share any nodes with the
other path. It returns False if it shares at least one node.'''
for node in self.path:
if node in other.visited:
return False
return True
def clone( self ):
"""Returns a clone of this object with new instances for the
first level variables (not a recursive copy)."""
shallowCopy = Path( None )
shallowCopy.path = list( self.path )
shallowCopy.visited = dict( self.visited )
return shallowCopy
def reverse( self ):
"""Return a clone of this path in reverse order."""
backwards = self.clone()
#~ print "before", backwards.path
backwards.path.reverse()
#~ print "after", backwards.path
return backwards
def distance( self ):
"""Returns the total distance of the path, computed as the
sum of the distances between each node."""
last = self.path[0]
distance = 0
for p in self.path[1:]:
distance += last.distance( p )
last = p
return distance
def source( self ):
return self.path[0]
def last( self ):
return self.path[-1]
def unvisitedPaths( self ):
unvisitedPaths = []
for neighbour in self.path[-1].neighbours:
if not self.visited.has_key( neighbour ):
# Unvisited neighbour: make a copy of this path and append the neighbour
c = self.clone()
c.append( neighbour )
unvisitedPaths.append( c )
return unvisitedPaths
class ListSubclassFifo(list):
"""A list subclass that provides better performance for enqueue and
dequeue operations.
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68436
- Constant time enqueue and dequeue
- Has a higher constant than the list
- Only faster if you are dequeuing from lists with more than ~1000 items"""
__slots__ = ('front',)
def __init__(self):
self.front = []
def enqueue(self, elt):
self.append( elt )
def dequeue(self):
if not self.front:
self.reverse()
self.front = self[:]
self[:] = []
return self.front.pop()
def __iter__(self):
        # Iterate until an IndexError is raised by dequeue
try:
while 1:
yield self.dequeue()
except IndexError:
pass
def __len__(self):
return list.__len__( self ) + len( self.front )
def dequeueEnd( self ):
if not list.__len__( self ):
self.front.reverse()
self[:] = self.front
self.front = []
return self.pop()
def peekAtEnd( self ):
if list.__len__( self ):
return self[-1]
else:
return self.front[0]
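# Minimal usage sketch for ListSubclassFifo (illustrative only, nothing in this
# module executes it): enqueue appends to the list part, dequeue pops from the
# reversed 'front' buffer, giving amortised O(1) behaviour at both ends.
#
#   q = ListSubclassFifo()
#   for i in range(3):
#       q.enqueue(i)
#   q.dequeue()      # -> 0, FIFO order
#   q.peekAtEnd()    # -> 2, the most recently enqueued item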
class HeuristicPath2( Path ):
"""This Path subclass is used to make Dijkstra's algorithm run a bit faster."""
def append( self, node ):
Path.append( self, node )
# Add the neighbours of the last node to the visited list
for neighbour in self.path[-2].neighbours:
self.visited[neighbour] = 1
# Add the 2nd hop neighbours of the 2nd last node to the visited list
if len( self.path ) >= 3:
for neighbour in self.path[-3].neighbours:
for hopNeighbour in neighbour.neighbours:
self.visited[hopNeighbour] = 1
def clone( self ):
shallowCopy = HeuristicPath2( None )
shallowCopy.path = list( self.path )
shallowCopy.visited = dict( self.visited )
return shallowCopy
def randomNetwork( size, numNodes=None, density=None ):
"""Generates a random connected network, and computes the neighbour information."""
if numNodes == None and density == None:
raise ValueError, "Must specifiy one of numNodes OR density"
if numNodes != None and density != None:
raise ValueError, "Must specify only one of numNodes OR density, not both"
if ( numNodes == None ):
numNodes = int( size[0]*size[1]*density + 0.5 )
else:
        density = float(numNodes)/(size[0]*size[1])
network = Network( size )
while numNodes > 0:
#~ print "Placing %d nodes ..." % (numNodes)
# Randomly place the nodes
for i in xrange( numNodes ):
# Place a random node
network.addNode( random.uniform( 0, size[0] ), random.uniform( 0, size[1] ) )
# Replace any that aren't connected to other nodes
numNodes = 0
for node in network.unconnectedNodes():
network.nodes.remove( node )
numNodes += 1
# No lonely nodes: make sure the graph is connected
if numNodes == 0:
maxConnected = network.maximumConnectedSet()
if len( maxConnected ) != len( network.nodes ):
#~ print "Disconnected graph. Keeping largest connected subset"
# We have a disconnected graph: Remove all but the largest connected subset
for node in network.nodes:
if not node in maxConnected:
network.nodes.remove( node )
numNodes += 1
return network
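# Illustrative sketch (not executed here): generating a topology with
# randomNetwork and querying routes through the Network class defined below.
# Terrain size, node count and node indices are arbitrary example values.
#
#   net = randomNetwork((1000, 1000), numNodes=50)
#   paths = net.findShortestPaths(source=0, destination=1, extraHopLimit=1)
#   for p in paths:
#       print len(p), p.distance()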
class Network:
"""Represents a collection of nodes."""
def __init__( self, size, range = 250 ):
"""size - A tuple of the (x, y) dimensions of the network terrain.
range - The maximum distance between connected nodes."""
if size[0] <= 0:
raise ValueError, "size[0] must be greater than zero"
if size[1] <= 0:
raise ValueError, "size[1] must be greater than zero"
if range <= 0:
raise ValueError, "range must be greater than zero"
self.size = size
self.nodes = []
self.range = range
self.neighboursFound = False
def addNode( self, x, y ):
# Check that the node is within the bounds of the network
if not (0 <= x and x <= self.size[0]):
raise ValueError, "x coordinate (%f) out of range [0, %f]" % ( x, self.size[0] )
if not (0 <= y and y <= self.size[1]):
raise ValueError, "y coordinate (%f) out of range [0, %f]" % ( y, self.size[1] )
id = len( self.nodes )
self.nodes.append( Node( x, y ) )
self.neighboursFound = False
return id
def findNeighbours( self ):
"""Recalculates the neighbour lists for each node."""
for node in self.nodes:
node.neighbours = []
for id, node in enumerate( self.nodes ):
# Iterate through all the *remaining* nodes
# this way we never check any pair of nodes more than once
for other in self.nodes[id+1:]:
# The nodes are in range of each other: add them as neighbours
if ( node.distance( other ) <= self.range ):
node.neighbours.append( other )
other.neighbours.append( node )
self.neighboursFound = True
def unconnectedNodes( self ):
if not self.neighboursFound:
self.findNeighbours()
return [ node for node in self.nodes if len( node.neighbours ) == 0 ]
def maximumConnectedSet( self ):
if not self.neighboursFound:
self.findNeighbours()
visited = {}
maximumConnectedSet = {}
# Locate the largest connected subset of nodes
while len( visited ) < len( self.nodes ):
connectedSet = {}
nodeStack = []
# Select an unvisited node
# This could be improved by changing "visited" to be "unvisited"
for node in self.nodes:
if not node in visited:
connectedSet[node] = 1
nodeStack.append( node )
break
assert( len( nodeStack ) > 0 )
while len( nodeStack ):
current = nodeStack.pop()
for neighbour in current.neighbours:
if not neighbour in connectedSet:
# We found a new neighbour: add it to the set
# And visit it eventually
connectedSet[neighbour] = 1
nodeStack.append( neighbour )
# If this connected subset is larger than previous ones, set it as the maximum
if len( maximumConnectedSet ) < len( connectedSet ):
maximumConnectedSet = connectedSet
for key in connectedSet.keys():
visited[key] = 1
return maximumConnectedSet.keys()
def findShortestPaths( self, source, destination, extraHopLimit = 0 ):
"""Finds all the shortest paths from source index node to the
destination index nodes. It will also return paths with up to
extraHopLimit hops past the shortest path."""
if not self.neighboursFound:
self.findNeighbours()
paths = []
shortestPathLength = -1
queue = ListSubclassFifo()
queue.enqueue( Path( self.nodes[source] ) )
#~ maxQueueLen = 0
#~ queueLenSum = 0
#~ queueLenCounts = 0
# Iterates efficiently (i hope) through the queue
for current in queue:
#~ if len( queue ) > maxQueueLen:
#~ maxQueueLen = len( queue )
#~ queueLenSum += len( queue )
#~ queueLenCounts += 1
#~ print "Currently at Node", current.last().x, current.last().y
if ( current.last() == self.nodes[destination] ):
# We found a valid path: add it
paths.append( current )
if ( shortestPathLength == -1 ):
# This is BFS, so the first path we find is the shortest
shortestPathLength = len( current )
            # Use strict less-than here: extending a path adds at least one more hop,
            # so only paths strictly below the limit can still yield results within it
elif ( shortestPathLength == -1 or len( current ) < shortestPathLength + extraHopLimit ):
# If any other paths will be within the length limit, add to the queue (keep searching)
newPaths = current.unvisitedPaths()
for path in newPaths:
if destination in path.visited:
print "Destination is excluded from this path"
queue.extend( newPaths )
#~ print "Queue Length Avg = %f Max = %d" % ( float( queueLenSum ) / queueLenCounts, maxQueueLen )
return paths
def findShortestPathsHeuristic( self, | |
#!/usr/bin/env python
# coding: utf-8
import scipy
import h5py
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.coordinates import FK5
import astropy.units as u
import healpy as hp
import numpy as np
from numpy import sin,cos,pi
from scipy.integrate import quad
import matplotlib.pyplot as plt
import scipy.constants as C
import scipy.optimize as optimize
#from matplotlib import cm
from pylab import cm
import time
# Python wrapper around the Fortran NE2001 electron-density model
#import pyne2001
# Produce the hangqizhi diffuse sky map (Kelvin values) after smoothing
# The diffuse map is read from diffuse.hdf5, produced by index_ssm.py (by huangqz)
#read catalog
from caput import mpiutil
from LFSM.fitting_params.save_fit_params import free_free
#from Smooth.least_sq_fit_params import free_free
#import "./F2py_file"
from LFSM.I_E_term.I_E_equation import I_E
from LFSM.interpolate_sky.interpolate_sky_map import produce_index
import ctypes as ct
import numpy as np
import os
_path = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(_path)
_path = os.path.split(_path)[0]
file_dir = _path
print ('_path',file_dir + '/NE2001_4python/src_NE2001/libNE2001.so')
# load the NE2001 shared library via ctypes
libNE2001 = ct.CDLL('/public/home/wufq/congyanping/Software/NE2001_4python/src.NE2001/libNE2001.so')
# max integrated distance (kpc)
dist = 50.
class absorption_JRZ(object):
def __init__(self, v, nside, clumping_factor, index_type, distance,emi_form,I_E_form,R0_R1_equal,using_raw_diffuse,test, only_fit_Anu):
self.v = v
self.nside = nside
self.clumping_factor = clumping_factor
self.test = test
self.index_type = index_type
self.dist = distance
self.emi_form = emi_form
self.I_E_form = I_E_form
self.R0_R1_equal = R0_R1_equal
self.using_raw_diffuse = using_raw_diffuse
self.only_fit_Anu = only_fit_Anu
Te = 8000
f = produce_index(Nside = self.nside, freq = self.v, index_type = self.index_type, I_E_form = self.I_E_form)
self.Beta_G = f.pixel_dependence_index_minus_I_E()
f = produce_index(Nside = self.nside, freq = self.v, index_type = self.index_type, I_E_form = self.I_E_form)
self.Beta_G_constant = f.constant_index_minus_I_E()
def Fortran2Py_optical_deepth(self, l, b, Te = 8000):
v = self.v * 1e6 #v in MHz
rad=57.2957795
#radian per degree
#distance equals 50kpc
#dist=50.0
if self.test == True:
step = 0.1
else:
step = 0.01
N =np.int(dist/step)
#print 'N',N
nd = ct.pointer( ct.c_int(N) ) # setup the pointer
        em1D = np.arange(0, N, dtype=np.float32) # N-long output buffer the Fortran routine fills with emission measures
l_rad = l / rad #now its radian unit
b_rad = b / rad
_ = libNE2001.dmdsm1_(nd, ct.pointer( ct.c_float(l_rad) ), ct.pointer( ct.c_float(b_rad) ), ct.pointer( ct.c_float(dist) ), np.ctypeslib.as_ctypes(em1D))
#EM = pyne2001.get_dm_full(l, b, r)['EM']
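        # Free-free optical depth computed from the NE2001 emission measure EM
        # (in pc cm^-6) accumulated along the line of sight:
        #   tau_ff ~= 3.28e-7 * (Te / 1e4 K)**-1.35 * (nu / GHz)**-2.1 * EM
        # which is what the line below evaluates for every integration step.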
Tao_mw = 3.28*1e-7 * (Te/1e4)**-1.35 * (v * 1e-9)**-2.1 * em1D
#print 'Tao_mw',Tao_mw
return Tao_mw
def raw_pyne2001_optical_deepth(self, r, l, b, Te = 8000):
v = self.v * 1e6
EM = pyne2001.get_dm_full(l, b, r)['EM']
Tao_mw = 3.28*1e-7 * (Te/1e4)**-1.35 * (v * 1e-9)**-2.1 * EM
return Tao_mw
def integrate_by_hand(self, f, a, b, args = [], dx=0.01):
if self.test == True:
dx = 0.1
step = dx
else:
dx = 0.01
step = dx
tao = self.Fortran2Py_optical_deepth(args[0], args[1])
i = a
s = 0
##I_E = args[3][-1]
I_E = self.I_E(self.v)
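        # Numerical radiative-transfer sum along the line of sight:
        #   T_b = sum_r emissivity(r) * exp(-tau(r)) * dr  +  I_E * exp(-tau_total)
        # each emitting step is attenuated by the optical depth in front of it, and
        # the extragalactic term I_E is attenuated by the full path.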
while i <= b:
index_ = np.int(i / step - 1)
s += (f(i,args[0],args[1],args[2],args[3]) * np.exp(-tao[index_])) * dx
i += dx
#here find the bug
s = s + I_E*np.exp(-tao[-1])
return s
def Quad(self, f, a, b, args = [], dx=0.01):
        #the difference from integrate_by_hand is that the extragalactic term I_E is not included
if self.test == True:
dx = 0.1
step = dx
else:
dx = 0.01
step = dx
tao = self.Fortran2Py_optical_deepth(args[0], args[1])
i = a
s = 0
while i <= b:
index_ = np.int(i / step - 1)
s += (f(i,args[0],args[1],args[2],args[3]) * np.exp(-tao[index_])) * dx
i += dx
#here find the bug
s = s
return s
def integrate_by_hand_unabsorb(self, f, a, b, args = [], dx=0.01):
i = a
s = 0
while i <= b:
s += f(i,args[0],args[1],args[2],args[3]) * dx
i += dx
return s
def integrate_by_hand_low_resolution(self, f, a, b, args = [], dx=0.1):
i = a
s = 0
while i <= b:
s += f(i,args[0],args[1],args[2],args[3]) * dx
i += dx
return s
def split_array(self, container, count):
#return [container[_i::count] for _i in range(count)]
return np.split(container, count)
def gaussian(self, x, mu = 8.5, sigma = 1.33333):
f = 1./np.sqrt(2*np.pi*sigma**2)* np.exp(-(x-mu)**2 / (2*sigma**2))
return f
def sech2(self,x):
return np.square(2/(np.exp(x) + np.exp(-x)))
def I_E(self, v):
f = I_E(v,self.I_E_form)
result = f.I_E()
return result
def _new(self, r, l, b, delt_m, params):
if self.R0_R1_equal == True:
param = params
A_v = param[0]
R_0 = param[1]
R_2 = 0.1
alpha = param[2]
R_1 = param[1]
#beta = param[3]
beta = 1
Z_0 = param[3]
gamma = param[4]
if self.R0_R1_equal == False:
param = params
A_v = param[0]
R_0 = param[1]
alpha = param[2]
R_1 = param[3]
beta = param[4]
Z_0 = param[5]
gamma = param[6]
if self.only_fit_Anu == True:
param = params
A_v = param[0]
R_0 = param[1]
R_2 = 0.1
alpha = param[2]
R_1 = param[1]
#beta = param[3]
beta = 1
Z_0 = param[3]
gamma = param[4]
#I_E = param[7]
r0 = 8.5
l_rad = l * np.pi/180.
b_rad = b * np.pi/180.
"""
x = r * np.sin(np.pi/2. - b_rad) * np.cos(l_rad)
y = r * np.sin(np.pi/2. - b_rad) * np.sin(l_rad)
z = r * np.cos(np.pi/2. - b_rad)
x_1 = x - 8.5
y_1 = y
z_1 = z
r_1 = np.sqrt(np.square(x_1) + np.square(y_1) + np.square(z_1))
b_1 = np.pi/2.0 - np.arccos(z_1/r_1)
l_1 = np.arctan(y_1/x_1)
#R = r_1
R = np.sqrt(r_1**2 - z**2)
Z = r_1 * np.sin(b_1)
"""
R = np.sqrt(8.5**2 + (r*np.cos(b_rad))**2 -2*8.5*(r*np.cos(b_rad))*np.cos(l_rad))
Z = r * np.sin(b_rad)
########ne = (R/(R_0+0.1))**alpha * a * np.exp(-np.abs(Z) * 2/(B+0.1) - 2*(r_1/(20*c + 0.1))**2) + D
#emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
if self.index_type == 'pixel_dependence_index_minus_I_E':
pix_number = hp.ang2pix(self.nside, l, b, lonlat = True)
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)*(self.v/408.)**self.Beta_G[pix_number]
elif self.index_type == 'constant_index_minus_I_E':
if int(self.v) == int(408):
if self.emi_form == 'exp':
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
if self.emi_form == 'sech2':
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * self.sech2(-(np.abs(Z)/Z_0)**gamma)
else:
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)*(self.v/408.)**self.Beta_G_constant[0]
else:
if self.emi_form == 'exp':
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
if self.emi_form == 'sech2':
emissivity = A_v * ((R+R_2)/R_0)**alpha * np.exp(-(R/R_1)**beta) * self.sech2(-(np.abs(Z)/Z_0)**gamma)
j_RZ = emissivity #+ delt_m/dist) #* np.exp(-tao[index])
return j_RZ
def critical_distance(self,l,b,delt_m,params):
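        # Find the line-of-sight distance at which half of the total absorbed
        # brightness towards (l, b) has been accumulated (the median emission
        # distance), by brute-force scanning r in 0.01 kpc steps out to 50 kpc.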
import scipy.optimize as so
#import scipy.integrate as integrate
#bug report : the lower limit is from 0.01 not 0
value = 0.5 * self.Quad(self._new, 0.01, 50,args=(l,b,delt_m,params))
def func(x,l,b,delt_m,params):
return self.Quad(self._new, 0.01, x,args=(l,b,delt_m,params)) - value
#sol = so.fsolve(func,np.array([1]),args=(l,b,delt_m,params),xtol=1,maxfev=1000)
sol = 0
Y = []
for i in np.arange(0.01,50,0.01):
result = self.Quad(self._new,0.01,i,args=(l,b,delt_m,params)) - value
Y.append(result)
Y = list(np.abs(Y))
container = np.arange(0.01,50,0.01)
index = Y.index(min(Y))
sol = container[index]
#if np.abs(result) < 100:
# sol = i
# break
#print 'begin_crital', func(sol,l,b,delt_m,params),'end_critical','sol',sol,'min',min(np.abs(Y)),'index',index
return sol
def _new_unabsorb(self, r, l, b, delt_m, params):
param = params
A_v = param[0]
R_0 = param[1]
alpha = param[2]
R_1 = param[3]
beta = param[4]
Z_0 = param[5]
gamma = param[6]
I_E = param[7]
r0 = 8.5
l_rad = l * np.pi/180.
b_rad = b * np.pi/180.
x = r * np.sin(np.pi/2. - b_rad) * np.cos(l_rad)
y = r * np.sin(np.pi/2. - b_rad) * np.sin(l_rad)
z = r * np.cos(np.pi/2. - b_rad)
x_1 = x - 8.5
y_1 = y
z_1 = z
r_1 = np.sqrt(np.square(x_1) + np.square(y_1) + np.square(z_1))
b_1 = np.pi/2.0 - np.arccos(z_1/r_1)
l_1 = np.arctan(y_1/x_1)
R = r_1
Z = r_1 * np.sin(b_1)
########ne = (R/(R_0+0.1))**alpha * a * np.exp(-np.abs(Z) * 2/(B+0.1) - 2*(r_1/(20*c + 0.1))**2) + D
#emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma)
if self.emi_form == 'exp':
emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_1)**beta) * np.exp(-(np.abs(Z)/Z_0)**gamma) + I_E
if self.emi_form == 'sech2':
emissivity = A_v * (R/R_0)**alpha * np.exp(-(R/R_1)**beta) * self.sech2(-(np.abs(Z)/Z_0)**gamma) + I_E
j_RZ = emissivity #+ delt_m/dist) #* np.exp(-tao[index])
return j_RZ
def raw_new_absorb(self, r, l, b, delt_m, params):
param = params
A_v = param[0]
R_0 = param[1]
alpha = param[2]
R_1 = param[3]
beta = param[4]
Z_0 = param[5]
gamma = param[6]
r0 = 8.5
l_rad = l * np.pi/180.
b_rad = b * np.pi/180.
x = r * np.sin(np.pi/2. - b_rad) * np.cos(l_rad)
y = r * np.sin(np.pi/2. - b_rad) * np.sin(l_rad)
z = r * np.cos(np.pi/2. - b_rad)
x_1 = x - 8.5
y_1 = y
z_1 = z
r_1 = np.sqrt(np.square(x_1) + np.square(y_1) + np.square(z_1))
        b_1 = np.pi/2.0 - np.arccos(z_1/r_1)
<reponame>mahyar-osn/mapclientplugins.lungmodelstep<filename>mapclientplugins/lungmodelstep/model/meshmodel.py
import os, platform, time
from scaffoldmaker.utils.zinc_utils import *
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.field import Field
from opencmiss.utils.maths import vectorops
from opencmiss.zinc.status import OK as ZINC_OK
from mapclientplugins.lungmodelstep.fields.nodes import Nodes as LungNodes
class MeshModel(object):
def __init__(self, regions, materialModule, context):
self._logger = context.getLogger()
self._path = self.getPluginPath()
self._leftRegion = regions['leftRegion']
self._rightRegion = regions['rightRegion']
self._leftAirwayRegion = regions['leftAirwayRegion']
self._rightAirwayRegion = regions['rightAirwayRegion']
self._leftArteryRegion = regions['leftArteryRegion']
self._rightArteryRegion = regions['rightArteryRegion']
self._leftVeinRegion = regions['leftVeinRegion']
self._rightVeinRegion = regions['rightVeinRegion']
self._initializeLeftLung()
self._initializeRightLung()
self._initializeLeftAirway()
self._initializeRightAirway()
self._initializeLeftArtery()
self._initializeRightArtery()
self._initializeLeftVein()
self._initializeRightVein()
self._leftMesh = None
self._rightMesh = None
self._elemGroups = {'leftUpperLobe': (63, 64, 69, 70, 75, 76, 80, 81, 85, 86, 87, 89, 90, 91, 93, 94, 96, 97,
98, 99, 101, 106, 111, 112, 113, 114, 115, 116, 117, 118),
'leftLowerLobe': (65, 66, 67, 71, 72, 73, 77, 78, 82, 83, 102, 103, 104, 107, 108, 109,
111, 112, 113, 114, 115, 116, 117, 118),
'rightUpperLobe': (23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 55, 56,
57, 58, 59, 60, 61, 62),
'rightMiddleLobe': (1, 2, 7, 8, 13, 14, 18, 19, 39, 40, 45, 46, 51, 52, 53, 54, 55, 56, 57,
58),
'rightLowerLobe': (3, 4, 5, 6, 9, 10, 11, 12, 15, 16, 17, 20, 21, 22, 41, 42, 43, 44, 47,
48, 49, 50, 51, 52, 53, 54, 59, 60, 61, 62)}
self._materialModule = materialModule
# self._settings = {'leftUpperLobe': True,
# 'leftLowerLobe': True,
# 'rightUpperLobe': True,
# 'rightMiddleLobe': True,
# 'rightLowerLobe': True,
# 'displaySurfaceLeft': True,
# 'displaySurfaceRight': True,
# 'displayLAirway': False,
# 'displayRAirway': False,
# 'displayLArtery': False,
# 'displayRArtery': False,
# 'displayLVein': False,
# 'displayRVein': False}
self._settings = {'displaySurfaceLeft': True,
'displaySurfaceRight': True,
'displayLAirway': False,
'displayRAirway': False,
'displayLArtery': False,
'displayRArtery': False,
'displayLVein': False,
'displayRVein': False}
self._generateMesh()
self._generateLeftAirway()
self._generateRightAirway()
self._generateLeftArtery()
self._generateRightArtery()
self._generateLeftVein()
self._generateRightVein()
self._nodes = LungNodes()
def _getVisibility(self, graphicsName):
return self._settings[graphicsName]
def _setVisibility(self, graphicsName, show):
self._settings[graphicsName] = show
if 'displaySurfaceLeft' in graphicsName:
graphics = self._leftRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'displaySurfaceRight' in graphicsName:
graphics = self._rightRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'LAirway' in graphicsName:
graphics = self._leftAirwayRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'RAirway' in graphicsName:
graphics = self._rightAirwayRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'LArtery' in graphicsName:
graphics = self._leftArteryRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'RArtery' in graphicsName:
graphics = self._rightArteryRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'LVein' in graphicsName:
graphics = self._leftVeinRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
if 'RVein' in graphicsName:
graphics = self._rightVeinRegion.getScene().findGraphicsByName(graphicsName)
graphics.setVisibilityFlag(show)
def _initializeLeftLung(self):
self._initializeNodeAndElem('left_average', self._leftRegion)
def _initializeRightLung(self):
self._initializeNodeAndElem('right_average', self._rightRegion)
def _initializeLeftAirway(self):
self._initializeNodeAndElem('left_airway', self._leftAirwayRegion)
def _initializeRightAirway(self):
self._initializeNodeAndElem('right_airway', self._rightAirwayRegion)
def _initializeLeftArtery(self):
self._initializeNodeAndElem('left_artery', self._leftArteryRegion)
def _initializeRightArtery(self):
self._initializeNodeAndElem('right_artery', self._rightArteryRegion)
def _initializeLeftVein(self):
self._initializeNodeAndElem('left_vein', self._leftVeinRegion)
def _initializeRightVein(self):
self._initializeNodeAndElem('right_vein', self._rightVeinRegion)
def _initializeNodeAndElem(self, filename, region):
nodefile = filename+'.exnode'
elemfile = filename+'.exelem'
if platform.system() == 'Windows':
region.readFile(os.path.join('../', self._path, 'fields', nodefile).replace("\\","/"))
region.readFile(os.path.join('../', self._path, 'fields', elemfile).replace("\\","/"))
else:
region.readFile(os.path.join('../', self._path, 'fields', nodefile))
region.readFile(os.path.join('../', self._path, 'fields', elemfile))
def _generateMesh(self):
""" Left Lung """
self._leftScene = self._leftRegion.getScene()
fmLeft = self._leftRegion.getFieldmodule()
fmLeft.beginChange()
self._leftCoordinates = fmLeft.findFieldByName('coordinates')
self._leftMagnitude = fmLeft.createFieldMagnitude(self._leftCoordinates)
self._leftMagnitude.setName('leftmag')
self._leftMagnitude.setManaged(True)
""" Create upper and lower lobe groups """
# self._leftUpperLobe, self._leftUpperLobeMeshGroup = self._creteLobeGroup(fmLeft, 'leftUpperLobe')
# self._leftlowerLobe, self._leftlowerLobeMeshGroup = self._creteLobeGroup(fmLeft, 'leftLowerLobe')
fmLeft.endChange()
""" Right Lung """
self._rightScene = self._rightRegion.getScene()
fmRight = self._rightRegion.getFieldmodule()
fmRight.beginChange()
self._rightCoordinates = fmRight.findFieldByName('coordinates')
self._rightMagnitude = fmRight.createFieldMagnitude(self._rightCoordinates)
self._rightMagnitude.setName('rightmag')
self._rightMagnitude.setManaged(True)
""" Create upper and lower lobe groups """
# self._rightUpperLobe = self._creteLobeGroup(fmRight, 'rightUpperLobe')
# self._rightMiddleLobe = self._creteLobeGroup(fmRight, 'rightMiddleLobe')
# self._rightLowerLobe = self._creteLobeGroup(fmRight, 'rightLowerLobe')
fmRight.endChange()
self._setupScene(self._leftRegion, self._rightRegion)
def _generateLeftAirway(self):
""" Left Airway """
self._leftAirwayScene = self._leftAirwayRegion.getScene()
fmLeftAirway = self._leftAirwayRegion.getFieldmodule()
fmLeftAirway.beginChange()
self._leftAirwayCoordinates = fmLeftAirway.findFieldByName('coordinates')
self._leftAirwayMagnitude = fmLeftAirway.createFieldMagnitude(self._leftAirwayCoordinates)
self._leftAirwayMagnitude.setName('leftairwaymag')
self._leftAirwayMagnitude.setManaged(True)
fmLeftAirway.endChange()
leftAirwayScene = self._createScene(self._leftAirwayRegion)
leftAirwayScene.beginChange()
line = self._createLineGraphics(leftAirwayScene, self._leftAirwayCoordinates, 'displayLAirway', 'airway')
line.setRenderLineWidth(2)
leftAirwayScene.endChange()
graphics = self._leftAirwayRegion.getScene().findGraphicsByName('displayLAirway')
graphics.setVisibilityFlag(self._settings['displayLAirway'])
def _generateRightAirway(self):
""" Right Airway """
self._rightAirwayScene = self._rightAirwayRegion.getScene()
fmRightAirway = self._rightAirwayRegion.getFieldmodule()
fmRightAirway.beginChange()
self._rightAirwayCoordinates = fmRightAirway.findFieldByName('coordinates')
self._rightAirwayMagnitude = fmRightAirway.createFieldMagnitude(self._rightAirwayCoordinates)
self._rightAirwayMagnitude.setName('rightairwaymag')
self._rightAirwayMagnitude.setManaged(True)
fmRightAirway.endChange()
rightAirwayScene = self._createScene(self._rightAirwayRegion)
rightAirwayScene.beginChange()
line = self._createLineGraphics(rightAirwayScene, self._rightAirwayCoordinates, 'displayRAirway', 'airway')
line.setRenderLineWidth(2)
rightAirwayScene.endChange()
graphics = self._rightAirwayRegion.getScene().findGraphicsByName('displayRAirway')
graphics.setVisibilityFlag(self._settings['displayRAirway'])
def _generateLeftArtery(self):
""" Left Artery """
self._leftArteryScene = self._leftArteryRegion.getScene()
fmLeftArtery = self._leftArteryRegion.getFieldmodule()
fmLeftArtery.beginChange()
self._leftArteryCoordinates = fmLeftArtery.findFieldByName('coordinates')
self._leftArteryMagnitude = fmLeftArtery.createFieldMagnitude(self._leftArteryCoordinates)
self._leftArteryMagnitude.setName('leftarterymag')
self._leftArteryMagnitude.setManaged(True)
fmLeftArtery.endChange()
leftArteryScene = self._createScene(self._leftArteryRegion)
leftArteryScene.beginChange()
line = self._createLineGraphics(leftArteryScene, self._leftArteryCoordinates, 'displayLArtery', 'red')
line.setRenderLineWidth(2)
leftArteryScene.endChange()
graphics = self._leftArteryRegion.getScene().findGraphicsByName('displayLArtery')
graphics.setVisibilityFlag(self._settings['displayLArtery'])
def _generateRightArtery(self):
""" Right Artery """
self._rightArteryScene = self._rightArteryRegion.getScene()
fmRightArtery = self._rightArteryRegion.getFieldmodule()
fmRightArtery.beginChange()
self._rightArteryCoordinates = fmRightArtery.findFieldByName('coordinates')
self._rightArteryMagnitude = fmRightArtery.createFieldMagnitude(self._rightArteryCoordinates)
self._rightArteryMagnitude.setName('rightarterymag')
self._rightArteryMagnitude.setManaged(True)
fmRightArtery.endChange()
rightArteryScene = self._createScene(self._rightArteryRegion)
rightArteryScene.beginChange()
line = self._createLineGraphics(rightArteryScene, self._rightArteryCoordinates, 'displayRArtery', 'red')
line.setRenderLineWidth(2)
rightArteryScene.endChange()
graphics = self._rightArteryRegion.getScene().findGraphicsByName('displayRArtery')
graphics.setVisibilityFlag(self._settings['displayRArtery'])
def _generateLeftVein(self):
""" Left Vein """
self._leftVeinScene = self._leftVeinRegion.getScene()
fmLeftVein = self._leftVeinRegion.getFieldmodule()
fmLeftVein.beginChange()
self._leftVeinCoordinates = fmLeftVein.findFieldByName('coordinates')
self._leftVeinMagnitude = fmLeftVein.createFieldMagnitude(self._leftVeinCoordinates)
self._leftVeinMagnitude.setName('leftveinmag')
self._leftVeinMagnitude.setManaged(True)
fmLeftVein.endChange()
leftVeinScene = self._createScene(self._leftVeinRegion)
leftVeinScene.beginChange()
line = self._createLineGraphics(leftVeinScene, self._leftVeinCoordinates, 'displayLVein', 'blue')
line.setRenderLineWidth(2)
leftVeinScene.endChange()
graphics = self._leftVeinRegion.getScene().findGraphicsByName('displayLVein')
graphics.setVisibilityFlag(self._settings['displayLVein'])
def _generateRightVein(self):
""" Right Vein """
self._rightVeinScene = self._rightVeinRegion.getScene()
fmRightVein = self._rightVeinRegion.getFieldmodule()
fmRightVein.beginChange()
self._rightVeinCoordinates = fmRightVein.findFieldByName('coordinates')
self._rightVeinMagnitude = fmRightVein.createFieldMagnitude(self._rightVeinCoordinates)
self._rightVeinMagnitude.setName('rightveinmag')
self._rightVeinMagnitude.setManaged(True)
fmRightVein.endChange()
rightVeinScene = self._createScene(self._rightVeinRegion)
rightVeinScene.beginChange()
line = self._createLineGraphics(rightVeinScene, self._rightVeinCoordinates, 'displayRVein', 'blue')
line.setRenderLineWidth(2)
rightVeinScene.endChange()
graphics = self._rightVeinRegion.getScene().findGraphicsByName('displayRVein')
graphics.setVisibilityFlag(self._settings['displayRVein'])
def _creteLobeGroup(self, fm, name):
mesh = fm.findMeshByDimension(2)
group = self._createFieldGroup(fm, name)
elemGroup = self._createElementGroup(group, mesh)
meshGroup = elemGroup.getMeshGroup()
self._addSubElements(group)
el_iter = mesh.createElementiterator()
element = el_iter.next()
while element.isValid():
if element.getIdentifier() in self._elemGroups[name]:
meshGroup.addElement(element)
element = el_iter.next()
return group, meshGroup
def _createFieldGroup(self, fm, name):
field = fm.findFieldByName(name)
if field.isValid():
group = field.castGroup()
assert group.isValid(), 'Existing non-group field called ' + name
else:
group = fm.createFieldGroup()
group.setName(name)
group.setManaged(True)
return group
def _createElementGroup(self, grp, mesh):
elementGroup = grp.getFieldElementGroup(mesh)
if not elementGroup.isValid():
elementGroup = grp.createFieldElementGroup(mesh)
return elementGroup
def _addSubElements(self, grp):
from opencmiss.zinc.field import FieldGroup
grp.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
fm = grp.getFieldmodule()
for dimension in range(1, 3):
mesh = fm.findMeshByDimension(dimension)
elementGroup = grp.getFieldElementGroup(mesh)
if elementGroup.isValid():
meshGroup = elementGroup.getMeshGroup()
meshGroup.addElementsConditional(elementGroup)
return None
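    # The three helpers above follow the usual OpenCMISS-Zinc pattern for tagging a
    # subset of elements: create (or reuse) a FieldGroup, obtain its element group
    # for the mesh, add elements to the resulting mesh group, and propagate
    # membership down to faces and lines via SUBELEMENT_HANDLING_MODE_FULL.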
def _setupScene(self, leftregion, rightregion):
""" Left Lung"""
leftScene = self._createScene(leftregion)
leftScene.beginChange()
line = self._createLineGraphics(leftScene, self._leftCoordinates, 'displayLinesLeft', 'transTissue')
line.setRenderLineWidth(2.5)
self._surfaceLeft = self._createSurfaceGraphics(leftScene, self._leftCoordinates, 'displaySurfaceLeft', 'solidTissue')
leftScene.endChange()
""" Right Lung"""
rightScene = self._createScene(rightregion)
rightScene.beginChange()
line = self._createLineGraphics(rightScene, self._rightCoordinates, 'displayLinesRight', 'transTissue')
line.setRenderLineWidth(2.5)
self._surfaceRight = self._createSurfaceGraphics(rightScene, self._rightCoordinates, 'displaySurfaceRight', 'solidTissue')
rightScene.endChange()
def _createScene(self, region):
return self.getScene(region)
def _createLineGraphics(self, scene, coordinates, name, color):
materialModule = self._materialModule
lines = scene.createGraphicsLines()
lines.setCoordinateField(coordinates)
lines.setName(name)
black = materialModule.findMaterialByName(color)
lines.setMaterial(black)
return lines
def _createSurfaceGraphics(self, scene, coordinates, name, color):
surface = scene.createGraphicsSurfaces()
surface.setCoordinateField(coordinates)
surface.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
surfacesMaterial = self._materialModule.findMaterialByName(color)
surface.setMaterial(surfacesMaterial)
surface.setName(name)
surface.setVisibilityFlag(self.isDisplaySurfaces(name))
return surface
# def setLeftUpperLobeGraphics(self):
# self._surfaceLeft.setSubgroupField(self._leftlowerLobe)
#
# def setLeftLowerLobeGraphics(self):
# self._surfaceLeft.setSubgroupField(self._leftUpperLobe)
#
# def setRightUpperLobeGraphics(self):
# self._surfaceRight.setSubgroupField(self._rightMiddleLobe)
# self._surfaceRight.setSubgroupField(self._rightLowerLobe)
#
# def setRightMiddleLobeGraphics(self):
# self._surfaceRight.setSubgroupField(self._rightUpperLobe)
# self._surfaceRight.setSubgroupField(self._rightLowerLobe)
#
# def setRighttLowerLobeGraphics(self):
# self._surfaceRight.setSubgroupField(self._rightUpperLobe)
# self._surfaceRight.setSubgroupField(self._rightMiddleLobe)
@staticmethod
def getScene(region):
return region.getScene()
@staticmethod
def getPluginPath():
if platform.system() == 'Windows':
return '/'.join(__file__.split('\\')[:-2])
else:
return '/'.join(__file__.split('/')[1:-2])
def isDisplaySurfaces(self, surfaceName):
return self._getVisibility(surfaceName)
def setDisplayObjects(self, surfaceName, show):
self._setVisibility(surfaceName, show)
def applyMorphing(self, nodeArray, lung=None):
self._setNodeParameter(nodeArray, lung=lung)
def _setNodeParameter(self, nodeArray, lung):
        if lung == 'left':
            fieldmodule = self._leftRegion.getFieldmodule()
        elif lung == 'right':
            fieldmodule = self._rightRegion.getFieldmodule()
        else:
            raise Exception("Region invalid!")
if lung == 'left' and nodeArray.shape[0] != 99:
raise Exception("Lung and node array do not match!")
elif lung == 'right' and nodeArray.shape[0] != 126:
raise Exception("Lung and node array do not match!")
nodes = self._getLeftNodeField() if lung == 'left' else self._getRightNodeField()
cache = fieldmodule.createFieldcache()
coordinates = getOrCreateCoordinateField(fieldmodule)
nodeIndex = self._getLeftNodeIndex() if lung == 'left' else self._getRightNodeIndex()
fieldmodule.beginChange()
node_iter = nodes.createNodeiterator()
node = node_iter.next()
for n in range(nodeArray.shape[0]):
if "." not in nodeIndex[n]:
nodeID = int(nodeIndex[n])
nodeVersion = 1
else:
nodeID = int(nodeIndex[n].split('.')[0])
nodeVersion = int(nodeIndex[n].split('.')[1])
if node.getIdentifier() == nodeID:
pass
else:
node = node_iter.next()
cache.setNode(node)
resultList = list()
""" setting the node xyz coordinates """
rx = coordinates.setNodeParameters(cache, 1, Node.VALUE_LABEL_VALUE, nodeVersion, nodeArray[n, 0, 0])
ry = coordinates.setNodeParameters(cache, 2, Node.VALUE_LABEL_VALUE, nodeVersion, nodeArray[n, 1, 0])
rz = coordinates.setNodeParameters(cache, 3, Node.VALUE_LABEL_VALUE, nodeVersion, nodeArray[n, 2, 0])
""" setting the nodal x derivatives """
rxds1 = coordinates.setNodeParameters(cache, 1, Node.VALUE_LABEL_D_DS1, nodeVersion, nodeArray[n, 0, 1])
rxds2 = coordinates.setNodeParameters(cache, 1, Node.VALUE_LABEL_D_DS2, nodeVersion, nodeArray[n, 0, 2])
rxds12 = coordinates.setNodeParameters(cache, 1, Node.VALUE_LABEL_D2_DS1DS2, nodeVersion,
nodeArray[n, 0, 3])
""" setting the nodal y derivatives """
ryds1 = coordinates.setNodeParameters(cache, 2, Node.VALUE_LABEL_D_DS1, nodeVersion, nodeArray[n, 1, 1])
ryds2 = coordinates.setNodeParameters(cache, 2, Node.VALUE_LABEL_D_DS2, nodeVersion, nodeArray[n, 1, 2])
ryds12 = coordinates.setNodeParameters(cache, 2, Node.VALUE_LABEL_D2_DS1DS2, nodeVersion,
nodeArray[n, 1, 3])
""" setting the nodal z derivatives """
rzds1 = coordinates.setNodeParameters(cache, 3, Node.VALUE_LABEL_D_DS1, nodeVersion, nodeArray[n, 2, 1])
rzds2 = coordinates.setNodeParameters(cache, 3, Node.VALUE_LABEL_D_DS2, nodeVersion, nodeArray[n, 2, 2])
rzds12 = coordinates.setNodeParameters(cache, 3, Node.VALUE_LABEL_D2_DS1DS2, nodeVersion,
nodeArray[n, 2, 3])
resultList.append(rx);
resultList.append(ry);
resultList.append(rz);
resultList.append(rxds1);
resultList.append(rxds2);
resultList.append(rxds12);
resultList.append(ryds1);
resultList.append(ryds2);
resultList.append(ryds12);
axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'LabelSize_%s' % axis
try :
if key in config : style.SetLabelSize ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleColor_%s' % axis
try :
if key in config : style.SetTitleColor ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleFont_%s' % axis
try :
if key in config : style.SetTitleFont ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleOffset_%s' % axis
try :
if key in config : style.SetTitleOffset ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleSize_%s' % axis
try :
if key in config : style.SetTitleSize ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
## very special attribute
if 'PaperSize_X' in config and 'PaperSize_Y' in config :
key = 'PaperSize/1'
try :
style.SetPaperSize ( float ( config ['PaperSize_X'] ) ,
float ( config ['PaperSize_Y'] ) )
except :
logger.warning ( "Can't set attribute %s" % key )
elif 'PaperSize' in config :
key = 'PaperSize/2'
try :
style.SetPaperSize ( int ( config ['PaperSize'] ) )
except :
logger.warning ( "Can't set attribute %s" % key )
## one more very special attribute
for i in range ( 31 ) :
k = 'LineStyleString_%s' % i
if k in config :
style.SetLineStyleString ( i , config[k].strip() )
return style
# ============================================================================
ROOT.TStyle.dump = dump_style
ROOT.TStyle.get = dump_style
ROOT.TStyle.set = set_style
# =============================================================================
## Parse the configuration and create
# all the styles according to configuration
def make_styles ( config = None ) :
"""Parse the configuration and create
all the styles according to configuration
"""
if config is None :
import ostap.core.config as _CONFIG
config = _CONFIG.config
for key in config :
if not key.upper().startswith('STYLE') : continue
section = config [ key ]
s , c , n = key.partition (':')
if not c : continue
## the style name
name = n.strip ( )
description = section.get ( 'description' , fallback = 'The style %s' % name )
ok = section.getboolean ( 'ostaplike' , fallback = False )
## create ostap-like style
if ok : make_ostap_style ( name , description , section )
else :
## generic style
logger.info ( 'Create Generic style %s/%s' % ( name , description ) )
style = ROOT.TStyle ( name , description )
set_style ( style , section )
if name in StyleStore.styles() :
logger.warning ( "The configuration %s replaced" % name )
StyleStore.styles().update ( { name : style } )
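# Hedged illustration (not part of the module): make_styles looks for configuration
# sections whose names start with "Style:". The keys shown are ones actually read
# above; the concrete values are made up for the example.
#
#   [Style:MyStyle]
#   description  = My plotting style
#   ostaplike    = True
#   PadTopMargin = 0.06
#   LineWidth    = 2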
# ==============================================================================
def get_float ( config , name , default ) :
try :
if hasattr ( config , 'getfloat' ) :
value = config.getfloat ( name , fallback = default )
else : value = config.get ( name , default )
return float ( value )
except :
return default
# =============================================================================
def get_int ( config , name , default ) :
try :
if hasattr ( config , 'getint') :
value = config.getint ( name , fallback = default )
else : value = config.get ( name , default )
return int ( value )
except :
return default
# =============================================================================
def get_str ( config , name , default ) :
try :
if hasattr ( config , 'getint') :
value = config.get ( name , fallback = default )
else : value = config.get ( name , default )
return str ( value )
except :
return default
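# =============================================================================
## Illustrative sketch (added, not part of the original module): how the
#  get_float/get_int/get_str helpers behave for a plain dict, which has no
#  configparser-style getfloat/getint methods.  The keys and defaults below
#  are invented for demonstration only.
def _demo_get_helpers () :
    cfg = { 'MarkerSize' : '1.5' , 'MarkerStyle' : '20' }
    assert get_float ( cfg , 'MarkerSize'   , 1.2  ) == 1.5   ## parsed from the string value
    assert get_float ( cfg , 'PadTopMargin' , 0.05 ) == 0.05  ## missing key -> default
    assert get_int   ( cfg , 'MarkerStyle'  , 8    ) == 20
    assert get_str   ( cfg , 'PaperSize'    , 'A4' ) == 'A4'  ## missing key -> default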
# ============================================================================
## make Ostap-like style
def make_ostap_style ( name ,
description = 'The Style' ,
config = {} ,
colz = False ,
scale = 1.0 ,
font = ostap_font ,
line_width = ostap_line_width ) :
description = config.get ( 'description' , 'The Style' )
conf = {}
conf.update ( config )
conf [ 'FrameBorderMode' ] = get_int ( config , 'FrameBorderMode' , 0 )
conf [ 'CanvasBorderMode' ] = get_int ( config , 'CanvasBorderMode' , 0 )
conf [ 'PadBorderMode' ] = get_int ( config , 'PadBorderMode' , 0 )
conf [ 'PadColor' ] = get_int ( config , 'PadColor' , 0 )
conf [ 'CanvasColor' ] = get_int ( config , 'CanvasColor' , 0 )
conf [ 'StatColor' ] = get_int ( config , 'StatColor' , 0 )
if 'PaperSize_X' in config or 'PaperSize_Y' in config :
conf ['PaperSize_X' ] = get_float ( config , 'PaperSize_X' , 20 )
conf ['PaperSize_Y' ] = get_float ( config , 'PaperSize_Y' , 26 )
else :
a = str ( config.get ( 'PaperSize' ) ).upper()
if 'A4' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kA4
elif 'US' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kUSletter
elif 'LETTER' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kUSletter
else : conf ['PaperSize' ] = get_int ( config , 'PaperSize' , ROOT.TStyle.kA4 )
conf [ 'PadTopMargin' ] = get_float ( config , 'PadTopMargin' , 0.05 )
conf [ 'PadRightMargin' ] = get_float ( config , 'PadRightMargin' , 0.14 if colz else 0.05 )
conf [ 'PadLeftMargin' ] = get_float ( config , 'PadLeftMargin' , 0.10 )
conf [ 'PadBottomMargin' ] = get_float ( config , 'PadBottomMargin' , 0.10 )
conf [ 'TextFont' ] = get_int ( config , 'TextFont' , font )
conf [ 'TextSize' ] = get_float ( config , 'FontSize' , 0.08 * scale )
conf [ 'LabelFont_X' ] = get_int ( config , 'LabelFont_X' , font )
conf [ 'LabelFont_Y' ] = get_int ( config , 'LabelFont_Y' , font )
conf [ 'LabelFont_Z' ] = get_int ( config , 'LabelFont_Z' , font )
conf [ 'LabelSize_X' ] = get_float ( config , 'LabelSize_X' , 0.05 * scale )
conf [ 'LabelSize_Y' ] = get_float ( config , 'LabelSize_Y' , 0.05 * scale )
conf [ 'LabelSize_Z' ] = get_float ( config , 'LabelSize_Z' , 0.05 * scale )
conf [ 'TitleFont_X' ] = get_int ( config , 'TitleFont_X' , font )
conf [ 'TitleFont_Y' ] = get_int ( config , 'TitleFont_Y' , font )
conf [ 'TitleFont_Z' ] = get_int ( config , 'TitleFont_Z' , font )
conf [ 'TitleSize_X' ] = get_float ( config , 'TitleSize_X' , -1 )
conf [ 'TitleSize_Y' ] = get_float ( config , 'TitleSize_Y' , 0.05 * scale )
conf [ 'TitleSize_Z' ] = get_float ( config , 'TitleSize_Z' , 0.05 * scale )
conf [ 'LineWidth' ] = get_int ( config , 'LineWidth' , line_width )
conf [ 'FrameWidth' ] = get_int ( config , 'FrameWidth' , line_width )
conf [ 'HistLineWidth' ] = get_int ( config , 'HistLineWidth' , line_width )
conf [ 'FuncWidth' ] = get_int ( config , 'FuncWidth' , line_width )
    conf [ 'GridWidth' ] = get_int ( config , 'GridWidth' , line_width )
conf [ 'MarkerStyle' ] = get_int ( config , 'MarkerStyle' , 20 )
conf [ 'MarkerSize' ] = get_float ( config , 'MarkerSize' , 1.2 )
    conf [ 'LabelOffset' ] = get_float ( config , 'LabelOffset' , 0.005 ) ## 0.005 is an assumed default (ROOT's standard label offset)
# Repository: lerrytang/GymOthelloEnv
"""Othello environments for reinforcement learning."""
import gym
from gym import spaces
from gym.envs.classic_control import rendering
import pyglet
from pyglet import gl
import numpy as np
BLACK_DISK = -1
NO_DISK = 0
WHITE_DISK = 1
IMAGE_SIZE = 96
MAX_INT = (1 << 31)
WINDOW_H = 480
WINDOW_W = 480
BOARDFIELD = 480
class OthelloEnv(gym.Env):
"""Wrapper of OthelloBaseEnv."""
metadata = {'render.modes': ['np_array', 'human']}
def __init__(self,
white_policy=None,
black_policy=None,
protagonist=WHITE_DISK,
board_size=8,
initial_rand_steps=0,
seed=0,
sudden_death_on_invalid_move=True,
render_in_step=False,
num_disk_as_reward=False,
possible_actions_in_obs=False):
# Create the inner environment.
self.board_size = board_size
self.num_disk_as_reward = num_disk_as_reward
self.env = OthelloBaseEnv(
board_size=board_size,
num_disk_as_reward=self.num_disk_as_reward,
sudden_death_on_invalid_move=sudden_death_on_invalid_move,
possible_actions_in_obs=possible_actions_in_obs,
)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.render_in_step = render_in_step
self.initial_rand_steps = initial_rand_steps
self.rand_seed = seed
self.rnd = np.random.RandomState(seed=self.rand_seed)
self.max_rand_steps = 0
self.rand_step_cnt = 0
# Initialize policies.
self.protagonist = protagonist
if self.protagonist == BLACK_DISK:
self.opponent = white_policy
else:
self.opponent = black_policy
def seed(self, seed=None):
if seed is not None:
self.rand_seed = seed
self.rnd = np.random.RandomState(seed=self.rand_seed)
if self.opponent is not None and hasattr(self.opponent, 'seed'):
self.opponent.seed(self.rand_seed)
def reset(self):
obs = self.env.reset()
self.max_rand_steps = self.rnd.randint(
low=0, high=self.initial_rand_steps // 2 + 1) * 2
self.rand_step_cnt = 0
print('The initial {} steps will be random'.format(self.max_rand_steps))
# This provides the opponent a chance to get env.possible_moves.
if hasattr(self.opponent, 'reset'):
try:
self.opponent.reset(self)
except TypeError:
pass
if self.env.player_turn == self.protagonist:
return obs
else:
action = self.opponent.get_action(obs)
obs, _, done, _ = self.env.step(action)
if done:
print('done==True in reset(), do it again.')
return self.reset()
else:
return obs
def step(self, action):
assert self.env.player_turn == self.protagonist
if self.rand_step_cnt < self.max_rand_steps:
ix = self.rnd.randint(0, len(self.possible_moves))
action = self.possible_moves[ix]
self.rand_step_cnt += 1
obs, reward, done, _ = self.env.step(action) # My move.
if self.render_in_step:
self.render()
if done:
return obs, reward, done, None
while not done and self.env.player_turn != self.protagonist:
if self.rand_step_cnt < self.max_rand_steps:
ix = self.rnd.randint(0, len(self.possible_moves))
opponent_move = self.possible_moves[ix]
self.rand_step_cnt += 1
else:
opponent_move = self.opponent.get_action(obs)
obs, reward, done, _ = self.env.step(opponent_move)
if self.render_in_step:
self.render()
return obs, -reward, done, None
def render(self, mode='human', close=False):
self.env.render(mode=mode, close=close)
def close(self):
self.env.close()
@property
def player_turn(self):
return self.env.player_turn
@property
def possible_moves(self):
return self.env.possible_moves
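# Illustrative sketch (added, not part of the original file): a minimal rollout
# of OthelloEnv against a random opponent.  The RandomPolicy helper below is an
# assumption for demonstration -- any object exposing get_action(obs) (and
# optionally reset(env)/seed(seed)) can serve as the opponent policy.
def _demo_random_rollout(max_steps=10):
    class RandomPolicy(object):
        def reset(self, env):
            self._env = env
        def get_action(self, obs):
            moves = self._env.possible_moves
            return np.random.choice(moves) if moves else 0
    env = OthelloEnv(white_policy=RandomPolicy(), protagonist=BLACK_DISK)
    obs = env.reset()
    for _ in range(max_steps):
        moves = env.possible_moves
        if not moves:
            break
        obs, reward, done, _ = env.step(np.random.choice(moves))
        if done:
            break
    env.close()
    return obs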
class OthelloBaseEnv(gym.Env):
"""Othello base environment."""
metadata = {'render.modes': ['np_array', 'human']}
def __init__(self,
board_size=8,
sudden_death_on_invalid_move=True,
num_disk_as_reward=False,
possible_actions_in_obs=False,
mute=False):
# Initialize members from configs.
self.board_size = max(4, board_size)
self.sudden_death_on_invalid_move = sudden_death_on_invalid_move
self.board_state = self._reset_board()
self.viewer = None
self.num_disk_as_reward = num_disk_as_reward
self.mute = mute # Log msgs can be misleading when planning with model.
self.possible_actions_in_obs = possible_actions_in_obs
# Initialize internal states.
self.player_turn = BLACK_DISK
self.winner = NO_DISK
self.terminated = False
self.possible_moves = []
# Initialize action space: one action for each board position.
self.action_space = spaces.Discrete(self.board_size ** 2)
# Initialize observation space.
if self.possible_actions_in_obs:
self.observation_space = spaces.Box(
np.zeros([2, ] + [self.board_size] * 2),
np.ones([2, ] + [self.board_size] * 2))
else:
self.observation_space = spaces.Box(
np.zeros([self.board_size] * 2), np.ones([self.board_size] * 2))
def _reset_board(self):
board_state = np.zeros([self.board_size] * 2, dtype=int)
center_row_ix = center_col_ix = self.board_size // 2
board_state[center_row_ix - 1][center_col_ix - 1] = WHITE_DISK
board_state[center_row_ix][center_col_ix] = WHITE_DISK
board_state[center_row_ix][center_col_ix - 1] = BLACK_DISK
board_state[center_row_ix - 1][center_col_ix] = BLACK_DISK
return board_state
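    # Illustrative note (added): for the default board_size=8 the four centre
    # squares are initialised to the standard Othello opening position,
    #
    #           col 3   col 4
    #   row 3   WHITE   BLACK
    #   row 4   BLACK   WHITE
    #
    # i.e. white on the main diagonal and black on the anti-diagonal.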
def reset(self):
self.board_state = self._reset_board()
self.player_turn = BLACK_DISK
self.winner = NO_DISK
self.terminated = False
self.possible_moves = self.get_possible_actions()
return self.get_observation()
def get_num_killed_enemy(self, board, x, y, delta_x, delta_y):
# We overload WHITE_DISK to be our disk, and BLACK_DISK to be enemies.
# (x, y) is a valid position if the following pattern exists:
# "(x, y), BLACK_DISK, ..., BLACK_DISK, WHITE_DISK"
next_x = x + delta_x
next_y = y + delta_y
# The neighbor must be an enemy.
if (
next_x < 0 or
next_x >= self.board_size or
next_y < 0 or
next_y >= self.board_size or
board[next_x][next_y] != BLACK_DISK
):
return 0
# Keep scanning in the direction.
cnt = 0
while (
0 <= next_x < self.board_size and
0 <= next_y < self.board_size and
board[next_x][next_y] == BLACK_DISK
):
next_x += delta_x
next_y += delta_y
cnt += 1
if (
next_x < 0 or
next_x >= self.board_size or
next_y < 0 or
next_y >= self.board_size or
board[next_x][next_y] != WHITE_DISK
):
return 0
else:
return cnt
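    # Illustrative example (added): scanning a row that reads
    #   (x, y) empty, BLACK, BLACK, WHITE
    # along (delta_x, delta_y) returns 2 -- two enemy disks would be flipped --
    # whereas a run of enemies that ends on an empty square or the board edge
    # returns 0, because it is not capped by one of our own disks.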
def get_possible_actions(self, board=None):
actions = []
if board is None:
if self.player_turn == WHITE_DISK:
board = self.board_state
else:
board = -self.board_state
for row_ix in range(self.board_size):
for col_ix in range(self.board_size):
if board[row_ix][col_ix] == NO_DISK:
if (
self.get_num_killed_enemy(
board, row_ix, col_ix, 1, 1) or
self.get_num_killed_enemy(
board, row_ix, col_ix, 1, 0) or
self.get_num_killed_enemy(
board, row_ix, col_ix, 1, -1) or
self.get_num_killed_enemy(
board, row_ix, col_ix, 0, 1) or
self.get_num_killed_enemy(
board, row_ix, col_ix, 0, -1) or
self.get_num_killed_enemy(
board, row_ix, col_ix, -1, 1) or
self.get_num_killed_enemy(
board, row_ix, col_ix, -1, 0) or
self.get_num_killed_enemy(
board, row_ix, col_ix, -1, -1)
):
actions.append(row_ix * self.board_size + col_ix)
return actions
def print_board(self, print_valid_moves=True):
valid_actions = self.get_possible_actions()
if print_valid_moves:
board = self.board_state.copy().ravel()
for p in valid_actions:
board[p] = 2
board = board.reshape(*self.board_state.shape)
else:
board = self.board_state
print('Turn: {}'.format(
'WHITE' if self.player_turn == WHITE_DISK else 'BLACK'))
print('Valid actions: {}'.format(valid_actions))
for row in board:
print(' '.join(map(lambda x: ['B', 'O', 'W', 'V'][x + 1], row)))
print('-' * 10)
def get_observation(self):
if self.player_turn == WHITE_DISK:
# White turn, we don't negate state since white=1.
state = self.board_state
else:
# Black turn, we negate board state such that black=1.
state = -self.board_state
if self.possible_actions_in_obs:
grid_of_possible_moves = np.zeros(self.board_size ** 2, dtype=bool)
grid_of_possible_moves[self.possible_moves] = True
return np.concatenate([np.expand_dims(state, axis=0),
grid_of_possible_moves.reshape(
[1, self.board_size, self.board_size])],
axis=0)
else:
return state
def set_board_state(self, board_state, perspective=WHITE_DISK):
"""Force setting the board state, necessary in model-based RL."""
if np.ndim(board_state) > 2:
state = board_state[0]
else:
state = board_state
if perspective == WHITE_DISK:
self.board_state = np.array(state)
else:
self.board_state = -np.array(state)
def update_board(self, action):
x = action // self.board_size
y = action % self.board_size
if self.player_turn == BLACK_DISK:
self.board_state = -self.board_state
for delta_x in [-1, 0, 1]:
for delta_y in [-1, 0, 1]:
if not (delta_x == 0 and delta_y == 0):
kill_cnt = self.get_num_killed_enemy(
self.board_state, x, y, delta_x, delta_y)
for i in range(kill_cnt):
dx = (i + 1) * delta_x
dy = (i + 1) * delta_y
self.board_state[x + dx][y + dy] = WHITE_DISK
self.board_state[x][y] = WHITE_DISK
if self.player_turn == BLACK_DISK:
self.board_state = -self.board_state
def step(self, action):
# Apply action.
if self.terminated:
raise ValueError('Game has terminated!')
if action not in self.possible_moves:
invalid_action = True
else:
invalid_action = False
if not invalid_action:
self.update_board(action)
# Determine if game should terminate.
num_vacant_positions = (self.board_state == NO_DISK).sum()
no_more_vacant_places = num_vacant_positions == 0
sudden_death = invalid_action and self.sudden_death_on_invalid_move
done = sudden_death or no_more_vacant_places
current_player = self.player_turn
if done:
# If game has terminated, determine winner.
self.winner = self.determine_winner(sudden_death=sudden_death)
else:
# If game continues, determine who moves next.
self.set_player_turn(-self.player_turn)
if len(self.possible_moves) == 0:
self.set_player_turn(-self.player_turn)
if len(self.possible_moves) == 0:
if not self.mute:
print('No possible moves for either party.')
self.winner = self.determine_winner()
reward = 0
if self.terminated:
if self.num_disk_as_reward:
if sudden_death:
# Strongly discourage invalid actions.
reward = -(self.board_size ** 2)
else:
white_cnt, black_cnt = self.count_disks()
if current_player == WHITE_DISK:
reward = white_cnt - black_cnt
if black_cnt == 0:
reward = self.board_size ** 2
else:
reward = black_cnt - white_cnt
if white_cnt == 0:
reward = self.board_size ** 2
else:
reward = self.winner * current_player
return self.get_observation(), reward, self.terminated, None
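    # Illustrative note (added): the reward above is expressed from the point of
    # view of the player who just moved.  With num_disk_as_reward=False it is
    # +1/-1/0 for win/loss/draw; with num_disk_as_reward=True it is the final
    # disk difference, boosted to board_size**2 for a wipe-out and set to
    # -board_size**2 when the game ends on an invalid move.  OthelloEnv.step
    # negates the reward returned after an opponent move, so the protagonist
    # always sees rewards from its own perspective.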
def set_player_turn(self, turn):
self.player_turn = turn
self.possible_moves = self.get_possible_actions()
def count_disks(self):
white_cnt = (self.board_state == WHITE_DISK).sum()
black_cnt = (self.board_state == BLACK_DISK).sum()
return white_cnt, black_cnt
def determine_winner(self, sudden_death=False):
self.terminated = True
if sudden_death:
if not self.mute:
print('sudden death due to rule violation')
if self.player_turn == WHITE_DISK:
if not self.mute:
print('BLACK wins')
return BLACK_DISK
else:
if not self.mute:
print('WHITE wins')
return WHITE_DISK
else:
white_cnt, black_cnt = self.count_disks()
if not self.mute:
print('white: {}, black: {}'.format(white_cnt, black_cnt))
if white_cnt > black_cnt:
if not self.mute:
print('WHITE wins')
return WHITE_DISK
elif black_cnt > white_cnt:
if not self.mute:
print('BLACK wins')
return BLACK_DISK
else:
if not self.mute:
print('DRAW')
return NO_DISK
def render(self, mode='human', close=False):
if close:
return
if mode == 'np_array':
self.print_board()
else:
self.show_gui_board()
def show_gui_board(self):
if self.viewer is None:
self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
win = self.viewer.window
win.switch_to()
win.dispatch_events()
self.draw_board()
win.flip()
return self.viewer.isopen
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def draw_board(self):
# Draw the green background.
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(0.4, 0.8, 0.4, 1.0)
gl.glVertex3f(-BOARDFIELD, +BOARDFIELD, 0)
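# Illustrative sketch (added, not part of the original file): OthelloBaseEnv can
# be exercised directly and headlessly; render(mode='np_array') only prints the
# board to stdout, so no pyglet window is needed.  The "always take the first
# legal move" policy below is purely for demonstration.
def _demo_base_env_self_play(board_size=4):
    env = OthelloBaseEnv(board_size=board_size, mute=True)
    env.reset()
    done = False
    while not done and env.possible_moves:
        action = env.possible_moves[0]       # first legal move for whoever is to play
        _, _, done, _ = env.step(action)
    env.render(mode='np_array')              # text rendering of the final position
    return env.winner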
# Repository: kvmanohar22/gtsam
"""Define the :class:`~geographiclib.geodesic.Geodesic` class
The ellipsoid parameters are defined by the constructor. The direct and
inverse geodesic problems are solved by
* :meth:`~geographiclib.geodesic.Geodesic.Inverse` Solve the inverse
geodesic problem
* :meth:`~geographiclib.geodesic.Geodesic.Direct` Solve the direct
geodesic problem
* :meth:`~geographiclib.geodesic.Geodesic.ArcDirect` Solve the direct
geodesic problem in terms of spherical arc length
:class:`~geographiclib.geodesicline.GeodesicLine` objects can be created
with
* :meth:`~geographiclib.geodesic.Geodesic.Line`
* :meth:`~geographiclib.geodesic.Geodesic.DirectLine`
* :meth:`~geographiclib.geodesic.Geodesic.ArcDirectLine`
* :meth:`~geographiclib.geodesic.Geodesic.InverseLine`
:class:`~geographiclib.polygonarea.PolygonArea` objects can be created
with
* :meth:`~geographiclib.geodesic.Geodesic.Polygon`
The public attributes for this class are
* :attr:`~geographiclib.geodesic.Geodesic.a`
:attr:`~geographiclib.geodesic.Geodesic.f`
*outmask* and *caps* bit masks are
* :const:`~geographiclib.geodesic.Geodesic.EMPTY`
* :const:`~geographiclib.geodesic.Geodesic.LATITUDE`
* :const:`~geographiclib.geodesic.Geodesic.LONGITUDE`
* :const:`~geographiclib.geodesic.Geodesic.AZIMUTH`
* :const:`~geographiclib.geodesic.Geodesic.DISTANCE`
* :const:`~geographiclib.geodesic.Geodesic.STANDARD`
* :const:`~geographiclib.geodesic.Geodesic.DISTANCE_IN`
* :const:`~geographiclib.geodesic.Geodesic.REDUCEDLENGTH`
* :const:`~geographiclib.geodesic.Geodesic.GEODESICSCALE`
* :const:`~geographiclib.geodesic.Geodesic.AREA`
* :const:`~geographiclib.geodesic.Geodesic.ALL`
* :const:`~geographiclib.geodesic.Geodesic.LONG_UNROLL`
:Example:
>>> from geographiclib.geodesic import Geodesic
>>> # The geodesic inverse problem
... Geodesic.WGS84.Inverse(-41.32, 174.81, 40.96, -5.50)
{'lat1': -41.32,
'a12': 179.6197069334283,
's12': 19959679.26735382,
'lat2': 40.96,
'azi2': 18.825195123248392,
'azi1': 161.06766998615882,
'lon1': 174.81,
'lon2': -5.5}
"""
# geodesic.py
#
# This is a rather literal translation of the GeographicLib::Geodesic class to
# python. See the documentation for the C++ class for more information at
#
# https://geographiclib.sourceforge.io/html/annotated.html
#
# The algorithms are derived in
#
# <NAME>,
# Algorithms for geodesics, J. Geodesy 87, 43-55 (2013),
# https://doi.org/10.1007/s00190-012-0578-z
# Addenda: https://geographiclib.sourceforge.io/geod-addenda.html
#
# Copyright (c) <NAME> (2011-2017) <<EMAIL>> and licensed
# under the MIT/X11 License. For more information, see
# https://geographiclib.sourceforge.io/
######################################################################
import math
from geographiclib.geomath import Math
from geographiclib.constants import Constants
from geographiclib.geodesiccapability import GeodesicCapability
class Geodesic(object):
"""Solve geodesic problems"""
GEOGRAPHICLIB_GEODESIC_ORDER = 6
nA1_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC1_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC1p_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA2_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC2_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA3_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA3x_ = nA3_
nC3_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC3x_ = (nC3_ * (nC3_ - 1)) // 2
nC4_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC4x_ = (nC4_ * (nC4_ + 1)) // 2
maxit1_ = 20
maxit2_ = maxit1_ + Math.digits + 10
tiny_ = math.sqrt(Math.minval)
tol0_ = Math.epsilon
tol1_ = 200 * tol0_
tol2_ = math.sqrt(tol0_)
tolb_ = tol0_ * tol2_
xthresh_ = 1000 * tol2_
CAP_NONE = GeodesicCapability.CAP_NONE
CAP_C1 = GeodesicCapability.CAP_C1
CAP_C1p = GeodesicCapability.CAP_C1p
CAP_C2 = GeodesicCapability.CAP_C2
CAP_C3 = GeodesicCapability.CAP_C3
CAP_C4 = GeodesicCapability.CAP_C4
CAP_ALL = GeodesicCapability.CAP_ALL
CAP_MASK = GeodesicCapability.CAP_MASK
OUT_ALL = GeodesicCapability.OUT_ALL
OUT_MASK = GeodesicCapability.OUT_MASK
def _SinCosSeries(sinp, sinx, cosx, c):
"""Private: Evaluate a trig series using Clenshaw summation."""
# Evaluate
# y = sinp ? sum(c[i] * sin( 2*i * x), i, 1, n) :
# sum(c[i] * cos((2*i+1) * x), i, 0, n-1)
# using Clenshaw summation. N.B. c[0] is unused for sin series
# Approx operation count = (n + 5) mult and (2 * n + 2) add
k = len(c) # Point to one beyond last element
n = k - sinp
ar = 2 * (cosx - sinx) * (cosx + sinx) # 2 * cos(2 * x)
y1 = 0 # accumulators for sum
if n & 1:
k -= 1; y0 = c[k]
else:
y0 = 0
# Now n is even
n = n // 2
while n: # while n--:
n -= 1
# Unroll loop x 2, so accumulators return to their original role
k -= 1; y1 = ar * y0 - y1 + c[k]
k -= 1; y0 = ar * y1 - y0 + c[k]
return ( 2 * sinx * cosx * y0 if sinp # sin(2 * x) * y0
else cosx * (y0 - y1) ) # cos(x) * (y0 - y1)
_SinCosSeries = staticmethod(_SinCosSeries)
def _Astroid(x, y):
"""Private: solve astroid equation."""
# Solve k^4+2*k^3-(x^2+y^2-1)*k^2-2*y^2*k-y^2 = 0 for positive root k.
# This solution is adapted from Geocentric::Reverse.
p = Math.sq(x)
q = Math.sq(y)
r = (p + q - 1) / 6
if not(q == 0 and r <= 0):
# Avoid possible division by zero when r = 0 by multiplying equations
# for s and t by r^3 and r, resp.
S = p * q / 4 # S = r^3 * s
r2 = Math.sq(r)
r3 = r * r2
# The discriminant of the quadratic equation for T3. This is zero on
# the evolute curve p^(1/3)+q^(1/3) = 1
disc = S * (S + 2 * r3)
u = r
if disc >= 0:
T3 = S + r3
# Pick the sign on the sqrt to maximize abs(T3). This minimizes loss
# of precision due to cancellation. The result is unchanged because
# of the way the T is used in definition of u.
T3 += -math.sqrt(disc) if T3 < 0 else math.sqrt(disc) # T3 = (r * t)^3
# N.B. cbrt always returns the real root. cbrt(-8) = -2.
T = Math.cbrt(T3) # T = r * t
# T can be zero; but then r2 / T -> 0.
u += T + (r2 / T if T != 0 else 0)
else:
# T is complex, but the way u is defined the result is real.
ang = math.atan2(math.sqrt(-disc), -(S + r3))
# There are three possible cube roots. We choose the root which
# avoids cancellation. Note that disc < 0 implies that r < 0.
u += 2 * r * math.cos(ang / 3)
v = math.sqrt(Math.sq(u) + q) # guaranteed positive
# Avoid loss of accuracy when u < 0.
uv = q / (v - u) if u < 0 else u + v # u+v, guaranteed positive
w = (uv - q) / (2 * v) # positive?
# Rearrange expression for k to avoid loss of accuracy due to
# subtraction. Division by 0 not possible because uv > 0, w >= 0.
k = uv / (math.sqrt(uv + Math.sq(w)) + w) # guaranteed positive
else: # q == 0 && r <= 0
# y = 0 with |x| <= 1. Handle this case directly.
# for y small, positive root is k = abs(y)/sqrt(1-x^2)
k = 0
return k
_Astroid = staticmethod(_Astroid)
def _A1m1f(eps):
"""Private: return A1-1."""
coeff = [
1, 4, 64, 0, 256,
]
m = Geodesic.nA1_//2
t = Math.polyval(m, coeff, 0, Math.sq(eps)) / coeff[m + 1]
return (t + eps) / (1 - eps)
_A1m1f = staticmethod(_A1m1f)
def _C1f(eps, c):
"""Private: return C1."""
coeff = [
-1, 6, -16, 32,
-9, 64, -128, 2048,
9, -16, 768,
3, -5, 512,
-7, 1280,
-7, 2048,
]
eps2 = Math.sq(eps)
d = eps
o = 0
for l in range(1, Geodesic.nC1_ + 1): # l is index of C1p[l]
m = (Geodesic.nC1_ - l) // 2 # order of polynomial in eps^2
c[l] = d * Math.polyval(m, coeff, o, eps2) / coeff[o + m + 1]
o += m + 2
d *= eps
_C1f = staticmethod(_C1f)
def _C1pf(eps, c):
"""Private: return C1'"""
coeff = [
205, -432, 768, 1536,
4005, -4736, 3840, 12288,
-225, 116, 384,
-7173, 2695, 7680,
3467, 7680,
38081, 61440,
]
eps2 = Math.sq(eps)
d = eps
o = 0
for l in range(1, Geodesic.nC1p_ + 1): # l is index of C1p[l]
m = (Geodesic.nC1p_ - l) // 2 # order of polynomial in eps^2
c[l] = d * Math.polyval(m, coeff, o, eps2) / coeff[o + m + 1]
o += m + 2
d *= eps
_C1pf = staticmethod(_C1pf)
def _A2m1f(eps):
"""Private: return A2-1"""
coeff = [
-11, -28, -192, 0, 256,
]
m = Geodesic.nA2_//2
t = Math.polyval(m, coeff, 0, Math.sq(eps)) / coeff[m + 1]
return (t - eps) / (1 + eps)
_A2m1f = staticmethod(_A2m1f)
def _C2f(eps, c):
"""Private: return C2"""
coeff = [
1, 2, 16, 32,
35, 64, 384, 2048,
15, 80, 768,
7, 35, 512,
63, 1280,
77, 2048,
]
eps2 = Math.sq(eps)
d = eps
o = 0
for l in range(1, Geodesic.nC2_ + 1): # l is index of C2[l]
m = (Geodesic.nC2_ - l) // 2 # order of polynomial in eps^2
c[l] = d * Math.polyval(m, coeff, o, eps2) / coeff[o + m + 1]
o += m + 2
d *= eps
_C2f = staticmethod(_C2f)
def __init__(self, a, f):
"""Construct a Geodesic object
    :param a: the equatorial radius of the ellipsoid in meters
    :param f: the flattening of the ellipsoid
    """
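# Illustrative check (added, not part of GeographicLib): the Clenshaw summation
# in Geodesic._SinCosSeries should agree with direct evaluation of the cosine
# series sum(c[i] * cos((2*i+1)*x), i = 0..n-1).  The coefficients below are
# invented for demonstration only.
def _demo_clenshaw_check(x=0.3):
  c = [0.5, -0.25, 0.125]
  direct = sum(ci * math.cos((2 * i + 1) * x) for i, ci in enumerate(c))
  clenshaw = Geodesic._SinCosSeries(False, math.sin(x), math.cos(x), c)
  return abs(direct - clenshaw) < 1e-12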
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import logging
import git
import tempfile
import requests
import contextlib
import re
from .actions.check_for_spdx_license import check_for_spdx_license
from .actions.check_for_download_hash import check_for_download_hash
from .actions.check_for_readme import check_for_readme
from .actions.check_for_license import check_for_license
from .actions.check_for_deprecated_generators import check_for_deprecated_generators
from .actions.check_for_deprecated_methods import check_for_deprecated_methods
from .actions.check_for_required_attributes import check_for_required_attributes
from .actions.update_a_python_version import update_a_python_version
from .actions.update_a_path_manipulation import update_a_path_manipulation
from .actions.update_a_python_environment_variable import update_a_python_environment_variable
from .actions.update_a_jobs import update_a_jobs
from .actions.update_c_deprecated_attributes import update_c_deprecated_attributes
from .actions.update_c_openssl_version_patch import update_c_openssl_version_patch
from .actions.update_c_generic_exception_to_invalid_conf import update_c_generic_exception_to_invalid_conf
from .actions.update_c_default_options_to_dict import update_c_default_options_to_dict
from .actions.update_c_tools_version import update_c_tools_version
from .actions.update_t_ci_dir_path import update_t_ci_dir_path
from .actions.update_t_macos_images import update_t_macos_images
from .actions.update_t_new_docker_image_names import update_t_new_docker_image_names
from .actions.update_t_jobs import update_t_jobs
from .actions.update_t_linux_image import update_t_linux_image
from .actions.update_t_linux_python_version import update_t_linux_python_version
from .actions.update_other_travis_to_ci_dir_name import update_other_travis_to_ci_dir_name
from .actions.update_other_pyenv_python_version import update_other_pyenv_python_version
from .actions.update_readme_travis_url import update_readme_travis_url
__version__ = '0.7.7'
__author__ = 'Bincrafters <<EMAIL>>'
__license__ = 'MIT'
LOGGING_FORMAT = '[%(levelname)s]\t%(asctime)s %(message)s'
LOGGING_LEVEL = os.getenv("BINCRAFTERS_LOGGING_LEVEL", logging.INFO)
logging.basicConfig(level=int(LOGGING_LEVEL), format=LOGGING_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
# Python version for updating files
python_version_current_pyenv = "3.7.1"
python_version_current_appveyor = "37"
python_version_current_travis_linux = "3.7"
# for AppVeyor dot zero releases need to be added without dot zero, for pyenv a second time with a dot zero
python_check_for_old_versions = ["2.7.8", "2.7.10", "2.7.14", "2.7", "3.6", "3.7.0"]
# Sometimes Travis is publishing new CI images with new XCode versions
# but they still have the same Clang version
# in this case we do NOT need to add new compiler versions and therefore jobs
# but we need to update the existing jobs
# travis_macos_images_updates = [["10.1", "10.2"]] 10.2 isn't ready yet due to zlib problems
travis_macos_images_updates = [["9.3", "9.4"]]
# What apple_clang version is available on which Travis image? What MSVC versions are available on which AppVeyor image?
travis_macos_images_compiler_mapping = {'7.3': '7.3', '8.1': '8.3', '9.0': '9', '9.1': '9.4', '10.0': '10.1'}
appveyor_win_msvc_images_compiler_mapping = {'12': '2015', '14': '2015', '15': '2017', '16': '2019'}
# This compiler versions are getting added if they are newer than the existing jobs
# and if they don't already exist
compiler_versions = {'gcc': ('6', '7', '8', '9'),
'clang': ('5.0', '6.0', '7.0', '8'),
'apple_clang': ('9.1', '10.0'),
'visual': ('15', '16')}
# This compiler versions are getting actively removed from existing jobs
compiler_versions_deletion = {'gcc': (), 'clang': (), 'apple_clang': (), 'visual': ()}
# What are the latest AVAILABLE patches for OpenSSL, which versions are End-Of-Life?
openssl_version_matrix = {'1.0.1': {'latest_patch': 'h', 'eol': True},
'1.0.2': {'latest_patch': 's', 'eol': False},
'1.1.0': {'latest_patch': 'k', 'eol': False},
'1.1.1': {'latest_patch': 'c', 'eol': False},
}
@contextlib.contextmanager
def chdir(newdir):
""" Change directory using locked scope
:param newdir: Temporary folder to move
"""
old_path = os.getcwd()
os.chdir(newdir)
try:
yield
finally:
os.chdir(old_path)
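# Illustrative usage sketch (added, not part of the original tool): chdir
# restores the previous working directory even if the body raises, because the
# restore happens in the finally block above.
def _demo_chdir():
    before = os.getcwd()
    with tempfile.TemporaryDirectory() as tmp:
        with chdir(tmp):
            assert os.path.samefile(os.getcwd(), tmp)
    assert os.getcwd() == before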
class Command(object):
""" Execute Travis file update
"""
def __init__(self):
""" Fill regex compiler
"""
self._logger = logging.getLogger(__file__)
self._logger.setLevel(logging.INFO)
def _parse_arguments(self, *args):
""" Add program arguments
:param args: User arguments
"""
parser = argparse.ArgumentParser(description="Bincrafters Conventions")
group = parser.add_mutually_exclusive_group()
group.add_argument('--remote', type=str, help='Github repo to be updated e.g. bincrafters/conan-foobar')
group.add_argument('--local', action='store_true', help='Update current local repository')
group.add_argument('-t', '--travisfile', type=str, nargs='?', const='.travis.yml',
help='Travis file to be updated e.g. .travis.yml')
group.add_argument('-a', '--appveyorfile', type=str, nargs='?', const='appveyor.yml',
help='Appveyor file to be updated e.g. appveyor.yml')
group.add_argument('--conanfile', '-c', type=str, nargs='?', const='conanfile.py',
help='Conan recipe path e.g conanfile.py')
group.add_argument('--check', action='store_true', help='Checks for additional conventions')
parser.add_argument('--dry-run', '-d', action='store_true', default=False,
help='Do not push after update from remote')
parser.add_argument('--project-pattern', '-pp', type=str,
help='Project pattern to filter over user projects e.g bincrafters/conan-*')
parser.add_argument('--branch-pattern', '-bp', type=str,
help='Branch pattern to filter over user projects e.g stable/*')
parser.add_argument('--readme', '-r', type=str, nargs='?', const='README.md',
help='README file path to be updated')
group.add_argument('--version', '-v', action='version', version='%(prog)s {}'.format(__version__))
args = parser.parse_args(*args)
return args
def run(self, *args):
""" Process file update
:param args: User arguments
"""
arguments = self._parse_arguments(*args)
if not len(sys.argv) > 1 or arguments.local:
if os.path.isfile(".travis.yml"):
self._update_compiler_jobs(".travis.yml")
if os.path.isfile("appveyor.yml"):
self._update_appveyor_file("appveyor.yml")
self._update_conanfile("conanfile.py")
if os.path.isfile("README.md"):
self._update_readme("README.md")
self._run_conventions_checks()
else:
if arguments.remote:
self._update_remote(arguments.remote, arguments.conanfile, arguments.dry_run, arguments.project_pattern,
arguments.branch_pattern)
else:
if arguments.check:
self._run_conventions_checks()
else:
if arguments.conanfile:
self._update_conanfile(arguments.conanfile)
if arguments.readme:
self._update_readme(arguments.readme)
if arguments.travisfile:
self._update_compiler_jobs(arguments.travisfile)
if arguments.appveyorfile:
self._update_appveyor_file(arguments.appveyorfile)
def _update_compiler_jobs(self, file):
""" Read Travis file and compiler jobs
:param file: Travis file path
"""
# Rename .travis -> .ci
update_other_travis_to_ci_dir_name(self)
update_t_ci_dir_path(self, file)
# Update which Python version macOS is using via pyenv
update_other_pyenv_python_version(self, '.ci/install.sh', python_version_current_pyenv, python_check_for_old_versions)
# Update Travis Linux Python version
update_t_linux_python_version(self, file, python_version_current_travis_linux, python_check_for_old_versions)
# Update which macOS image existing jobs are using
update_t_macos_images(self, file, travis_macos_images_updates)
# Update docker image names lasote -> conanio
update_t_new_docker_image_names(self, file)
# Update Travis Linux CI base image
update_t_linux_image(self, file)
if not self._is_header_only("conanfile.py"):
# Add new compiler versions to CI jobs
update_t_jobs(self, file, compiler_versions, travis_macos_images_compiler_mapping, compiler_versions_deletion)
def _update_appveyor_file(self, file):
update_a_python_environment_variable(self, file)
update_a_python_version(self, file, python_version_current_appveyor, python_check_for_old_versions)
update_a_path_manipulation(self, file)
if not self._is_header_only("conanfile.py"):
# Add new compiler versions to CI jobs
update_a_jobs(self, file, compiler_versions, appveyor_win_msvc_images_compiler_mapping, compiler_versions_deletion)
def replace_in_file(self, file, old, new):
""" Read file and replace ALL occurrences of old by new
:param file: target file
:param old: pattern to match
:param new: new string to be used
:return: True if was replaced. Otherwise, False.
"""
result = False
if os.path.isfile(file):
with open(file) as ifd:
content = ifd.read()
result = old in content
if result:
with open(file, 'w', newline="\n") as ofd:
ofd.write(content.replace(old, new))
else:
self._logger.warning("Could not update {}: File does not exist".format(file))
return result
def file_contains(self, file, word):
""" Read file and search for word
:param file: File path to be read
:param word: word to be found
:return: True if found. Otherwise, False
"""
if os.path.isfile(file):
with open(file) as ifd:
content = ifd.read()
if word in content:
return True
return False
def _is_header_only(self, conanfile):
""" Check if Conan recipe is header-only
:param conanfile: Conan recipe path
:return: True if recipe is header-only. Otherwise, False.
"""
if self.file_contains(conanfile, "self.info.header_only()"):
return True
return False
def _get_branch_names(self, git_repo):
""" Retrieve branch names from current git repo
:param git_repo: Git repository
"""
branches = []
for branch in git_repo.references:
if "HEAD" in str(branch):
continue
branches.append(str(branch).replace("origin/", ""))
# filter non-tags
filtered_branches = [branch for branch in branches if branch not in git_repo.tags]
# remove duplicates
filtered_branches = list(set(filtered_branches))
return filtered_branches
def _update_branch(self, git_repo, branch, file, conanfile, skip_push):
""" Update local branch and push to origin
:param git_repo: Git repository
:param branch: Branch name to be updated
:param file: File name to be updated
:param conanfile: Conan recipe path
:param skip_push: Do not push
"""
git_repo.git.checkout(branch)
self._logger.info("On branch {}".format(git_repo.active_branch))
try:
header_only = self._is_header_only(conanfile)
travis_updater = self._update_compiler_jobs
if header_only:
                travis_updater = lambda travis_file: update_t_ci_dir_path(self, travis_file)  # assumed intent: header-only recipes only need the .travis -> .ci path fix
self._logger.info("Conan recipe for header-only project")
else:
self._logger.info("Conan recipe is not for header-only project")
result = (update_other_travis_to_ci_dir_name(self),
update_other_pyenv_python_version(self, '.ci/install.sh', python_version_current_pyenv, python_check_for_old_versions),
self._update_conanfile(conanfile),
travis_updater(file),
self._update_appveyor_file('appveyor.yml'))
self._logger.info("RESULT: {}".format(result))
if True in result:
self._logger.debug("Add file {} on branch {}".format(file, git_repo.active_branch))
git_repo.git.add('--all')
self._logger.debug("Commit file {} on branch {}".format(file, git_repo.active_branch))
git_repo.index.commit("#482 Update Conan conventions [build=outdated]")
if not skip_push:
self._logger.debug("Push branch {} to origin".format(git_repo.active_branch))
git_repo.git.push('origin', branch)
except Exception as error:
self._logger.warning(error)
pass
def _update_conanfile(self, conanfile):
""" Update Conan recipe with Conan conventions
:param conanfile: Conan recipe path
:return:
"""
return (update_c_deprecated_attributes(self, conanfile),
update_c_default_options_to_dict(self, conanfile),
update_c_generic_exception_to_invalid_conf(self, conanfile),
update_c_openssl_version_patch(self, conanfile, openssl_version_matrix),
update_c_tools_version(self, conanfile))
def _update_readme(self, readme):
""" Update README.md file with new URL
:param readme: Readme file path
:return: True if updated. Otherwise, False.
"""
return update_readme_travis_url(self, readme)
def _run_conventions_checks(self, conanfile="conanfile.py"):
""" Checks for conventions which we can't automatically update
when they should fail
"""
return (check_for_readme(self),
check_for_license(self),
check_for_required_attributes(self, conanfile),
check_for_spdx_license(self, conanfile),
check_for_download_hash(self, conanfile),
check_for_deprecated_generators(self, conanfile),
check_for_deprecated_methods(self, conanfile))
def output_result_update(self, title):
self._logger.info("[\033[1;32mUPDATED\033[0m] {}".format(title))
def output_result_check(self, passed: bool, title, reason="", skipped=False):
if not reason == "":
reason = ": {}".format(reason)
if skipped:
self._logger.info("[SKIPPED] {}{}".format(title, reason))
elif passed:
self._logger.info("[\033[1;32mPASSED\033[0m] {}{}".format(title, reason))
else:
self._logger.error("[\033[1;31mFAILED\033[0m] {}{}".format(title, reason))
def _clone_project(self, github_url):
""" Clone Github project to temporary directory
:param github_url: Project url
"""
temp_dir = tempfile.mkdtemp(prefix='github')
project = github_url[(github_url.rfind('/') + 1):]
project_path = os.path.join(temp_dir, project)
repo = git.Repo.clone_from(github_url, project_path)
self._logger.info("Clone project {} to {}".format(github_url, project_path))
return repo, project_path
def _list_user_projects(self, user):
""" List all projects from Github public account
:param user: User name
"""
projects = []
repos_url = 'https://api.github.com/users/{}/repos'.format(user)
response = requests.get(repos_url)
if not response.ok:
raise Exception("Could not retrieve {}".format(repos_url))
for project in response.json():
projects.append(project["full_name"])
return projects
def _filter_list(self, names, pattern):
""" Filter list by user pattern
:param names: User list names
:param pattern: Project user filter name
"""
regex = re.compile(pattern)
filtered_list = [name for name in names if regex.match(name)]
self._logger.debug("Filtered list: {}".format(filtered_list))
return filtered_list
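    # Illustrative example (added): with pattern 'bincrafters/conan-.*' the list
    # ['bincrafters/conan-zlib', 'bincrafters/community'] is filtered down to
    # ['bincrafters/conan-zlib']; re.match anchors at the start of each name.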
def _update_remote_project(self, remote, conanfile, skip_push, branch_pattern):
""" Clone remote project, update Travis and maybe upload
:param remote: Project full name
:param conanfile: Conan recipe path
        :param skip_push: Do not push
        :param branch_pattern: Branch pattern to filter over project branches
        """
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from grakn.service.Session.util import enums
from grakn.service.Session.util.RequestBuilder import RequestBuilder
from grakn.exception.GraknError import GraknError
class Concept(object):
def __init__(self, concept_id, base_type, tx_service):
self.id = concept_id
self.base_type = base_type
self._tx_service = tx_service
def delete(self):
del_request = RequestBuilder.ConceptMethod.delete()
method_response = self._tx_service.run_concept_method(self.id, del_request)
return
def is_schema_concept(self):
""" Check if this concept is a schema concept """
return isinstance(self, SchemaConcept)
is_schema_concept.__annotations__ = {'return': bool}
def is_type(self):
""" Check if this concept is a Type concept """
return isinstance(self, Type)
is_type.__annotations__ = {'return': bool}
def is_thing(self):
""" Check if this concept is a Thing concept """
return isinstance(self, Thing)
is_thing.__annotations__ = {'return': bool}
def is_attribute_type(self):
""" Check if this concept is an AttributeType concept """
return isinstance(self, AttributeType)
is_attribute_type.__annotations__ = {'return': bool}
def is_entity_type(self):
""" Check if this concept is an EntityType concept """
return isinstance(self, EntityType)
is_entity_type.__annotations__ = {'return': bool}
def is_relation_type(self):
""" Check if this concept is a RelationType concept """
return isinstance(self, RelationType)
is_relation_type.__annotations__ = {'return': bool}
def is_role(self):
""" Check if this concept is a Role """
return isinstance(self, Role)
is_role.__annotations__ = {'return': bool}
def is_rule(self):
""" Check if this concept is a Rule concept """
return isinstance(self, Rule)
is_rule.__annotations__ = {'return': bool}
def is_attribute(self):
""" Check if this concept is an Attribute concept """
return isinstance(self, Attribute)
is_attribute.__annotations__ = {'return': bool}
def is_entity(self):
""" Check if this concept is an Entity concept """
return isinstance(self, Entity)
is_entity.__annotations__ = {'return': bool}
def is_relation(self):
""" Check if this concept is a Relation concept """
return isinstance(self, Relation)
is_relation.__annotations__ = {'return': bool}
class SchemaConcept(Concept):
def label(self, value=None):
"""
Get or set label of this schema concept.
If used as setter returns self
"""
if value is None:
get_label_req = RequestBuilder.ConceptMethod.SchemaConcept.get_label()
method_response = self._tx_service.run_concept_method(self.id, get_label_req)
return method_response.schemaConcept_getLabel_res.label
else:
set_label_req = RequestBuilder.ConceptMethod.SchemaConcept.set_label(value)
method_response = self._tx_service.run_concept_method(self.id, set_label_req)
return self
def is_implicit(self):
""" Check if this schema concept is implicit """
is_implicit_req = RequestBuilder.ConceptMethod.SchemaConcept.is_implicit()
method_response = self._tx_service.run_concept_method(self.id, is_implicit_req)
return method_response.schemaConcept_isImplicit_res.implicit
def sup(self, super_concept=None):
"""
Get or set super schema concept.
If used as a setter returns self
"""
if super_concept is None:
# get direct super schema concept
get_sup_req = RequestBuilder.ConceptMethod.SchemaConcept.get_sup()
method_response = self._tx_service.run_concept_method(self.id, get_sup_req)
get_sup_response = method_response.schemaConcept_getSup_res
# check if received a Null or Concept
whichone = get_sup_response.WhichOneof('res')
if whichone == 'schemaConcept':
grpc_schema_concept = get_sup_response.schemaConcept
from grakn.service.Session.Concept import ConceptFactory
concept = ConceptFactory.create_concept(self._tx_service, grpc_schema_concept)
return concept
elif whichone == 'null':
return None
else:
raise GraknError("Unknown response concent for getting super schema concept: {0}".format(whichone))
else:
# set direct super SchemaConcept of this SchemaConcept
set_sup_req = RequestBuilder.ConceptMethod.SchemaConcept.set_sup(super_concept)
method_response = self._tx_service.run_concept_method(self.id, set_sup_req)
return self
def subs(self):
""" Retrieve the sub schema concepts of this schema concept, as an iterator """
subs_req = RequestBuilder.ConceptMethod.SchemaConcept.subs()
method_response = self._tx_service.run_concept_method(self.id, subs_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.schemaConcept_subs_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.schemaConcept_subs_iter_res.schemaConcept)
)
def sups(self):
""" Retrieve the all supertypes (direct and higher level) of this schema concept as an iterator """
sups_req = RequestBuilder.ConceptMethod.SchemaConcept.sups()
method_response = self._tx_service.run_concept_method(self.id, sups_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.schemaConcept_sups_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.schemaConcept_sups_iter_res.schemaConcept)
)
class Type(SchemaConcept):
def is_abstract(self, value=None):
"""
Get/Set whether this schema Type object is abstract.
When used as a setter returns `self`
"""
if value is None:
# return True/False if the type is set to abstract
is_abstract_req = RequestBuilder.ConceptMethod.Type.is_abstract()
method_response = self._tx_service.run_concept_method(self.id, is_abstract_req)
return method_response.type_isAbstract_res.abstract
else:
set_abstract_req = RequestBuilder.ConceptMethod.Type.set_abstract(value)
method_response = self._tx_service.run_concept_method(self.id, set_abstract_req)
return self
is_abstract.__annotations__ = {'value': bool, 'return': bool}
def attributes(self):
""" Retrieve all attributes attached to this Type as an iterator """
attributes_req = RequestBuilder.ConceptMethod.Type.attributes()
method_response = self._tx_service.run_concept_method(self.id, attributes_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.type_attributes_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.type_attributes_iter_res.attributeType)
)
def instances(self):
""" Retrieve all instances of this Type as an iterator """
instances_req = RequestBuilder.ConceptMethod.Type.instances()
method_response = self._tx_service.run_concept_method(self.id, instances_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.type_instances_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.type_instances_iter_res.thing)
)
def playing(self):
""" Retrieve iterator of roles played by this type """
playing_req = RequestBuilder.ConceptMethod.Type.playing()
method_response = self._tx_service.run_concept_method(self.id, playing_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.type_playing_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.type_playing_iter_res.role)
)
def plays(self, role_concept):
""" Set a role that is played by this Type """
plays_req = RequestBuilder.ConceptMethod.Type.plays(role_concept)
method_response = self._tx_service.run_concept_method(self.id, plays_req)
return self
def unplay(self, role_concept):
""" Remove a role that is played by this Type """
unplay_req = RequestBuilder.ConceptMethod.Type.unplay(role_concept)
method_response = self._tx_service.run_concept_method(self.id, unplay_req)
return
def has(self, attribute_concept_type):
""" Attach an attributeType concept to the type """
has_req = RequestBuilder.ConceptMethod.Type.has(attribute_concept_type)
method_response = self._tx_service.run_concept_method(self.id, has_req)
return self
def unhas(self, attribute_concept_type):
""" Remove an attribute type concept from this type """
unhas_req = RequestBuilder.ConceptMethod.Type.unhas(attribute_concept_type)
method_response = self._tx_service.run_concept_method(self.id, unhas_req)
return self
def keys(self):
""" Retrieve an iterator of attribute types that this Type uses as keys """
keys_req = RequestBuilder.ConceptMethod.Type.keys()
method_response = self._tx_service.run_concept_method(self.id, keys_req)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.type_keys_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.type_keys_iter_res.attributeType)
)
def key(self, attribute_concept_type):
""" Add an attribute type to be a key for this Type """
key_req = RequestBuilder.ConceptMethod.Type.key(attribute_concept_type)
method_response = self._tx_service.run_concept_method(self.id, key_req)
return self
def unkey(self, attribute_concept_type):
""" Remove an attribute type from this Type from being a key """
unkey_req = RequestBuilder.ConceptMethod.Type.unkey(attribute_concept_type)
method_response = self._tx_service.run_concept_method(self.id, unkey_req)
return self
class EntityType(Type):
def create(self):
""" Instantiate an entity of the given type and return it """
create_req = RequestBuilder.ConceptMethod.EntityType.create()
method_response = self._tx_service.run_concept_method(self.id, create_req)
grpc_entity_concept = method_response.entityType_create_res.entity
from grakn.service.Session.Concept import ConceptFactory
return ConceptFactory.create_concept(self._tx_service, grpc_entity_concept)
class AttributeType(Type):
def create(self, value):
""" Create an instance with this AttributeType """
self_data_type = self.data_type()
create_inst_req = RequestBuilder.ConceptMethod.AttributeType.create(value, self_data_type)
method_response = self._tx_service.run_concept_method(self.id, create_inst_req)
grpc_attribute_concept = method_response.attributeType_create_res.attribute
from grakn.service.Session.Concept import ConceptFactory
return ConceptFactory.create_concept(self._tx_service, grpc_attribute_concept)
def attribute(self, value):
""" Retrieve an attribute instance by value if it exists """
self_data_type = self.data_type()
get_attribute_req = RequestBuilder.ConceptMethod.AttributeType.attribute(value, self_data_type)
method_response = self._tx_service.run_concept_method(self.id, get_attribute_req)
response = method_response.attributeType_attribute_res
whichone = response.WhichOneof('res')
if whichone == 'attribute':
from grakn.service.Session.Concept import ConceptFactory
return ConceptFactory.create_concept(self._tx_service, response.attribute)
elif whichone == 'null':
return None
else:
raise GraknError("Unknown `res` key in AttributeType `attribute` response: {0}".format(whichone))
def data_type(self):
""" Get the DataType enum (grakn.DataType) corresponding to the type of this attribute """
get_data_type_req = RequestBuilder.ConceptMethod.AttributeType.data_type()
method_response = self._tx_service.run_concept_method(self.id, get_data_type_req)
response = method_response.attributeType_dataType_res
whichone = response.WhichOneof('res')
if whichone == 'dataType':
# iterate over enum DataType enum to find matching data type
for e in enums.DataType:
if e.value == response.dataType:
return e
else:
# loop exited normally
raise GraknError("Reported datatype NOT in enum: {0}".format(response.dataType))
elif whichone == 'null':
return None
else:
raise GraknError("Unknown datatype response for AttributeType: {0}".format(whichone))
def regex(self, pattern=None):
""" Get or set regex """
if pattern is None:
get_regex_req = RequestBuilder.ConceptMethod.AttributeType.get_regex()
method_response = self._tx_service.run_concept_method(self.id, get_regex_req)
return method_response.attributeType_getRegex_res.regex
else:
set_regex_req = RequestBuilder.ConceptMethod.AttributeType.set_regex(pattern)
method_response = self._tx_service.run_concept_method(self.id, set_regex_req)
return self
regex.__annotations__ = {'pattern': str}
class RelationType(Type):
def create(self):
""" Create an instance of a relation with this type """
create_rel_inst_req = RequestBuilder.ConceptMethod.RelationType.create()
method_response = self._tx_service.run_concept_method(self.id, create_rel_inst_req)
grpc_relation_concept = method_response.relationType_create_res.relation
from grakn.service.Session.Concept import ConceptFactory
return ConceptFactory.create_concept(self._tx_service, grpc_relation_concept)
def roles(self):
""" Retrieve roles in this relation schema type """
get_roles = RequestBuilder.ConceptMethod.RelationType.roles()
method_response = self._tx_service.run_concept_method(self.id, get_roles)
from grakn.service.Session.util import ResponseReader
from grakn.service.Session.Concept import ConceptFactory
return ResponseReader.ResponseReader.iter_res_to_iterator(
self._tx_service,
method_response.relationType_roles_iter.id,
lambda tx_serv, iter_res:
ConceptFactory.create_concept(tx_serv,
iter_res.conceptMethod_iter_res.relationType_roles_iter_res.role)
        )
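# -----------------------------------------------------------------------------
# Illustrative usage sketch (added, not part of the original client).  Concept
# objects are created for you by ConceptFactory from gRPC responses; against an
# open transaction ``tx`` typical schema-building calls look roughly like
#
#   person = tx.put_entity_type('person')                      # EntityType
#   name   = tx.put_attribute_type('name', DataType.STRING)    # AttributeType
#   person.has(name)                                           # returns self
#   alice  = person.create()                                   # Entity instance
#
# The transaction methods shown here live outside this file and are named as an
# assumption about the surrounding client API, purely to put the Concept
# hierarchy in context.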
    supported_algorithm = [MD5, SHA1]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL
url = ""
if alg == MD5:
url = "http://md5.rednoize.com/?p&s=md5&q=%s&_=" % (hashvalue)
else:
url = "http://md5.rednoize.com/?p&s=sha1&q=%s&_=" % (hashvalue)
# Make the request
response = do_HTTP_request ( url )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
return html
class CMD5:
name = "cmd5"
url = "http://www.cmd5.org"
supported_algorithm = [MD5, NTLM]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Look for hidden parameters
response = do_HTTP_request ( "http://www.cmd5.org/" )
html = None
if response:
html = response.read()
else:
return None
match = search (r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="[^"]*" />', html)
viewstate = None
if match:
viewstate = match.group().split('"')[7]
match = search (r'<input type="hidden" name="ctl00.ContentPlaceHolder1.HiddenField1" id="ctl00_ContentPlaceHolder1_HiddenField1" value="[^"]*" />', html)
ContentPlaceHolder1 = ""
if match:
ContentPlaceHolder1 = match.group().split('"')[7]
match = search (r'<input type="hidden" name="ctl00.ContentPlaceHolder1.HiddenField2" id="ctl00_ContentPlaceHolder1_HiddenField2" value="[^"]*" />', html)
ContentPlaceHolder2 = ""
if match:
ContentPlaceHolder2 = match.group().split('"')[7]
# Build the URL
url = "http://www.cmd5.org/"
hash2 = ""
if alg == MD5:
hash2 = hashvalue
else:
if ':' in hashvalue:
hash2 = hashvalue.split(':')[1]
# Build the parameters
params = { "__EVENTTARGET" : "",
"__EVENTARGUMENT" : "",
"__VIEWSTATE" : viewstate,
"ctl00$ContentPlaceHolder1$TextBoxq" : hash2,
"ctl00$ContentPlaceHolder1$InputHashType" : alg,
"ctl00$ContentPlaceHolder1$Button1" : "decrypt",
"ctl00$ContentPlaceHolder1$HiddenField1" : ContentPlaceHolder1,
"ctl00$ContentPlaceHolder1$HiddenField2" : ContentPlaceHolder2 }
header = { "Referer" : "http://www.cmd5.org/" }
# Make the request
response = do_HTTP_request ( url, params, header )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'<span id="ctl00_ContentPlaceHolder1_LabelResult">[^<]*</span>', html)
if match:
return match.group().split('>')[1][:-6]
else:
return None
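# Illustrative sketch (added): every cracker class in this module follows the
# same two-step protocol -- check isSupported() first, then call crack(), which
# performs real HTTP requests.  The check below stays offline; MD5, NTLM and
# CISCO7 are the algorithm constants defined at the top of this module.
def _demo_is_supported():
    cracker = CMD5()
    assert cracker.isSupported(MD5)
    assert cracker.isSupported(NTLM)
    assert not cracker.isSupported(CISCO7)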
class AUTHSECUCISCO7:
name = "authsecu"
url = "http://www.authsecu.com"
supported_algorithm = [CISCO7]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the headers
url = "http://www.authsecu.com/decrypter-dechiffrer-cracker-password-cisco-7/script-password-cisco-7-launcher.php"
# Build the parameters
params = { "valeur_bouton" : "dechiffrage",
"champ1" : hashvalue,
"dechiffrer.x" : 43,
"dechiffrer.y" : 16 }
# Make the request
response = do_HTTP_request ( url, params )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = findall (r'<td><p class="chapitre---texte-du-tableau-de-niveau-1">[^<]*</p></td>', html)
if match:
return match[1].split('>')[2][:-3]
else:
return None
class CACIN:
name = "cacin"
url = "http://cacin.net"
supported_algorithm = [CISCO7]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the headers
url = "http://cacin.net/cgi-bin/decrypt-cisco.pl?cisco_hash=%s" % (hashvalue)
# Make the request
response = do_HTTP_request ( url )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'<tr>Cisco password 7: [^<]*</tr><br><tr><th><br>Decrypted password: .*', html)
if match:
return match.group().split(':')[2][1:]
else:
return None
class IBEAST:
name = "ibeast"
url = "http://www.ibeast.com"
supported_algorithm = [CISCO7]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the headers
url = "http://www.ibeast.com/content/tools/CiscoPassword/decrypt.php?txtPassword=%s&submit1=Enviar+consulta" % (hashvalue)
# Make the request
response = do_HTTP_request ( url )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'<font size="\+2">Your Password is [^<]*<br>', html)
if match:
return match.group().split('is ')[1][:-4]
else:
return None
class PASSWORD_DECRYPT:
name = "password-decrypt"
url = "http://password-decrypt.com"
supported_algorithm = [CISCO7, JUNIPER]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the parameters
url = ""
params = None
if alg == CISCO7:
url = "http://password-decrypt.com/cisco.cgi"
params = { "submit" : "Submit",
"cisco_password" : <PASSWORD>,
"submit" : "Submit" }
else:
url = "http://password-decrypt.com/juniper.cgi"
params = { "submit" : "Submit",
"juniper_password" : <PASSWORD>,
"submit" : "Submit" }
# Make the request
response = do_HTTP_request ( url, params )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'Decrypted Password: <B>[^<]*</B> </p>', html)
if match:
return match.group().split('B>')[1][:-2]
else:
return None
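# Illustrative helper (not part of the original script): every cracker class in this
# module exposes the same informal interface, isSupported(alg) and
# crack(hashvalue, alg), so a driver can simply try each service in turn. The
# instance list below is an assumption made for this sketch, not the script's real
# registry of crackers.
def _example_try_online_crackers(hashvalue, alg):
    """Try each illustrative cracker instance; return (cracker name, plaintext) or (None, None)."""
    example_crackers = [AUTHSECUCISCO7(), CACIN(), IBEAST(), PASSWORD_DECRYPT()]
    for cracker in example_crackers:
        if not cracker.isSupported(alg):
            continue
        result = cracker.crack(hashvalue, alg)
        if result:
            return cracker.name, result
    return None, None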
class BIGTRAPEZE:
name = "bigtrapeze"
url = "http://www.bigtrapeze.com"
supported_algorithm = [MD5]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the headers
url = "http://www.bigtrapeze.com/md5/index.php"
# Build the parameters
params = { "query" : hashvalue,
" Crack " : "Enviar consulta" }
# Build the Headers with a random User-Agent
headers = { "User-Agent" : USER_AGENTS[randint(0, len(USER_AGENTS))-1] }
# Make the request
response = do_HTTP_request ( url, params, headers )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'Congratulations!<li>The hash <strong>[^<]*</strong> has been deciphered to: <strong>[^<]*</strong></li>', html)
if match:
return match.group().split('strong>')[3][:-2]
else:
return None
class HASHCHECKER:
name = "hashchecker"
url = "http://www.hashchecker.com"
supported_algorithm = [MD5]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL and the headers
url = "http://www.hashchecker.com/index.php"
# Build the parameters
params = { "search_field" : hashvalue,
"Submit" : "search" }
# Make the request
response = do_HTTP_request ( url, params )
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'<td><li>Your md5 hash is :<br><li>[^\s]* is <b>[^<]*</b> used charlist :2</td>', html)
if match:
return match.group().split('b>')[1][:-2]
else:
return None
class MD5HASHCRACKER:
name = "md5hashcracker"
url = "http://md5hashcracker.appspot.com"
supported_algorithm = [MD5]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL
url = "http://md5hashcracker.appspot.com/crack"
# Build the parameters
params = { "query" : hashvalue,
"submit" : "Crack" }
# Make the first request
response = do_HTTP_request ( url, params )
# Build the second URL
url = "http://md5hashcracker.appspot.com/status"
# Make the second request
response = do_HTTP_request ( url )
# Analyze the response
if response:
html = response.read()
else:
return None
match = search (r'<td id="cra[^"]*">not cracked</td>', html)
if not match:
match = search (r'<td id="cra[^"]*">cracked</td>', html)
regexp = r'<td id="pla_' + match.group().split('"')[1][4:] + '">[^<]*</td>'
match2 = search (regexp, html)
if match2:
return match2.group().split('>')[1][:-4]
else:
return None
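# Note on the MD5HASHCRACKER flow above: the first POST to /crack only submits the
# hash, and the follow-up GET to /status is read immediately. If the service has not
# finished yet, neither the "cracked" nor the "not cracked" cell may be present.
# A more tolerant client could poll /status a few times, e.g. (sketch, reusing the
# same do_HTTP_request helper; the retry count and delay are arbitrary):
#
#   from time import sleep
#   for _ in range(3):
#       response = do_HTTP_request("http://md5hashcracker.appspot.com/status")
#       html = response.read() if response else ""
#       if 'cracked</td>' in html:
#           break
#       sleep(2)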
class PASSCRACKING:
name = "passcracking"
url = "http://passcracking.com"
supported_algorithm = [MD5]
def isSupported (self, alg):
"""Return True if HASHCRACK can crack this type of algorithm and
False if it cannot."""
if alg in self.supported_algorithm:
return True
else:
return False
def crack (self, hashvalue, alg):
"""Try to crack the hash.
@param hashvalue Hash to crack.
@param alg Algorithm to crack."""
# Check if the cracker can crack this kind of algorithm
if not self.isSupported (alg):
return None
# Build the URL
url = "http://passcracking.com/index.php"
# Build the parameters
boundary = "-----------------------------" + str(randint(1000000000000000000000000000,9999999999999999999999999999))
params = [ '--' + boundary,
'Content-Disposition: form-data; name="admin"',
'',
'false',
'--' + boundary,
'Content-Disposition: form-data; name="admin2"',
'',
'77.php',
'--' + boundary,
'Content-Disposition: form-data; name="datafromuser"',
'',
'%s' % (hashvalue) ,
'--' + boundary + '--', '' ]
body = '\r\n'.join(params)
# Build the headers
headers = { "Content-Type" : "multipart/form-data; boundary=%s" % (boundary),
"Content-length" : len(body) }
# Make the request
request = urllib2.Request ( url )
request.add_header ( "Content-Type", "multipart/form-data; boundary=%s" % (boundary) )
request.add_header ( "Content-length", len(body) )
request.add_data(body)
try:
response = urllib2.urlopen(request)
except:
return None
# Analyze the response
html = None
if response:
html = response.read()
else:
return None
match = search (r'<td>md5 Database</td><td>[^<]*</td><td bgcolor=.FF0000>[^<]*</td>', html)
if match:
return match.group().split('>')[5][:-4]
else:
return None
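# The PASSCRACKING.crack method above builds its multipart/form-data body by hand.
# For reference, the wire format it produces looks roughly like this (boundary value
# shortened; this is an illustration, not output captured from the site):
#
#   -------------------------------1234...
#   Content-Disposition: form-data; name="admin"
#
#   false
#   -------------------------------1234...
#   Content-Disposition: form-data; name="admin2"
#
#   77.php
#   -------------------------------1234...
#   Content-Disposition: form-data; name="datafromuser"
#
#   <the hash value>
#   -------------------------------1234...--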
class ASKCHECK:
name = "askcheck"
url = "http://askcheck.com"
supported_algorithm = [MD4, MD5, SHA1, SHA256]
def isSupported | |
catalog
path = A.link(B)
```
To link tables with more than one foreign key reference between them, use an explicit `on` clause.
```
# let A.c1 be a column with a simple foreign key reference to B.c1, which is a simple key in B
path = A.link(B, on=(A.c1 == B.c1))
```
To link tables with foreign keys on composite keys, use a conjunction of 2 or more equality comparisons in the
`on` clause.
```
# let A.c1, A.c2 be columns that form a foreign key to B.c1, B.c2, which form a composite key in B
path = A.link(B, on=((A.c1 == B.c1) & (A.c2 == B.c2)))
```
By default links use inner join semantics on the foreign key / key equality comparison. The `join_type`
parameter can be used to specify `left`, `right`, or `full` outer join semantics.
:param right: the right hand table of the link expression
:param on: an equality comparison between key and foreign key columns or a conjunction of such comparisons
:param join_type: the join type of this link, which may be 'left', 'right', or 'full' for outer joins, or ''
(the default) for an inner join.
:return: self
"""
if not isinstance(right, _TableWrapper):
raise TypeError("'right' must be a '_TableWrapper' instance")
if on and not (isinstance(on, _ComparisonPredicate) or (isinstance(on, _ConjunctionPredicate) and
on.is_valid_join_condition)):
raise TypeError("'on' must be a comparison or conjuction of comparisons")
if join_type and on is None:
raise ValueError("'on' must be specified for outer joins")
if right._schema._catalog != self._root._schema._catalog:
raise ValueError("'right' is from a different catalog. Cannot link across catalogs.")
if isinstance(right, _TableAlias) and right._name in self._table_instances:
raise ValueError("'right' is a table alias that has already been used.")
elif not isinstance(right, _TableAlias):
# Generate an unused alias name for the table
table_name = right._name
alias_name = table_name
counter = 1
while alias_name in self._table_instances:
counter += 1
alias_name = table_name + str(counter)
right = right.alias(alias_name)
if on is None:
on = right
# Extend path expression
self._path_expression = _Link(self._path_expression, on, right, join_type)
# Bind alias and this data path
self._bind_table_instance(right)
return self
def entities(self):
"""Returns a results set of whole entities from this data path's current context.
```
results1 = my_path.entities()
```
:return: a result set of entities where each element is a whole entity per the table definition and policy.
"""
return self._query()
def aggregates(self, *functions):
"""Returns a results set of computed aggregates from this data path.
By using the built-in subclasses of the `AggregateFunction` class, including `Min`, `Max`, `Sum`, `Avg`, `Cnt`,
`CntD`, `Array`, and `ArrayD`, aggregates can be computed and fetched. Each aggregate function passed to this
method must be given an _alias name_ via its `.alias(...)` method.
```
results1 = my_path.aggregates(Min(col1).alias('mincol1'), Array(col2).alias('arrcol2'))
results2 = my_path.aggregates(Min(col1), Array(col2)) # Error! Aggregates must be aliased.
results3 = my_path.aggregates(col1, Array(col2).alias('arrcol2')) # Error! Cannot mix columns and aggregate functions.
```
:param functions: aliased aggregate functions
:return: a results set with a single row of results.
"""
return self._query(mode=_Project.AGGREGATE, projection=list(functions))
def attributes(self, *attributes):
"""Returns a results set of attributes projected and optionally renamed from this data path.
```
results1 = my_path.attributes(col1, col2) # fetch a subset of attributes of the path
results2 = my_path.attributes(col1.alias('col_1'), col2.alias('col_2')) # fetch and rename the attributes
results3 = my_path.attributes(col1, col2.alias('col_2')) # rename some but not others
```
:param attributes: a list of Columns.
:return: a results set of the projected attributes from this data path.
"""
return self._query(mode=_Project.ATTRIBUTE, projection=list(attributes))
def groupby(self, *keys):
"""Returns an attribute group object.
The attribute group object returned by this method can be used to get a results set of computed aggregates for
groups of attributes from this data path.
With a single group key:
```
results1 = my_path.groupby(col1).attributes(Min(col2).alias('min_col1'), Array(col3).alias('arr_col2'))
```
With more than one group key:
```
results2 = my_path.groupby(col1, col2).attributes(Min(col3).alias('min_col1'), Array(col4).alias('arr_col2'))
```
With aliased group keys:
```
results3 = my_path.groupby(col1.alias('key_one'), col2.alias('keyTwo'))\
.attributes(Min(col3).alias('min_col1'), Array(col4).alias('arr_col2'))
```
With binning:
```
results3 = my_path.groupby(col1.alias('key_one'), Bin(col2, 10, 0, 9999).alias('my_bin'))\
.attributes(Min(col3).alias('min_col1'), Array(col4).alias('arr_col2'))
```
:param keys: a list of columns, aliased columns, or aliased bins, to be used as the grouping key.
:return: an attribute group that supports an `.attributes(...)` method that accepts columns, aliased columns,
and/or aliased aggregate functions as its arguments.
"""
return _AttributeGroup(self, self._query, keys)
def _query(self, mode='entity', projection=[], group_key=[], context=None):
"""Internal method for querying the data path from the perspective of the given 'context'.
:param mode: a valid mode in Project.MODES
:param projection: a projection list.
:param group_key: a group key list (only for attributegroup queries).
:param context: optional context for the query.
:return: a results set.
"""
assert context is None or isinstance(context, _TableAlias)
catalog = self._root._schema._catalog._wrapped_catalog
expression = self._path_expression
if context:
expression = _ResetContext(expression, context)
if mode != _Project.ENTITY:
expression = _Project(expression, mode, projection, group_key)
base_path = str(expression)
def fetcher(limit=None, sort=None):
assert limit is None or isinstance(limit, int)
assert sort is None or hasattr(sort, '__iter__')
limiting = '?limit=%d' % limit if limit else ''
sorting = '@sort(' + ','.join([col._uname for col in sort]) + ')' if sort else ''
path = base_path + sorting + limiting
logger.debug("Fetching " + path)
try:
resp = catalog.get(path)
return resp.json()
except HTTPError as e:
logger.debug(e.response.text)
if 400 <= e.response.status_code < 500:
raise DataPathException(_http_error_message(e), e)
else:
raise e
return _ResultSet(self._base_uri + base_path, fetcher)
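# Illustrative note on the fetcher closure above: with a hypothetical base_path of
# "/entity/S:T", a call such as fetcher(limit=10, sort=[col]) issues a GET for
# "/entity/S:T@sort(<col._uname>)?limit=10" against the wrapped catalog, and the
# JSON body of the response becomes the result set's backing document list.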
def merge(self, path):
"""Merges the current path with the given path.
The right-hand 'path' must be rooted on a `_TableAlias` object that exists (by alias name) within this path
(the left-hand path). It _must not_ have other shared table aliases.
:param path: a `DataPath` object rooted on a table alias that can be found in this path
:return: this path merged with the given (right-hand) path
"""
if not isinstance(path, DataPath):
raise TypeError("'path' must be an instance of %s" % type(self).__name__)
if path._root._name not in self._table_instances:
raise ValueError("right-hand path root not found in this path's table instances")
if not path._root._equivalent(self._table_instances[path._root._name]):
raise ValueError("right-hand path root is not equivalent to the matching table instance in this path")
if self._table_instances.keys() & path._table_instances.keys() != {path._root._name}:
raise ValueError("overlapping table instances found in right-hand path")
# update this path as rebased right-hand path
temp = copy.deepcopy(path._path_expression)
temp.rebase(self._path_expression, self._table_instances[path._root._name])
self._path_expression = temp
# copy and bind table instances from right-hand path
for alias in path._table_instances:
if alias not in self.table_instances:
self._bind_table_instance(copy.deepcopy(path._table_instances[alias]))
# set the context
self._context = self._table_instances[path._context._name]
return self
class _ResultSet (object):
"""A set of results for various queries or data manipulations.
The result set is produced by a path. The results may be explicitly fetched. The result set behaves like a
container. If the result set has not been fetched explicitly, on first use of container operations, it will
be implicitly fetched from the catalog.
"""
def __init__(self, uri, fetcher_fn):
"""Initializes the _ResultSet.
:param uri: the uri for the entity set in the catalog.
:param fetcher_fn: a function that fetches the entities from the catalog.
"""
assert fetcher_fn is not None
self._fetcher_fn = fetcher_fn
self._results_doc = None
self._sort_keys = None
self.uri = uri
@property
def _results(self):
if self._results_doc is None:
self.fetch()
return self._results_doc
def __len__(self):
return len(self._results)
def __getitem__(self, item):
return self._results[item]
def __iter__(self):
return iter(self._results)
def sort(self, *attributes):
"""Orders the results set by the given attributes.
:param attributes: Columns, column aliases, or aggregate function aliases. The sort attributes must be
projected by the originating query.
:return: self
"""
if not attributes:
raise ValueError("No sort attributes given.")
if not all(isinstance(a, _ColumnWrapper) or isinstance(a, _ColumnAlias) or isinstance(a, _AggregateFunctionAlias)
or isinstance(a, _SortDescending) for a in attributes):
raise TypeError("Sort keys must be column, column alias, or aggregate function alias")
self._sort_keys = attributes
self._results_doc = None
return self
def fetch(self, limit=None):
"""Fetches the results from the catalog.
:param limit: maximum number of results to fetch from the catalog.
:return: self
"""
limit = int(limit) if limit else None
self._results_doc = self._fetcher_fn(limit, self._sort_keys)
logger.debug("Fetched %d entities" % len(self._results_doc))
return self
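# Illustrative use of _ResultSet (the names below are assumptions for the example):
# results behave like a lazy container, so the first container operation triggers an
# implicit fetch, while sort()/fetch() can be chained for explicit control.
#
#   results = my_path.entities()               # no request issued yet
#   results.sort(some_column).fetch(limit=5)   # explicit fetch of at most 5 rows
#   for row in results:                        # iterates the already-fetched rows
#       ...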
class _TableWrapper (object):
"""Wraps a Table for datapath expressions.
"""
def __init__(self, schema, table):
"""Creates a _TableWrapper object.
:param schema: the schema object to which this table
self._VertexCounter += 40
def _opVertexColNorm(self):
# Opcode 69
newObject = dict()
newObject['Datatype'] = "VertexColourWithNormal"
newObject['ColourNameIdx'] = self._readUShort()
newObject['Flags'] = self._readUShort()
newObject['Coordinate'] = np.zeros((1, 3))
# For x, y and z
for colIdx in range(3):
newObject['Coordinate'][0, colIdx] = self._readDouble()
newObject['Normal'] = np.zeros((1, 3))
# For i, j and k
for colIdx in range(3):
newObject['Normal'][0, colIdx] = self._readFloat()
newObject['PackedColour'] = self._readUInt()
newObject['VertexColourIndex'] = self._readUInt()
self._skip(4)
self._addObject(newObject)
self.Records['Vertices'][self._VertexCounter] = newObject
self.Records['VertexUV'].append(None)
self._VertexCounter += 56
def _opVertexColNormUV(self):
# Opcode 70
newObject = dict()
newObject['Datatype'] = "VertexColourWithNormalUV"
newObject['ColourNameIdx'] = self._readUShort()
newObject['Flags'] = self._readUShort()
newObject['Coordinate'] = np.zeros((1, 3))
# For x, y and z
for colIdx in range(3):
newObject['Coordinate'][0, colIdx] = self._readDouble()
newObject['Normal'] = np.zeros((1, 3))
# For i, j and k
for colIdx in range(3):
newObject['Normal'][0, colIdx] = self._readFloat()
newObject['TextureCoordinate'] = np.zeros((1, 2))
newObject['TextureCoordinate'][0, 0] = self._readFloat()
newObject['TextureCoordinate'][0, 1] = self._readFloat()
newObject['PackedColour'] = self._readUInt()
newObject['VertexColourIndex'] = self._readUInt()
self._skip(4)
self._addObject(newObject)
self.Records['Vertices'][self._VertexCounter] = newObject
self.Records['VertexUV'].append(newObject['TextureCoordinate'])
self._VertexCounter += 64
def _opVertexColUV(self):
# Opcode 71
newObject = dict()
newObject['Datatype'] = "VertexColourWithUV"
newObject['ColourNameIdx'] = self._readUShort()
newObject['Flags'] = self._readUShort()
newObject['Coordinate'] = np.zeros((1, 3))
# For x, y and z
for colIdx in range(3):
newObject['Coordinate'][0, colIdx] = self._readDouble()
newObject['TextureCoordinate'] = np.zeros((1, 2))
newObject['TextureCoordinate'][0, 0] = self._readFloat()
newObject['TextureCoordinate'][0, 1] = self._readFloat()
newObject['PackedColour'] = self._readUInt()
newObject['VertexColourIndex'] = self._readUInt()
self._addObject(newObject)
self.Records['Vertices'][self._VertexCounter] = newObject
self.Records['VertexUV'].append(newObject['TextureCoordinate'])
self._VertexCounter += 48
def _opVertexList(self):
# Opcode 72
newObject = dict()
# Read the data to memory and extract data as normal with modified
# read functions.
self._readChunk()
newObject['Datatype'] = "VertexList"
RecordLength = len(self._Chunk)
newObject['ByteOffset'] = []
for verIdx in range(RecordLength // 4):
newObject['ByteOffset'].append(self._readUInt(fromChunk = True))
# The data chunk should be processed. Reset the variable to None:
self._Chunk = None
self._addObject(newObject)
# And keep a copy in the vertex list
self.Records["VertexList"].append(newObject['ByteOffset'])
self.Records["TexturePatterns"].append(self._TexturePatternIdx)
def _opLoD(self):
# Opcode 73
newObject = dict()
newObject['Datatype'] = 'LevelOfDetail'
newObject['ASCIIID'] = self._readString(8)
# Skip over the reserved area
self.read.seek(4, os.SEEK_CUR)
newObject['SwitchInDistance'] = self._readDouble()
newObject['SwitchOutDistance'] = self._readDouble()
newObject['FXID1'] = self._readShort()
newObject['FXID2'] = self._readShort()
newObject['Flags'] = self._readUInt()
varNames = ['x', 'y', 'z']
for varName in varNames:
newObject[varName + 'Centre'] = self._readDouble()
newObject['TransitionRange'] = self._readDouble()
newObject['SignificantSize'] = self._readDouble()
self._addObject(newObject)
def _opBoundingBox(self):
# Opcode 74
newObject = dict()
newObject['Datatype'] = 'BoundingBox'
# Skip over the reserved area
self._skip(4)
Positions = ['Lowest', 'Highest']
Axes = ['x', 'y', 'z']
for position in Positions:
for axis in Axes:
newObject[axis + position] = self._readDouble()
# Finally, add the object to the stack
self._addObject(newObject)
def _opRotEdge(self):
# Opcode 76
newObject = dict()
newObject['Datatype'] = 'RotateAboutEdge'
self._skip(4)
varNames = ['FirstPoint', 'SecondPoint']
for varName in varNames:
newObject[varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[varName][0, colIdx] = self._readDouble()
newObject['Angle'] = self._readFloat()
self._skip(4)
self._addObject(newObject)
def _opTranslate(self):
# Opcode 78
newObject = dict()
newObject['Datatype'] = 'Translate'
self._skip(4)
varNames = ['From', 'Delta']
for varName in varNames:
newObject[varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[varName][0, colIdx] = self._readDouble()
self._addObject(newObject)
def _opScale(self):
# Opcode 79
newObject = dict()
newObject['Datatype'] = 'Scale'
self._skip(4)
newObject['ScaleCentre'] = np.zeros((1, 3))
for colIdx in range(3):
newObject['ScaleCentre'][0, colIdx] = self._readDouble()
varNames = ['xScale', 'yScale', 'zScale']
for varName in varNames:
newObject[varName] = self._readFloat()
self._skip(4)
self._addObject(newObject)
def _opRotPoint(self):
# Opcode 80
newObject = dict()
newObject['Datatype'] = 'RotateAboutPoint'
self._skip(4)
newObject['RotationCentre'] = np.zeros((1, 3))
for colIdx in range(3):
newObject['RotationCentre'][0, colIdx] = self._readDouble()
varNames = ['iAxis', 'jAxis', 'kAxis', 'Angle']
for varName in varNames:
newObject[varName] = self._readFloat()
self._addObject(newObject)
def _opRotScPoint(self):
# Opcode 81
newObject = dict()
newObject['Datatype'] = 'RotateScaleToPoint'
self._skip(4)
varNames = ['ScaleCentre', 'ReferencePoint', 'ToPoint']
for varName in varNames:
newObject[varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[varName][0, colIdx] = self._readDouble()
varNames = ['OverallScale', 'ScaleInDirection', 'Angle']
for varName in varNames:
newObject[varName] = self._readFloat()
self._skip(4)
self._addObject(newObject)
def _opPut(self):
# Opcode 82
newObject = dict()
newObject['Datatype'] = 'Put'
self._skip(4)
varNames = ['FromOrigin', 'FromAlign', 'FromTrack', 'ToOrigin', 'ToAlign', 'ToTrack']
for varName in varNames:
newObject[varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[varName][0, colIdx] = self._readDouble()
self._addObject(newObject)
def _opEyeTrackPalette(self):
# Opcode 83
newObject = dict()
newObject['Datatype'] = 'EyepointAndTrackplanePalette'
self._skip(4)
for eyePointIdx in range(10):
# Keep this simple
eyePoint = 'EyePoint' + format(eyePointIdx, '02d')
newObject[eyePoint] = dict()
# Now the file
newObject[eyePoint]['RotationCentre'] = np.zeros((1, 3))
for colIdx in range(3):
newObject[eyePoint]['RotationCentre'][0, colIdx] = self._readDouble()
varNames = ['Yaw', 'Pitch', 'Roll']
for varName in varNames:
newObject[eyePoint][varName] = self._readFloat()
newObject[eyePoint]['RotationMatrix'] = np.zeros((4, 4))
for n in range(16):
# Enter elements of a matrix by going across their columns
newObject[eyePoint]['RotationMatrix'][n // 4, n % 4] = self._readFloat()
varNames = ['FieldOfView', 'Scale', 'NearClippingPlane', 'FarClippingPlane']
for varName in varNames:
newObject[eyePoint][varName] = self._readFloat()
newObject[eyePoint]['FlythroughMatrix'] = np.zeros((4, 4))
for n in range(16):
# Enter elements of a matrix by going across their columns
newObject[eyePoint]['FlythroughMatrix'][n // 4, n % 4] = self._readFloat()
newObject[eyePoint]['EyepointPosition'] = np.zeros((1, 3))
for colIdx in range(3):
newObject[eyePoint]['EyepointPosition'][0, colIdx] = self._readFloat()
newObject[eyePoint]['YawFlythrough'] = self._readFloat()
newObject[eyePoint]['PitchFlythrough'] = self._readFloat()
newObject[eyePoint]['EyepointDirection'] = np.zeros((1, 3))
for colIdx in range(3):
newObject[eyePoint]['EyepointDirection'][0, colIdx] = self._readFloat()
varNames = ['NoFlythrough', 'OrthoView', 'ValidEyepoint', 'xImageOffset', 'yImageOffset', 'ImageZoom']
for varName in varNames:
newObject[eyePoint][varName] = self._readInt()
# Skip over 4*8 + 4 of reserved space
self._skip(36)
for trackplaneIdx in range(10):
trackplane = 'Trackplane' + format(trackplaneIdx, '02d')
newObject[trackplane] = dict()
newObject[trackplane]['Valid'] = self._readInt()
self._skip(4)
varNames = ['Origin', 'Alignment', 'Plane']
for varName in varNames:
newObject[trackplane][varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[trackplane][varName][0, colIdx] = self._readDouble()
newObject[trackplane]['GridVisible'] = self._readBool()
varNames = ['GridType', 'GridUnder']
for varName in varNames:
newObject[trackplane][varName] = self._readUChar()
self._skip(1)
newObject[trackplane]['GridAngle'] = self._readFloat()
varNames = ['xGridSpace', 'yGridSpace']
for varName in varNames:
newObject[trackplane][varName] = self._readDouble()
varNames = ['RadialGridDirection', 'RectangularGridDirection']
for varName in varNames:
newObject[trackplane][varName] = self._readSChar()
newObject[trackplane]['SnapToGrid'] = self._readUChar()
self._skip(2)
newObject[trackplane]['GridSize'] = self._readDouble()
# This may be incorrect. Record says a 4 byte boolean! I assume 4 * 1 byte booleans.
for quadrant in range(1, 5):
newObject[trackplane]['VisibleGridMask' + str(quadrant)] = self._readBool()
self._skip(4)
self._addObject(newObject)
def _opMesh(self):
# Opcode 84
newObject = dict()
newObject['Datatype'] = 'Mesh'
# This is identical to the face record.
newObject['ASCIIID'] = self._readString(8)
self._skip(4)
newObject['IRColourCode'] = self._readUInt()
newObject['RelativePriority'] = self._readShort()
newObject['DrawType'] = self._readUChar()
drawTypes = [0, 1, 2, 3, 4, 8, 9, 10]
if newObject['DrawType'] not in drawTypes:
raise Exception("Unable to determine draw type.")
newObject['TextureWhite'] = self._readBool()
newObject['ColourNameIdx'] = self._readUShort()
newObject['AltColourNameIdx'] = self._readUShort()
# Skip over reserved
self._skip(1)
templateTypes = [0, 1, 2, 4]
newObject['Template'] = self._readUChar()
if newObject['Template'] not in templateTypes:
raise Exception("Unable to determine template type.")
varNames = ['DetailTexturePatternIdx', 'TexturePatternIdx', 'MaterialIdx']
for varName in varNames:
newObject[varName] = self._readShort()
if newObject[varName] == -1:
newObject[varName] = None
newObject['SurfaceMaterialCode'] = self._readShort()
newObject['FeatureID'] = self._readShort()
newObject['IRMaterialCode'] = self._readUInt()
newObject['Transparency'] = self._readUShort()
newObject['LODGenerationControl'] = self._readUChar()
newObject['LineStyleIdx'] = self._readUChar()
newObject['Flags'] = self._readUInt()
lightModes = [0, 1, 2, 3]
newObject['LightMode'] = self._readUChar()
if newObject['LightMode'] not in lightModes:
raise Exception("Unable to determine light mode.")
# Skip over reserved
self._skip(7)
newObject['PackedColour'] = self._readUInt()
newObject['AltPackedColour'] = self._readUInt()
newObject['TextureMappingIdx'] = self._readShort()
if newObject['TextureMappingIdx'] == -1:
newObject['TextureMappingIdx'] = None
self._skip(2)
# Read as signed ints so the -1 "no colour" sentinel can actually be detected
newObject['PrimaryColourIdx'] = self._readInt()
if newObject['PrimaryColourIdx'] == -1:
newObject['PrimaryColourIdx'] = None
newObject['AltColourIdx'] = self._readInt()
if newObject['AltColourIdx'] == -1:
newObject['AltColourIdx'] = None
self._skip(2)
newObject['ShaderIdx'] = self._readShort()
if newObject['ShaderIdx'] == -1:
newObject['ShaderIdx'] = None
self._addObject(newObject)
def _opLocVertexPool(self):
# Opcode 85
newObject = dict()
# Read the data to memory and extract data as normal with modified
# read functions
self._readChunk()
newObject['Datatype'] = | |
if not IsSourceFile(filename_cc):
return (False, '')
fileinfo_h = FileInfo(filename_h)
if not IsHeaderFile(filename_h):
return (False, '')
filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in range(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if '<' not in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = list(include_dict.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if not header_found:
if IsSourceFile(filename):
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in sorted(required, key=required.__getitem__):
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
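# Worked example for the check above (illustrative): if line 12 of foo.cc contains
#   std::set<int> s;
# and neither foo.cc nor its matching foo.h includes <set>, then `required` maps
# '<set>' to (12, 'set<>') and the reported error reads
#   Add #include <set> for set<>
# with category 'build/include_what_you_use' and confidence 4.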
_RE_PATTERN_EXPLICIT_MAKEPAIR = regex.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a column on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in range(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in range(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
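# Example of what CheckRedundantVirtual flags (illustrative C++ shown in comments):
#   virtual void Draw() override;
# is reported as '"virtual" is redundant since function is already declared as
# "override"', whereas a plain `virtual void Draw();` declaration is left alone.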
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is | |
filter2_4_2_vale = ndarray.array(np.random.normal(0, 0.5, (192, 160, 1, 7)), executor_ctx)
filter2_4_3_val = ndarray.array(np.random.normal(0, 0.5, (192, 768, 1, 1)), executor_ctx)
# branch_0
incep2_4_0 = self.conv2dplusrelu(concat2_3, filter2_4_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_4_1a = self.conv2dplusrelu(concat2_3, filter2_4_1a, "NCHW", "SAME", 1, 1)
incep2_4_1b = self.conv2dplusrelu(incep2_4_1a, filter2_4_1b, "NCHW", "SAME", 1, 1)
incep2_4_1 = self.conv2dplusrelu(incep2_4_1b, filter2_4_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_4_2a = self.conv2dplusrelu(concat2_3, filter2_4_2a, "NCHW", "SAME", 1, 1)
incep2_4_2b = self.conv2dplusrelu(incep2_4_2a, filter2_4_2b, "NCHW", "SAME", 1, 1)
incep2_4_2c = self.conv2dplusrelu(incep2_4_2b, filter2_4_2c, "NCHW", "SAME", 1, 1)
incep2_4_2d = self.conv2dplusrelu(incep2_4_2c, filter2_4_2d, "NCHW", "SAME", 1, 1)
incep2_4_2 = self.conv2dplusrelu(incep2_4_2d, filter2_4_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_4_3a = self.ad.pooling_2d_forward_op(concat2_3, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_4_3 = self.conv2dplusrelu(incep2_4_3a, filter2_4_3, "NCHW", "SAME", 1, 1)
concat2_4a = self.ad.concat_forward_op(incep2_4_0, incep2_4_1)
concat2_4b = self.ad.concat_forward_op(concat2_4a, incep2_4_2)
concat2_4 = self.ad.concat_forward_op(concat2_4b, incep2_4_3)
# inception_moudle2_5
filter2_5_0 = self.ad.Variable("filter2_5_0")
filter2_5_1a = self.ad.Variable("filter2_5_1a")
filter2_5_1b = self.ad.Variable("filter2_5_1b")
filter2_5_1c = self.ad.Variable("filter2_5_1c")
filter2_5_2a = self.ad.Variable("filter2_5_2a")
filter2_5_2b = self.ad.Variable("filter2_5_2b")
filter2_5_2c = self.ad.Variable("filter2_5_2c")
filter2_5_2d = self.ad.Variable("filter2_5_2d")
filter2_5_2e = self.ad.Variable("filter2_5_2e")
filter2_5_3 = self.ad.Variable("filter2_5_3a")
filter2_5_0_val = ndarray.array(np.random.normal(0, 0.5, (192, 768, 1, 1)), executor_ctx)
filter2_5_1_vala = ndarray.array(np.random.normal(0, 0.5, (160, 768, 1, 1)), executor_ctx)
filter2_5_1_valb = ndarray.array(np.random.normal(0, 0.5, (160, 160, 1, 7)), executor_ctx)
filter2_5_1_valc = ndarray.array(np.random.normal(0, 0.5, (192, 160, 7, 1)), executor_ctx)
filter2_5_2_vala = ndarray.array(np.random.normal(0, 0.5, (160, 768, 1, 1)), executor_ctx)
filter2_5_2_valb = ndarray.array(np.random.normal(0, 0.5, (160, 160, 7, 1)), executor_ctx)
filter2_5_2_valc = ndarray.array(np.random.normal(0, 0.5, (160, 160, 1, 7)), executor_ctx)
filter2_5_2_vald = ndarray.array(np.random.normal(0, 0.5, (160, 160, 7, 1)), executor_ctx)
filter2_5_2_vale = ndarray.array(np.random.normal(0, 0.5, (192, 160, 1, 7)), executor_ctx)
filter2_5_3_val = ndarray.array(np.random.normal(0, 0.5, (192, 768, 1, 1)), executor_ctx)
# branch_0
incep2_5_0 = self.conv2dplusrelu(concat2_4, filter2_5_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_5_1a = self.conv2dplusrelu(concat2_4, filter2_5_1a, "NCHW", "SAME", 1, 1)
incep2_5_1b = self.conv2dplusrelu(incep2_5_1a, filter2_5_1b, "NCHW", "SAME", 1, 1)
incep2_5_1 = self.conv2dplusrelu(incep2_5_1b, filter2_5_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_5_2a = self.conv2dplusrelu(concat2_4, filter2_5_2a, "NCHW", "SAME", 1, 1)
incep2_5_2b = self.conv2dplusrelu(incep2_5_2a, filter2_5_2b, "NCHW", "SAME", 1, 1)
incep2_5_2c = self.conv2dplusrelu(incep2_5_2b, filter2_5_2c, "NCHW", "SAME", 1, 1)
incep2_5_2d = self.conv2dplusrelu(incep2_5_2c, filter2_5_2d, "NCHW", "SAME", 1, 1)
incep2_5_2 = self.conv2dplusrelu(incep2_5_2d, filter2_5_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_5_3a = self.ad.pooling_2d_forward_op(concat2_4, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_5_3 = self.conv2dplusrelu(incep2_5_3a, filter2_5_3, "NCHW", "SAME", 1, 1)
concat2_5a = self.ad.concat_forward_op(incep2_5_0, incep2_5_1)
concat2_5b = self.ad.concat_forward_op(concat2_5a, incep2_5_2)
concat2_5 = self.ad.concat_forward_op(concat2_5b, incep2_5_3)
# # inception_moudle3
# inception_moudle3_1
filter3_1_0a = self.ad.Variable("filter3_1_0a")
filter3_1_0b = self.ad.Variable("filter3_1_0b")
filter3_1_1a = self.ad.Variable("filter3_1_1a")
filter3_1_1b = self.ad.Variable("filter3_1_1b")
filter3_1_1c = self.ad.Variable("filter3_1_1c")
filter3_1_1d = self.ad.Variable("filter3_1_1d")
filter3_1_0_vala = ndarray.array(np.random.normal(0, 0.5, (192, 768, 1, 1)), executor_ctx)
filter3_1_0_valb = ndarray.array(np.random.normal(0, 0.5, (320, 192, 3, 3)), executor_ctx)
filter3_1_1_vala = ndarray.array(np.random.normal(0, 0.5, (192, 768, 1, 1)), executor_ctx)
filter3_1_1_valb = ndarray.array(np.random.normal(0, 0.5, (192, 192, 1, 7)), executor_ctx)
filter3_1_1_valc = ndarray.array(np.random.normal(0, 0.5, (192, 192, 7, 1)), executor_ctx)
filter3_1_1_vald = ndarray.array(np.random.normal(0, 0.5, (192, 192, 3, 3)), executor_ctx)
# branch_0
incep3_1_0a = self.conv2dplusrelu(concat2_5, filter3_1_0a, "NCHW", "SAME", 1, 1)
incep3_1_0 = self.conv2dplusrelu(incep3_1_0a, filter3_1_0b, "NCHW", "VALID", 2, 2)
# branch 1
incep3_1_1a = self.conv2dplusrelu(concat2_5, filter3_1_1a, "NCHW", "SAME", 1, 1)
incep3_1_1b = self.conv2dplusrelu(incep3_1_1a, filter3_1_1b, "NCHW", "SAME", 1, 1)
incep3_1_1c = self.conv2dplusrelu(incep3_1_1b, filter3_1_1c, "NCHW", "SAME", 1, 1)
incep3_1_1 = self.conv2dplusrelu(incep3_1_1c, filter3_1_1d, "NCHW", "VALID", 2, 2)
# branch 2
incep3_1_2 = self.ad.pooling_2d_forward_op(concat2_5, "NCHW", "mean", 0, 0, 2, 2, 3, 3)
concat3_1a = self.ad.concat_forward_op(incep3_1_0, incep3_1_1)
concat3_1 = self.ad.concat_forward_op(concat3_1a, incep3_1_2)
# inception_moudle3_2
filter3_2_0 = self.ad.Variable("filter3_2_0")
filter3_2_1a = self.ad.Variable("filter3_2_1a")
filter3_2_1b = self.ad.Variable("filter3_2_1b")
filter3_2_1c = self.ad.Variable("filter3_2_1c")
filter3_2_2a = self.ad.Variable("filter3_2_2a")
filter3_2_2b = self.ad.Variable("filter3_2_2b")
filter3_2_2c = self.ad.Variable("filter3_2_2c")
filter3_2_2d = self.ad.Variable("filter3_2_2d")
filter3_2_3 = self.ad.Variable("filter3_2_3a")
filter3_2_0_val = ndarray.array(np.random.normal(0, 0.5, (320, 1280, 1, 1)), executor_ctx)
filter3_2_1_vala = ndarray.array(np.random.normal(0, 0.5, (384, 1280, 1, 1)), executor_ctx)
filter3_2_1_valb = ndarray.array(np.random.normal(0, 0.5, (384, 384, 1, 3)), executor_ctx)
filter3_2_1_valc = ndarray.array(np.random.normal(0, 0.5, (384, 384, 3, 1)), executor_ctx)
filter3_2_2_vala = ndarray.array(np.random.normal(0, 0.5, (448, 1280, 1, 1)), executor_ctx)
filter3_2_2_valb = ndarray.array(np.random.normal(0, 0.5, (384, 448, 3, 3)), executor_ctx)
filter3_2_2_valc = ndarray.array(np.random.normal(0, 0.5, (384, 384, 1, 3)), executor_ctx)
filter3_2_2_vald = ndarray.array(np.random.normal(0, 0.5, (384, 384, 3, 1)), executor_ctx)
filter3_2_3_val = ndarray.array(np.random.normal(0, 0.5, (192, 1280, 1, 1)), executor_ctx)
# branch_0
incep3_2_0 = self.conv2dplusrelu(concat3_1, filter3_2_0, "NCHW", "SAME", 1, 1)
# branch 1
incep3_2_1a = self.conv2dplusrelu(concat3_1, filter3_2_1a, "NCHW", "SAME", 1, 1)
incep3_2_1b = self.conv2dplusrelu(incep3_2_1a, filter3_2_1b, "NCHW", "SAME", 1, 1)
incep3_2_1c = self.conv2dplusrelu(incep3_2_1a, filter3_2_1c, "NCHW", "SAME", 1, 1)
incep3_2_1 = self.ad.concat_forward_op(incep3_2_1b, incep3_2_1c)
# branch 2
incep3_2_2a = self.conv2dplusrelu(concat3_1, filter3_2_2a, "NCHW", "SAME", 1, 1)
incep3_2_2b = self.conv2dplusrelu(incep3_2_2a, filter3_2_2b, "NCHW", "SAME", 1, 1)
incep3_2_2c = self.conv2dplusrelu(incep3_2_2b, filter3_2_2c, "NCHW", "SAME", 1, 1)
incep3_2_2d = self.conv2dplusrelu(incep3_2_2b, filter3_2_2d, "NCHW", "SAME", 1, 1)
incep3_2_2 = self.ad.concat_forward_op(incep3_2_2c, incep3_2_2d)
# branch 3
incep3_2_3a = self.ad.pooling_2d_forward_op(concat3_1, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep3_2_3 = self.conv2dplusrelu(incep3_2_3a, filter3_2_3, "NCHW", "SAME", 1, 1)
concat3_2a = self.ad.concat_forward_op(incep3_2_0, incep3_2_1)
concat3_2b = self.ad.concat_forward_op(concat3_2a, incep3_2_2)
concat3_2 = self.ad.concat_forward_op(concat3_2b, incep3_2_3)
# # inception_moudle3_3
filter3_3_0 = self.ad.Variable("filter3_3_0")
filter3_3_1a = self.ad.Variable("filter3_3_1a")
filter3_3_1b = self.ad.Variable("filter3_3_1b")
filter3_3_1c = self.ad.Variable("filter3_3_1c")
filter3_3_2a = self.ad.Variable("filter3_3_2a")
filter3_3_2b = self.ad.Variable("filter3_3_2b")
filter3_3_2c = self.ad.Variable("filter3_3_2c")
filter3_3_2d = self.ad.Variable("filter3_3_2d")
filter3_3_3 = self.ad.Variable("filter3_3_3a")
filter3_3_0_val = ndarray.array(np.random.normal(0, 0.5, (320, 2048, 1, 1)), executor_ctx)
filter3_3_1_vala = ndarray.array(np.random.normal(0, 0.5, (384, 2048, 1, 1)), executor_ctx)
filter3_3_1_valb = ndarray.array(np.random.normal(0, 0.5, (384, 384, 1, 3)), executor_ctx)
filter3_3_1_valc = ndarray.array(np.random.normal(0, 0.5, (384, 384, 3, 1)), executor_ctx)
filter3_3_2_vala = ndarray.array(np.random.normal(0, 0.5, (448, 2048, 1, 1)), executor_ctx)
filter3_3_2_valb = ndarray.array(np.random.normal(0, 0.5, (384, 448, 3, 3)), executor_ctx)
filter3_3_2_valc = ndarray.array(np.random.normal(0, 0.5, (384, 384, 1, 3)), executor_ctx)
filter3_3_2_vald = ndarray.array(np.random.normal(0, 0.5, (384, 384, 3, 1)), executor_ctx)
filter3_3_3_val = ndarray.array(np.random.normal(0, 0.5, (192, 2048, 1, 1)), executor_ctx)
# branch_0
incep3_3_0 = self.conv2dplusrelu(concat3_2, filter3_3_0, "NCHW", "SAME", 1, 1)
# branch 1
incep3_3_1a = self.conv2dplusrelu(concat3_2, filter3_3_1a, "NCHW", "SAME", 1, 1)
incep3_3_1b = self.conv2dplusrelu(incep3_3_1a, filter3_3_1b, "NCHW", "SAME", 1, 1)
incep3_3_1c = self.conv2dplusrelu(incep3_3_1a, filter3_3_1c, "NCHW", "SAME", 1, 1)
incep3_3_1 = self.ad.concat_forward_op(incep3_3_1b, incep3_3_1c)
# branch 2
incep3_3_2a = self.conv2dplusrelu(concat3_2, filter3_3_2a, "NCHW", "SAME", 1, 1)
incep3_3_2b = self.conv2dplusrelu(incep3_3_2a, filter3_3_2b, "NCHW", "SAME", 1, 1)
incep3_3_2c = self.conv2dplusrelu(incep3_3_2b, filter3_3_2c, "NCHW", "SAME", 1, 1)
incep3_3_2d = self.conv2dplusrelu(incep3_3_2b, filter3_3_2d, "NCHW", "SAME", 1, 1)
incep3_3_2 = self.ad.concat_forward_op(incep3_3_2c, incep3_3_2d)
# branch 3
incep3_3_3a = self.ad.pooling_2d_forward_op(concat3_2, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep3_3_3 = self.conv2dplusrelu(incep3_3_3a, filter3_3_3, "NCHW", "SAME", 1, 1)
concat3_3a = self.ad.concat_forward_op(incep3_3_0, incep3_3_1)
concat3_3b = self.ad.concat_forward_op(concat3_3a, incep3_3_2)
concat3_3 = self.ad.concat_forward_op(concat3_3b, incep3_3_3)
filtera1 = self.ad.Variable("filtera1")
filtera1val = ndarray.array(np.random.normal(0, 0.5, (1000, 2048, 1, 1)), executor_ctx)
W = self.ad.Variable("filtersmul")
W_val = ndarray.array(np.random.normal(0, 0.5, (1000, 1000)), executor_ctx)
b = self.ad.Variable("biases")
b_val = ndarray.array(np.random.normal(0, 0.5, (1000)), executor_ctx)
poollast = self.ad.pooling_2d_forward_op(concat3_3, "NCHW", "mean", 0, 0, 1, 1, 8, 8)
dropout = self.ad.dropout_forward_op(poollast, "NCHW", 0.8)
convlast = self.conv2dplusrelu(dropout, filtera1, "NCHW", "SAME", 1, 1)
squeeze = self.ad.squeeze_op(convlast)
dense = self.ad.dense(squeeze, W, b)
y = self.ad.fullyactivation_forward_op(dense, "NCHW", "softmax")
loss = self.ad.crossEntropy_loss(y, y_)
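# Hedged note: conv2dplusrelu is assumed (based on how it is called throughout this
# method) to be a thin helper that chains this framework's 2-D convolution forward op
# with a relu activation, roughly:
#
#   def conv2dplusrelu(self, node, filter, fmt, padding, stride_h, stride_w):
#       conv = self.ad.convolution_2d_forward_op(node, filter, fmt, padding, stride_h, stride_w)
#       return self.ad.activation_forward_op(conv, fmt, "relu")
#
# The op names in this sketch are assumptions; only the call signature is taken from
# the usage above.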
# fc8
executor = self.ad.Executor(loss, y, 0.001, top_control_queue=top_control_queue,
top_message_queue=top_message_queue, log_path=self.log_path)
feed_dict = {filterb_1: filtersb_val1, filterb_2: filtersb_val2, filterb_3: filtersb_val3
, filterb_4: filtersb_val4, filterb_5: filtersb_val5,
filter1_1_0: filter1_1_0_val, filter1_1_1a: filter1_1_1_vala, filter1_1_1b: filter1_1_1_valb, filter1_1_2a: filter1_1_2_vala, filter1_1_2b: filter1_1_2_valb
, filter1_1_2c: filter1_1_2_valc, filter1_1_3: filter1_1_3_val
, filter1_2_0: filter1_2_0_val, filter1_2_1a: filter1_2_1_vala,
filter1_2_1b: filter1_2_1_valb, filter1_2_2a: filter1_2_2_vala,
filter1_2_2b: filter1_2_2_valb, filter1_2_2c: filter1_2_2_valc, filter1_2_3: filter1_2_3_val
, filter1_3_0: filter1_3_0_val, filter1_3_1a: filter1_3_1_vala,
filter1_3_1b: filter1_3_1_valb, filter1_3_2a: filter1_3_2_vala,
filter1_3_2b: filter1_3_2_valb, filter1_3_2c: filter1_3_2_valc,
filter1_3_3: filter1_3_3_val
, filter2_1_0: filter2_1_0_val, filter2_1_1a: filter2_1_1_vala, filter2_1_1b: filter2_1_1_valb, filter2_1_1c: filter2_1_1_valc
, filter2_2_0: filter2_2_0_val, filter2_2_1a: filter2_2_1_vala, filter2_2_1b: filter2_2_1_valb, filter2_2_1c: filter2_2_1_valc,
filter2_2_2a: filter2_2_2_vala, filter2_2_2b: filter2_2_2_valb, filter2_2_2c: filter2_2_2_valc, filter2_2_2d: filter2_2_2_vald, filter2_2_2e: filter2_2_2_vale,
filter2_2_3: filter2_2_3_val
, filter2_3_0: filter2_3_0_val, filter2_3_1a: filter2_3_1_vala, filter2_3_1b: filter2_3_1_valb,
filter2_3_1c: filter2_3_1_valc,
filter2_3_2a: filter2_3_2_vala, filter2_3_2b: filter2_3_2_valb,
filter2_3_2c: filter2_3_2_valc, filter2_3_2d: filter2_3_2_vald,
filter2_3_2e: filter2_3_2_vale, filter2_3_3: filter2_3_3_val
, filter2_4_0: filter2_4_0_val, filter2_4_1a: filter2_4_1_vala, filter2_4_1b: filter2_4_1_valb,
filter2_4_1c: filter2_4_1_valc,
filter2_4_2a: filter2_4_2_vala, filter2_4_2b: filter2_4_2_valb,
filter2_4_2c: filter2_4_2_valc, filter2_4_2d: filter2_4_2_vald,
filter2_4_2e: filter2_4_2_vale, filter2_4_3: filter2_4_3_val
, filter2_5_0: filter2_5_0_val, filter2_5_1a: filter2_5_1_vala, filter2_5_1b: filter2_5_1_valb,
filter2_5_1c: filter2_5_1_valc,
filter2_5_2a: filter2_5_2_vala, filter2_5_2b: filter2_5_2_valb,
filter2_5_2c: filter2_5_2_valc, filter2_5_2d: filter2_5_2_vald,
filter2_5_2e: filter2_5_2_vale, filter2_5_3: filter2_5_3_val
, filter3_1_0a: filter3_1_0_vala, filter3_1_0b: filter3_1_0_valb, filter3_1_1a: filter3_1_1_vala, filter3_1_1b: filter3_1_1_valb,
filter3_1_1c: filter3_1_1_valc, filter3_1_1d: filter3_1_1_vald
, filter3_2_0: filter3_2_0_val, filter3_2_1a: filter3_2_1_vala,
filter3_2_1b: filter3_2_1_valb,
filter3_2_1c: filter3_2_1_valc, filter3_2_2a: filter3_2_2_vala, filter3_2_2b: filter3_2_2_valb,
filter3_2_2c: filter3_2_2_valc, filter3_2_2d: filter3_2_2_vald, filter3_2_3: filter3_2_3_val
, filter3_3_0: filter3_3_0_val, filter3_3_1a: filter3_3_1_vala,
filter3_3_1b: filter3_3_1_valb,
filter3_3_1c: filter3_3_1_valc, filter3_3_2a: filter3_3_2_vala,
filter3_3_2b: filter3_3_2_valb,
filter3_3_2c: filter3_3_2_valc, filter3_3_2d: filter3_3_2_vald,
filter3_3_3: filter3_3_3_val
, filtera1: filtera1val, W: W_val, b: b_val}
feed_dict_mv = {}
for key, value in feed_dict.items():
print(key)
m_key = executor.Variable_node_to_mv[key][0]
m_val = ndarray.array(np.zeros(shape=value.shape), executor_ctx)
v_key = executor.Variable_node_to_mv[key][1]
v_val | |
import os
import glob
import re
import shutil
import sqlalchemy
import traceback
import importlib
from rapidfuzz import fuzz
from traitlets.config import LoggingConfigurable, Config
from traitlets import Bool, List, Dict, Integer, Instance, Type, Any
from traitlets import default, validate, TraitError
from textwrap import dedent
from nbconvert.exporters import Exporter, NotebookExporter
from nbconvert.writers import FilesWriter
from ..coursedir import CourseDirectory
from ..utils import find_all_files, rmtree, remove
from ..preprocessors.execute import UnresponsiveKernelError
from ..nbgraderformat import SchemaTooOldError, SchemaTooNewError
import typing
from nbconvert.exporters.exporter import ResourcesDict
class NbGraderException(Exception):
pass
class BaseConverter(LoggingConfigurable):
notebooks = List([])
assignments = Dict({})
writer = Instance(FilesWriter)
exporter = Instance(Exporter)
exporter_class = Type(NotebookExporter, klass=Exporter).tag(config=True)
preprocessors = List([])
force = Bool(False, help="Whether to overwrite existing assignments/submissions").tag(config=True)
pre_convert_hook = Any(
None,
config=True,
allow_none=True,
help=dedent("""
An optional hook function that you can implement to do some
bootstrapping work before converting.
This function is called before the notebooks are converted
and should be used for specific converters such as Autograde,
GenerateAssignment or GenerateFeedback.
It will be called as (all arguments are passed as keywords)::
hook(assignment=assignment, student=student, notebooks=notebooks)
""")
)
post_convert_hook = Any(
None,
config=True,
allow_none=True,
help=dedent("""
An optional hook function that you can implement to do some
work after converting.
This function is called after the notebooks are converted
and should be used for specific converters such as Autograde,
GenerateAssignment or GenerateFeedback.
It will be called as (all arguments are passed as keywords)::
hook(assignment=assignment, student=student, notebooks=notebooks)
""")
)
permissions = Integer(
help=dedent(
"""
Permissions to set on files output by nbgrader. The default is
generally read-only (444), with the exception of nbgrader
generate_assignment and nbgrader generate_feedback, in which case
the user also has write permission.
"""
)
).tag(config=True)
@default("permissions")
def _permissions_default(self) -> int:
return 664 if self.coursedir.groupshared else 444
@validate('pre_convert_hook')
def _validate_pre_convert_hook(self, proposal):
value = proposal['value']
if isinstance(value, str):
module, function = value.rsplit('.', 1)
value = getattr(importlib.import_module(module), function)
if not callable(value):
raise TraitError("pre_convert_hook must be callable")
return value
@validate('post_convert_hook')
def _validate_post_convert_hook(self, proposal):
value = proposal['value']
if isinstance(value, str):
module, function = value.rsplit('.', 1)
value = getattr(importlib.import_module(module), function)
if not callable(value):
raise TraitError("post_convert_hook must be callable")
return value
coursedir = Instance(CourseDirectory, allow_none=True)
def __init__(self, coursedir: CourseDirectory = None, **kwargs: typing.Any) -> None:
self.coursedir = coursedir
super(BaseConverter, self).__init__(**kwargs)
if self.parent and hasattr(self.parent, "logfile"):
self.logfile = self.parent.logfile
else:
self.logfile = None
c = Config()
c.Exporter.default_preprocessors = []
self.update_config(c)
def start(self) -> None:
self.init_notebooks()
self.writer = FilesWriter(parent=self, config=self.config)
self.exporter = self.exporter_class(parent=self, config=self.config)
for pp in self.preprocessors:
self.exporter.register_preprocessor(pp)
currdir = os.getcwd()
os.chdir(self.coursedir.root)
try:
self.convert_notebooks()
finally:
os.chdir(currdir)
@default("classes")
def _classes_default(self):
classes = super(BaseConverter, self)._classes_default()
classes.append(FilesWriter)
classes.append(Exporter)
for pp in self.preprocessors:
if len(pp.class_traits(config=True)) > 0:
classes.append(pp)
return classes
@property
def _input_directory(self):
raise NotImplementedError
@property
def _output_directory(self):
raise NotImplementedError
def _format_source(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
return self.coursedir.format_path(self._input_directory, student_id, assignment_id, escape=escape)
def _format_dest(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
return self.coursedir.format_path(self._output_directory, student_id, assignment_id, escape=escape)
def init_notebooks(self) -> None:
self.assignments = {}
self.notebooks = []
assignment_glob = self._format_source(self.coursedir.assignment_id, self.coursedir.student_id)
for assignment in glob.glob(assignment_glob):
notebook_glob = os.path.join(assignment, self.coursedir.notebook_id + ".ipynb")
found = glob.glob(notebook_glob)
if len(found) == 0:
self.log.warning("No notebooks were matched by '%s'", notebook_glob)
continue
self.assignments[assignment] = found
if len(self.assignments) == 0:
msg = "No notebooks were matched by '%s'" % assignment_glob
self.log.error(msg)
assignment_glob2 = self._format_source("*", self.coursedir.student_id)
found = glob.glob(assignment_glob2)
if found:
scores = sorted([(fuzz.ratio(assignment_glob, x), x) for x in found])
self.log.error("Did you mean: %s", scores[-1][1])
raise NbGraderException(msg)
def init_single_notebook_resources(self, notebook_filename: str) -> typing.Dict[str, typing.Any]:
regexp = re.escape(os.path.sep).join([
self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True),
"(?P<notebook_id>.*).ipynb"
])
m = re.match(regexp, notebook_filename)
if m is None:
msg = "Could not match '%s' with regexp '%s'" % (notebook_filename, regexp)
self.log.error(msg)
raise NbGraderException(msg)
gd = m.groupdict()
self.log.debug("Student: %s", gd['student_id'])
self.log.debug("Assignment: %s", gd['assignment_id'])
self.log.debug("Notebook: %s", gd['notebook_id'])
resources = {}
resources['unique_key'] = gd['notebook_id']
resources['output_files_dir'] = '%s_files' % gd['notebook_id']
resources['nbgrader'] = {}
resources['nbgrader']['student'] = gd['student_id']
resources['nbgrader']['assignment'] = gd['assignment_id']
resources['nbgrader']['notebook'] = gd['notebook_id']
resources['nbgrader']['db_url'] = self.coursedir.db_url
return resources
def write_single_notebook(self, output: str, resources: ResourcesDict) -> None:
# configure the writer build directory
self.writer.build_directory = self._format_dest(
resources['nbgrader']['assignment'], resources['nbgrader']['student'])
# write out the results
self.writer.write(output, resources, notebook_name=resources['unique_key'])
def init_destination(self, assignment_id: str, student_id: str) -> bool:
"""Initialize the destination for an assignment. Returns whether the
assignment should actually be processed or not (i.e. whether the
initialization was successful).
"""
if self.coursedir.student_id_exclude:
exclude_ids = self.coursedir.student_id_exclude.split(',')
if student_id in exclude_ids:
return False
dest = os.path.normpath(self._format_dest(assignment_id, student_id))
# the destination doesn't exist, so we haven't processed it
if self.coursedir.notebook_id == "*":
if not os.path.exists(dest):
return True
else:
# if any of the notebooks don't exist, then we want to process them
for notebook in self.notebooks:
filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
path = os.path.join(dest, filename)
if not os.path.exists(path):
return True
# if we have specified --force, then always remove existing stuff
if self.force:
if self.coursedir.notebook_id == "*":
self.log.warning("Removing existing assignment: {}".format(dest))
rmtree(dest)
else:
for notebook in self.notebooks:
filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
path = os.path.join(dest, filename)
if os.path.exists(path):
self.log.warning("Removing existing notebook: {}".format(path))
remove(path)
return True
src = self._format_source(assignment_id, student_id)
new_timestamp = self.coursedir.get_existing_timestamp(src)
old_timestamp = self.coursedir.get_existing_timestamp(dest)
# if --force hasn't been specified, but the source assignment is newer,
# then we want to overwrite it
if new_timestamp is not None and old_timestamp is not None and new_timestamp > old_timestamp:
if self.coursedir.notebook_id == "*":
self.log.warning("Updating existing assignment: {}".format(dest))
rmtree(dest)
else:
for notebook in self.notebooks:
filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
path = os.path.join(dest, filename)
if os.path.exists(path):
self.log.warning("Updating existing notebook: {}".format(path))
remove(path)
return True
# otherwise, we should skip the assignment
self.log.info("Skipping existing assignment: {}".format(dest))
return False
def init_assignment(self, assignment_id: str, student_id: str) -> None:
"""Initializes resources/dependencies/etc. that are common to all
notebooks in an assignment.
"""
source = self._format_source(assignment_id, student_id)
dest = self._format_dest(assignment_id, student_id)
# detect other files in the source directory
for filename in find_all_files(source, self.coursedir.ignore + ["*.ipynb"]):
# Make sure folder exists.
path = os.path.join(dest, os.path.relpath(filename, source))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if os.path.exists(path):
remove(path)
self.log.info("Copying %s -> %s", filename, path)
shutil.copy(filename, path)
def set_permissions(self, assignment_id: str, student_id: str) -> None:
self.log.info("Setting destination file permissions to %s", self.permissions)
dest = os.path.normpath(self._format_dest(assignment_id, student_id))
permissions = int(str(self.permissions), 8)
for dirname, _, filenames in os.walk(dest):
for filename in filenames:
os.chmod(os.path.join(dirname, filename), permissions)
# If groupshared, set dir permissions - see comment below.
st_mode = os.stat(dirname).st_mode
if self.coursedir.groupshared and st_mode & 0o2770 != 0o2770:
try:
os.chmod(dirname, (st_mode|0o2770) & 0o2777)
except PermissionError:
self.log.warning("Could not update permissions of %s to make it groupshared", dirname)
# If groupshared, set write permissions on directories. Directories
# are created by ipython_genutils.path.ensure_dir_exists via
# nbconvert.writer (unless there are supplementary files) with a
# default mode of 755, and there is no way to pass the mode argument
# all the way down, so we have to walk and fix.
if self.coursedir.groupshared:
# Root may be created in this step, and is not included above.
rootdir = self.coursedir.format_path(self._output_directory, '.', '.')
# Add 2770 to existing dir permissions (don't unconditionally override)
st_mode = os.stat(rootdir).st_mode
if st_mode & 0o2770 != 0o2770:
try:
os.chmod(rootdir, (st_mode|0o2770) & 0o2777)
except PermissionError:
self.log.warning("Could not update permissions of %s to make it groupshared", rootdir)
def convert_single_notebook(self, notebook_filename: str) -> None:
"""
Convert a single notebook.
Performs the following steps:
1. Initialize notebook resources
2. Export the notebook to a particular format
3. Write the exported notebook to file
"""
self.log.info("Converting notebook %s", notebook_filename)
resources = self.init_single_notebook_resources(notebook_filename)
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
self.write_single_notebook(output, resources)
def convert_notebooks(self) -> None:
errors = []
def _handle_failure(gd: typing.Dict[str, str]) -> None:
dest = os.path.normpath(self._format_dest(gd['assignment_id'], gd['student_id']))
if self.coursedir.notebook_id == "*":
if os.path.exists(dest):
self.log.warning("Removing failed assignment: {}".format(dest))
rmtree(dest)
else:
for notebook in self.notebooks:
filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
path = os.path.join(dest, filename)
if os.path.exists(path):
self.log.warning("Removing failed notebook: {}".format(path))
remove(path)
for assignment in sorted(self.assignments.keys()):
# initialize the list of notebooks and the exporter
self.notebooks = sorted(self.assignments[assignment])
# parse out the assignment and student ids
regexp = self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True)
m = re.match(regexp, assignment)
if m is None:
msg = "Could not match '%s' with regexp '%s'" % (assignment, regexp)
self.log.error(msg)
raise NbGraderException(msg)
gd = m.groupdict()
try:
# determine whether we actually even want to process this submission
# aha/workflows/episodic_few_shot_workflow.py
# Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""EpisodicFewShotWorkflow class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from pagi.components.summarize_levels import SummarizeLevels
from pagi.utils import logger_utils, tf_utils, image_utils, data_utils
from pagi.utils.np_utils import print_simple_stats
from pagi.datasets.omniglot_dataset import OmniglotDataset
from aha.components.episodic_component import EpisodicComponent
from aha.datasets.omniglot_lake_dataset import OmniglotLakeDataset
from aha.datasets.omniglot_lake_runs_dataset import OmniglotLakeRunsDataset
from aha.datasets.omniglot_lakelike_runs_dataset import OmniglotLakelikeRunsDataset
from aha.datasets.omniglot_unseen_oneshot_dataset import OmniglotUnseenOneShotDataset
from aha.workflows.episodic_workflow import EpisodicWorkflow
from aha.workflows.pattern_completion_workflow import PatternCompletionWorkflow, UseTrainForTest
from aha.utils.generic_utils import compute_overlap, overlap_sample_batch
from aha.utils.few_shot_utils import compute_matching, create_and_add_comparison_image, add_completion_summary, \
add_completion_summary_paper, mod_hausdorff_distance
match_mse_key = 'match_mse'
match_acc_mse_key = 'acc_mse'
sum_ambiguous_mse_key = 'amb_mse'
match_mse_tf_key = 'match_mse_tf'
match_acc_mse_tf_key = 'acc_mse_tf'
sum_ambiguous_mse_tf_key = 'amb_mse_tf'
match_olap_key = 'matching_matrices_olp'
match_olap_tf_key = 'matching_matrices_olp_tf'
match_acc_olap_key = 'acc_olp'
match_acc_olap_tf_key = 'acc_olp_tf'
sum_ambiguous_olap_key = 'amb_olp'
sum_ambiguous_olap_tf_key = 'amb_olp_tf'
class EpisodicFewShotWorkflow(EpisodicWorkflow, PatternCompletionWorkflow):
"""
Few Shot Learning with the Episodic Component.
modes:
oneshot = classic lake test
instance = like lake test, but identifying the same instance out of distractor exemplars from same class
test_vc = visualise histograms of overlap
create_data_subset = filter batch to create an 'idealised' batch where intra overlap is > inter overlap
"""
@staticmethod
def default_opts():
"""Builds an HParam object with default workflow options."""
return tf.contrib.training.HParams(
num_repeats=1,
num_replays=1,
consolidation_steps=0,
random_recall=None,
replay_buffer_threshold=0.0,
superclass=False,
class_proportion=1.0,
invert_images=False,
resize_images_factor=1.0,
min_val=0, # set any 0 pixels in the input image to this new min_val; pixels > 0 are left unchanged
train_classes=['5', '6', '7', '8', '9'],
test_classes=['5', '6', '7', '8', '9'],
batch_all_classes=False,
batch_no_duplicates=False,
degrade_type='vertical', # vertical, horizontal or random: the model completes image degraded by this method
degrade_step='hidden', # 'test', 'input', 'hidden' or 'none'
degrade_factor=0.5, # fraction to be degraded, if supported by the degrade_type option
degrade_value=0, # when degrading pixels, set them to this value
noise_type='vertical', # 'sp_float' or 'sp_binary'
noise_step='test', # 'test' or 'none'
noise_factor=0.5, # fraction of image to be noise
noise_value=0, # when degrading pixels, set them to this value
completion_gain=1.0,
train_recurse=False,
test_recurse=False,
recurse_iterations=0,
rsummary_batches=2,
input_mode={
"train_first": "complete",
"train_inference": "complete",
"test_first": "complete",
"test_inference": "complete"
},
evaluate=True,
train=True,
visualise_vc=False, # show final vc decoded through vc (relevant for hierarchical VC)
visualise_if_at_vc=False, # show the IF encodings decoded through VC
evaluate_mode=['oneshot', 'test_vc', 'test_dg'],
evaluate_supermode='none', # 'none' or 'same_train_and_test' used for debugging, isolating units
summarize_completion='none' # the big matplotlib fig: 'none', 'to_file' or 'to_screen'
# Notes on evaluate_mode: a list containing non-exclusive modes that can be
# 'oneshot', 'instance', 'test_vc', 'test_dg', 'test_if' or 'create_data_subset'.
)
def __init__(self, session, dataset_type, dataset_location, component_type, hparams_override, eval_opts, export_opts,
opts=None, summarize=True, seed=None, summary_dir=None, checkpoint_opts=None):
super().__init__(session, dataset_type, dataset_location, component_type, hparams_override, eval_opts, export_opts,
opts, summarize, seed, summary_dir, checkpoint_opts)
self._training_features = {}
self._testing_features = {}
self._total_losses = {} # dict for accumulating total (and then average) loss over all batches
self._test_inputs = None
self._summary_images = {}
self._test_recurse_opts = self._opts['test_recurse']
# dictionaries for all the overlap metrics (calculated by compute_overlap())
self._vc_overlap = {}
self._vc_core_overlap = {}
self._dg_overlap = {}
self._dg_overlap_cumulative = []
# set these options
self._is_overlap_per_label = False # visualisation of overlap metrics per label or not
self._add_comparison_images = False # add the comparison images to the completion_summary if it is used
self._paper = False # figures rendered for the paper
self._replay_inputs = []
self._replay_labels = []
self._replay_step = 0
self._consolidation_step = 0
self._replay_inh = None
self._big_loop = False
self._all_replay_inputs = []
self._all_replay_patterns = []
self._all_replay_labels = []
self._load_next_checkpoint = 0
self._run_ended = True
self._unseen_label = None
self._unseen_inputs = []
self._unseen_labels = []
def _test_consistency(self):
super()._test_consistency()
if self.test_if_mode():
if not self._hparams.use_interest_filter:
raise RuntimeError("Using `test_if` mode, but not using InterestFilter!")
def test_vc_mode(self):
return 'test_vc' in self._opts['evaluate_mode']
def test_if_mode(self):
return 'test_if' in self._opts['evaluate_mode']
def test_dg_mode(self):
return 'test_dg' in self._opts['evaluate_mode']
def instance_mode(self):
return 'instance' in self._opts['evaluate_mode']
def oneshot_mode(self):
return 'oneshot' in self._opts['evaluate_mode']
def create_data_subset_mode(self):
return 'create_data_subset' in self._opts['evaluate_mode']
def _is_eval_batch(self, batch):
"""
If using PC with the recall path, we need many iterations of recall learning before an 'evaluation',
so check whether the PC is ready for evaluation; otherwise, every batch is treated as an 'evaluation' batch.
"""
if self._component.get_pc() is None:
return True
if not self._component.get_pc().use_nn_in_pr_path():
return True
# PC with recall path --> must be on num_repeats
if (batch + 1) % self._opts['num_repeats'] == 0:
return True
return False
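# Illustration (not part of the original code): with a PC that uses the recall
# path and num_repeats = 5, only 0-indexed batches 4, 9, 14, ... satisfy
# (batch + 1) % num_repeats == 0 and are treated as evaluation batches.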
def _is_decoding_vc_at_vc(self):
return self._is_using_visualise_vc()
def _is_decoding_pc_at_dg(self):
if self._hparams.pc_type != 'none':
if self._hparams.dg_type == 'fc':
return True
return False
def _is_decoding_pc_at_vc(self):
if self._hparams.pc_type != 'none':
if self._hparams.dg_type != 'stub':
if not self._hparams.use_interest_filter:
return True
return False
def _is_decoding_if_at_vc(self):
return self._is_using_if_at_vc()
def _generate_datasets(self):
"""Override in child classes with your options"""
same_train_and_test = UseTrainForTest.NO
# some modes need same train and test
if self.instance_mode():
same_train_and_test = UseTrainForTest.IDENTICAL # UseTrainForTest.SHUFFLED
# option to override regardless
if self._opts['evaluate_supermode'] == 'same_train_and_test':
same_train_and_test = UseTrainForTest.IDENTICAL # UseTrainForTest.SHUFFLED
degrade_test = (self._opts['degrade_step'] == 'test')
noise_test = (self._opts['noise_step'] == 'test')
# Temporary fix for mismatched train/test inputs & labels
# TODO(@abdel): Investigate this; as PAGI Decoder shouldn't be triggering the iterator
additional_decode = 0
if self._is_decoding_pc_at_dg():
additional_decode += 1
if self._is_decoding_pc_at_vc():
additional_decode += 1
test_recurse_iterations = self._opts['recurse_iterations']
if self._replay_mode():
test_recurse_iterations += self._opts['num_replays'] * self._opts['recurse_iterations']
train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],
self._opts['test_classes'],
is_superclass=self._opts['superclass'],
class_proportion=self._opts['class_proportion'],
degrade_test=degrade_test,
degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True
degrade_val=self._opts['degrade_value'], # only relevant if degrade_test = True
degrade_factor=self._opts['degrade_factor'],
noise_test=noise_test,
noise_type=self._opts['noise_type'],
noise_factor=self._opts['noise_factor'],
recurse_train=self._is_train_recursive(),
recurse_test=self._is_inference_recursive(),
num_batch_repeats=self._opts['num_repeats'],
recurse_iterations=test_recurse_iterations,
additional_test_decodes=additional_decode,
evaluate_step=self._opts['evaluate'],
use_trainset_for_tests=same_train_and_test,
invert_images=self._opts['invert_images'],
min_val=self._opts['min_val'])
return train_dataset, test_dataset
def _create_dataset(self):
if self._is_omniglot_lake():
if self._dataset_type.__name__ == OmniglotLakeDataset.__name__:
self._dataset = self._dataset_type(self._dataset_location,
self._hparams.batch_size,
self._opts['test_classes'],
self.instance_mode())
elif self._dataset_type.__name__ == OmniglotLakeRunsDataset.__name__:
self._dataset = self._dataset_type(self._dataset_location,
self._hparams.batch_size)
elif self._dataset_type.__name__ == OmniglotLakelikeRunsDataset.__name__:
self._dataset = self._dataset_type(self._dataset_location,
self._hparams.batch_size,
self._opts['evaluate_mode'])
elif self._dataset_type.__name__ == OmniglotUnseenOneShotDataset.__name__:
self._dataset = self._dataset_type(self._dataset_location,
self._hparams.batch_size)
else:
self._dataset = self._dataset_type(self._dataset_location)
else:
self._dataset = self._dataset_type(self._dataset_location)
def _setup_recursive_train_modes(self, batch_type):
""" Set the appropriate mode depending on batch type.
This is only called when recursive training """
mode = 'training'
# If each component is in encoding mode
if all(v == 'encoding' for v in batch_type.values()):
mode = 'inference'
return mode
def _setup_checkpoint_saver(self):
"""Handles the saving and restoration of graph state and variables."""
max_to_keep = self._export_opts['max_to_keep']
def load_checkpoint(checkpoint_path):
# Loads a subset of the checkpoint, specified by variable scopes
if self._checkpoint_opts['checkpoint_load_scope']:
load_scope = []
init_scope = []
scope_list = self._checkpoint_opts['checkpoint_load_scope'].split(',')
for i, scope in enumerate(scope_list):
scope_list[i] = scope.lstrip().rstrip()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for variable in global_variables:
# Add any variables that match the specified scope to a separate list
# Note: global_step is excluded and re-initialised, even if within scope
if variable.name.startswith(tuple(scope_list)) and 'global_step' not in variable.name:
load_scope.append(variable)
else:
init_scope.append(variable)
# Load variables from specified scope
saver = tf.train.Saver(load_scope)
self._restore_checkpoint(saver, checkpoint_path)
# Re-initialise any variables outside specified scope, including global_step
init_scope_op = tf.variables_initializer(init_scope, name='init')
self._session.run(init_scope_op)
# Switch to encoding mode if freezing loaded scope
if self._checkpoint_opts['checkpoint_frozen_scope']:
self._freeze_training = True
# Create new tf.Saver to save new checkpoints
self._saver = tf.train.Saver(max_to_keep=max_to_keep)
self._last_step = 0
# Otherwise, attempt to load the entire checkpoint
else:
self._saver = tf.train.Saver(max_to_keep=max_to_keep)
self._last_step = self._restore_checkpoint(self._saver, checkpoint_path)
if not self._replay_mode():
if 'model.ckpt' in self._checkpoint_opts['checkpoint_path']:
return super()._setup_checkpoint_saver()
# First call to this function by setup()
if self._load_next_checkpoint == 0:
self._saver = tf.train.Saver(max_to_keep=max_to_keep)
self._last_step = 0
return
folder_name = 'run' + str(self._load_next_checkpoint).zfill(2)
checkpoint_dir = os.path.join(self._checkpoint_opts['checkpoint_path'], folder_name)
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
print('Checkpoint Path =', checkpoint_path)
load_checkpoint(checkpoint_path)
return
# First call to this function by setup()
if self._load_next_checkpoint == 0:
self._saver = tf.train.Saver(max_to_keep=max_to_keep)
self._last_step = 0
return
filename = 'model.ckpt-' + str(self._load_next_checkpoint)
checkpoint_path = os.path.join(self._checkpoint_opts['checkpoint_path'], filename)
print('Checkpoint Path =', checkpoint_path)
load_checkpoint(checkpoint_path)
def run(self, num_batches, evaluate, train=True):
"""Run Experiment"""
# Training
# -------------------------------------------------------------------------
training_handle = self._session.run(self._dataset_iterators['training'].string_handle())
self._session.run(self._dataset_iterators['training'].initializer)
if evaluate:
test_handle = self._session.run(self._dataset_iterators['test'].string_handle())
self._session.run(self._dataset_iterators['test'].initializer)
self._on_before_training_batches()
# set some hyperparams to instance variables for access in train and complete methods
# (to be compatible with base class method signatures)
self._rsummary_from_batch = num_batches - self._opts['rsummary_batches']
# ACea15/pyNastran
from __future__ import annotations
from typing import Union, Callable
import numpy as np
import h5py
from .h5_utils import get_tree, passer
from pyNastran.bdf.bdf import BDF
GeomCallable = Callable[[h5py._hl.dataset.Dataset, BDF], # inputs
None] # output
def read_grid(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
"""
Dataset:
attrs : <Attributes of HDF5 object at 2221931292520>
chunks : (450,)
compression : 'gzip'
compression_opts : 1
dims : <Dimensions of HDF5 object at 2221931292520>
dtype : dtype([('ID', '<i8'), ('CP', '<i8'), ('X', '<f8', (3,)), ('CD', '<i8'), ('PS', '<i8'), ('SEID', '<i8'), ('DOMAIN_ID', '<i8')])
external : None
file : <HDF5 file "6+element-nastran-sol103.h5" (mode r)>
fillvalue : (0, 0, [0., 0., 0.], 0, 0, 0, 0)
fletcher32 : False
id : <h5py.h5d.DatasetID object at 0x00000205556CE768>
is_virtual : False
maxshape : (None,)
name : '/NASTRAN/INPUT/NODE/GRID'
nbytes : 3528
ndim : 1
parent : <HDF5 group "/NASTRAN/INPUT/NODE" (1 members)>
ref : <HDF5 object reference>
regionref : <h5py._hl.base._RegionProxy object at 0x00000205569A4D48>
scaleoffset : None
shape : (49,)
shuffle : True
size : 49
"""
# NOTE: short-circuit path - store the raw GRID dataset on the model and count
# the cards; the per-node add_grid loop below is left in place but is unreachable.
geom_model.card_count[name] = len(group['ID'])
setattr(geom_model, name, group)
return
ID = group['ID']
CP = group['CP']
X = group['X']
CD = group['CD']
PS = group['PS']
SEID = group['SEID']
DOMAIN_ID = group['DOMAIN_ID']
u_seid = np.unique(SEID)
u_domain = np.unique(DOMAIN_ID)
assert len(u_seid) == 1, u_seid
assert len(u_domain) == 1, u_domain
assert u_seid[0] == 0, u_seid
assert u_domain[0] == 1, u_domain
for nid, cp, xyz, cd, ps, seid in zip(ID, CP, X, CD, PS, SEID):
if ps == 0:
ps = ''
geom_model.add_grid(nid, xyz, cp=cp, cd=cd, ps=ps, seid=seid, comment='')
def read_spoint(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
ID = group['ID']
DOMAIN_ID = group['DOMAIN_ID']
u_domain = np.unique(DOMAIN_ID)
assert len(u_domain) == 1, u_domain
assert u_domain[0] == 1, u_domain
geom_model.add_spoint(ID)
def read_epoint(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
ID = group['ID']
DOMAIN_ID = group['DOMAIN_ID']
u_domain = np.unique(DOMAIN_ID)
assert len(u_domain) == 1, u_domain
assert u_domain[0] == 1, u_domain
geom_model.add_epoint(ID)
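def _example_load_nodes(h5_filename: str) -> BDF:
    """Editor's sketch (not pyNastran code): dispatch the node readers above over
    the /NASTRAN/INPUT/NODE group of an MSC HDF5 file. The group path follows the
    layout shown in the read_grid docstring; everything else is an assumption."""
    geom_model = BDF()
    readers = {'GRID': read_grid, 'SPOINT': read_spoint, 'EPOINT': read_epoint}
    with h5py.File(h5_filename, 'r') as h5_file:
        node_group = h5_file['/NASTRAN/INPUT/NODE']
        for name, dataset in node_group.items():
            reader = readers.get(name)
            if reader is not None:
                reader(name, dataset, geom_model)
    return geom_model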
def read_desvar(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('ID', 'LABEL', 'XINIT', 'XLB', 'XUB', 'DELX', 'DVID', 'DOMAIN_ID')
ID = group['ID']
LABEL = group['LABEL']
XINIT = group['XINIT']
XLB = group['XLB']
XUB = group['XUB']
DELX = group['DELX']
DVID = group['DVID']
DOMAIN_ID = group['DOMAIN_ID']
for desvar_id, label, xinit, xlb, xub, delx, ddval in zip(ID, LABEL, XINIT, XLB, XUB, DELX, DVID):
label_str = label.strip().decode('latin1')
obj = geom_model.add_desvar(desvar_id, label_str, xinit, xlb=xlb, xub=xub,
delx=delx, ddval=ddval, comment='')
obj.validate()
str(obj)
def read_dconstr(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('DCID', 'RID', 'LALLOW', 'UALLOW', 'LOWFQ', 'HIGHFQ', 'DTYPE', 'DOMAIN_ID')
DCID = group['DCID']
RID = group['RID']
LALLOW = group['LALLOW']
UALLOW = group['UALLOW']
LOWFQ = group['LOWFQ']
HIGHFQ = group['HIGHFQ']
DTYPE = group['DTYPE']
DOMAIN_ID = group['DOMAIN_ID']
for oid, dresp_id, lallow, uallow, lowfq, highfq, dtype in zip(
DCID, RID, LALLOW, UALLOW, LOWFQ, HIGHFQ, DTYPE):
obj = geom_model.add_dconstr(oid, dresp_id, lid=lallow, uid=uallow,
lowfq=lowfq, highfq=highfq, comment='')
obj.validate()
str(obj)
def read_cord2c(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
_read_cord2(name, group, geom_model.add_cord2c)
def read_cord2r(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
_read_cord2(name, group, geom_model.add_cord2r)
def read_cord2s(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
_read_cord2(name, group, geom_model.add_cord2s)
def _read_cord2(name: str, group: h5py._hl.dataset.Dataset,
add_func: Callable) -> None:
assert len(group.dtype.names) == 12, group.dtype.names
CID = group['CID']
RID = group['RID']
A1 = group['A1']
A2 = group['A2']
A3 = group['A3']
B1 = group['B1']
B2 = group['B2']
B3 = group['B3']
C1 = group['C1']
C2 = group['C2']
C3 = group['C3']
DOMAIN_ID = group['DOMAIN_ID']
for cid, rid, a1, a2, a3, b1, b2, b3, c1, c2, c3 in zip(CID, RID, A1, A2, A3, B1, B2, B3, C1, C2, C3):
origin = np.array([a1, a2, a3])
zaxis = np.array([b1, b2, b3])
xzplane = np.array([c1, c2, c3])
obj = add_func(cid, origin, zaxis, xzplane, rid=rid, setup=True, comment='')
obj.validate()
str(obj)
def read_mat1(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
"""
Dataset:
attrs : <Attributes of HDF5 object at 2553977821512>
chunks : (310,)
compression : 'gzip'
compression_opts : 1
dims : <Dimensions of HDF5 object at 2553977821512>
dtype : dtype([('MID', '<i8'), ('E', '<f8'), ('G', '<f8'), ('NU', '<f8'), ('RHO', '<f8'), ('A', '<f8'), ('TREF', '<f8'), ('GE', '<f8'), ('ST', '<f8'), ('SC', '<f8'), ('SS', '<f8'), ('MCSID', '<i8'), ('DOMAIN_ID', '<i8')])
external : None
file : <HDF5 file "6+element-nastran-sol103.h5" (mode r)>
fillvalue : (0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0, 0)
fletcher32 : False
id : <h5py.h5d.DatasetID object at 0x00000252A4F0D948>
is_virtual : False
maxshape : (None,)
name : '/NASTRAN/INPUT/MATERIAL/MAT1'
nbytes : 104
ndim : 1
parent : <HDF5 group "/NASTRAN/INPUT/MATERIAL" (1 members)>
ref : <HDF5 object reference>
regionref : <h5py._hl.base._RegionProxy object at 0x00000252A60614C8>
scaleoffset : None
shape : (1,)
shuffle : True
size : 1
"""
#'MID', 'E', 'G', 'NU', 'RHO', 'A', 'TREF', 'GE', 'ST', 'SC', 'SS', 'MCSID', 'DOMAIN_ID',
assert len(group.dtype.names) == 13, group.dtype.names
MID = group['MID']
E = group['E']
G = group['G']
NU = group['NU']
RHO = group['RHO']
A = group['A']
TREF = group['TREF']
GE = group['GE']
ST = group['ST']
SC = group['SC']
SS = group['SS']
MCSID = group['MCSID']
DOMAIN_ID = group['DOMAIN_ID']
for mid, e, g, nu, rho, a, tref, ge, st, sc, ss, mcsid in zip(MID, E, G, NU, RHO, A, TREF, GE, ST, SC, SS, MCSID):
#if mcid == -1:
#theta_mcid = theta
#else:
#asdf
#assert tflag == 0, tflag
#t1, t2, t3, t4 = [ti if ti != -1.0 else None
#for ti in t]
assert mcsid == 0, mcsid
obj = geom_model.add_mat1(mid, e, g, nu, rho=rho, a=a, tref=tref, ge=ge,
St=st, Sc=sc, Ss=ss, mcsid=mcsid, comment='')
obj.validate()
str(obj)
def read_mat2(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
assert len(group.dtype.names) == 18, group.dtype.names
MID = group['MID']
G11 = group['G11']
G12 = group['G12']
G13 = group['G13']
G22 = group['G22']
G23 = group['G23']
G33 = group['G33']
RHO = group['RHO']
A1 = group['A1']
A2 = group['A2']
A3 = group['A12']
TREF = group['TREF']
GE = group['GE']
ST = group['ST']
SC = group['SC']
SS = group['SS']
MCSID = group['MCSID']
DOMAIN_ID = group['DOMAIN_ID']
for mid, g11, g12, g13, g22, g23, g33, rho, a1, a2, a3, tref, ge, st, sc, ss, mcsid in zip(
MID, G11, G12, G13, G22, G23, G33, RHO, A1, A2, A3, TREF, GE, ST, SC, SS, MCSID):
if mid > 100_000_000:
continue
assert mcsid == 0, mcsid
obj = geom_model.add_mat2(mid, g11, g12, g13, g22, g23, g33,
rho=rho, a1=a1, a2=a2, a3=a3,
tref=tref, ge=ge,
St=st, Sc=sc, Ss=ss, mcsid=None, comment='')
obj.validate()
str(obj)
def read_mat8(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#assert len(group.dtype.names) == 18, group.dtype.names
#('MID', 'E1', 'E2', 'NU12', 'G12', 'G1Z', 'G2Z', 'RHO', 'A1', 'A2', 'TREF', 'XT', 'XC', 'YT', 'YC',
# 'S', 'GE', 'F12', 'STRN', 'DOMAIN_ID')
MID = group['MID']
E1 = group['E1']
E2 = group['E2']
NU12 = group['NU12']
G12 = group['G12']
G1Z = group['G1Z']
G2Z = group['G2Z']
RHO = group['RHO']
A1 = group['A1']
A2 = group['A2']
TREF = group['TREF']
XT = group['XT']
XC = group['XC']
YT = group['YT']
YC = group['YC']
S = group['S']
GE = group['GE']
F12 = group['F12']
STRN = group['STRN']
DOMAIN_ID = group['DOMAIN_ID']
for mid, e11, e22, nu12, g12, g1z, g2z, rho, a1, a2, tref, xt, xc, yt, yc, s, ge, f12, strn in zip(
MID, E1, E2, NU12, G12, G1Z, G2Z, RHO, A1, A2, TREF, XT, XC, YT, YC, S, GE, F12, STRN):
obj = geom_model.add_mat8(mid, e11, e22, nu12, g12=g12, g1z=g1z, g2z=g2z,
rho=rho, a1=a1, a2=a2, tref=tref,
Xt=xt, Xc=xc, Yt=yt, Yc=yc, S=s, ge=ge, F12=f12, strn=strn, comment='')
obj.validate()
str(obj)
def read_dvprel1(name: str, group: h5py._hl.group.Group, geom_model: BDF) -> None:
# TODO: group, not dataset
# {'ATTI': None, 'IDENTITY': None}
# ('ID', 'TYPE', 'PID', 'FID', 'PMIN', 'PMAX', 'C0', 'PNAME', 'START', 'LEN', 'DOMAIN_ID')
IDENTITY = group.get('IDENTITY')
RELATION = group.get('RELATION') # ('DVID', 'COEF')
DVID = RELATION['DVID']
COEF = RELATION['COEF']
ID = IDENTITY['ID']
TYPE = IDENTITY['TYPE']
PID = IDENTITY['PID']
FID = IDENTITY['FID']
PMIN = IDENTITY['PMIN']
PMAX = IDENTITY['PMAX']
C0 = IDENTITY['C0']
PNAME = IDENTITY['PNAME']
START = IDENTITY['START']
LEN = IDENTITY['LEN']
DOMAIN_ID = IDENTITY['DOMAIN_ID']
for oid, typei, pid, fid, pmin, pmax, c0, pname, ipos, ilen in zip(
ID, TYPE, PID, FID, PMIN, PMAX, C0, PNAME, START, LEN):
pname_str = pname.strip().decode('latin1')
prop_type = typei.strip().decode('latin1')
dvids = DVID[ipos:ipos+ilen]
coeffs = COEF[ipos:ipos+ilen]
if pname_str:
pname_fid = pname_str
elif fid != 0:
pname_fid = fid
else:
out = (pname_str, fid)
raise RuntimeError(out)
obj = geom_model.add_dvprel1(oid, prop_type, pid, pname_fid, dvids, coeffs,
p_min=pmin, p_max=pmax, c0=c0, validate=True)
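def _example_dvprel1_slices() -> None:
    """Editor's illustration (not pyNastran code) of the IDENTITY/RELATION layout
    used above: START/LEN index a contiguous slice of the flat RELATION arrays
    for each DVPREL1 card. The toy values below are invented."""
    DVID = np.array([11, 12, 21, 22, 23])        # flat RELATION['DVID']
    COEF = np.array([1.0, 0.5, 2.0, 1.5, 0.25])  # flat RELATION['COEF']
    START = np.array([0, 2])                     # per-card offset into the flat arrays
    LEN = np.array([2, 3])                       # per-card number of (dvid, coef) pairs
    for ipos, ilen in zip(START, LEN):
        dvids = DVID[ipos:ipos + ilen]
        coeffs = COEF[ipos:ipos + ilen]
        print(dvids.tolist(), coeffs.tolist())   # card 1: [11, 12]; card 2: [21, 22, 23]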
# abdurakhimov/zhinst-toolkit
# Copyright (C) 2020 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import textwrap
import attr
import numpy as np
from pathlib import Path
import deprecation
from .sequence_commands import SequenceCommand
from .utils import SequenceType, TriggerMode, Alignment
from zhinst.toolkit.interface import DeviceTypes, LoggerModule
from zhinst.toolkit._version import version as __version__
_logger = LoggerModule(__name__)
def is_greater_equal(min_value):
"""Check if the attribute value is greater than or equal to a minimum value.
This validator can handle both lists and single element attributes. If it
is a list, it checks if the element with the smallest value is greater than
or equal to the specified minimum value.
"""
def compare(self, attribute, value):
if type(value) is not list:
value = [value]
if np.min(value) < min_value:
_logger.error(
f"{attribute.name} cannot be smaller than {min_value}!",
_logger.ExceptionTypes.ValueError,
)
return compare
def is_smaller_equal(max_value):
"""Check if the attribute value is smaller than or equal to a maximum value.
This validator can handle both lists and single element attributes. If it
is a list, it checks if the element with the greatest value is smaller than
or equal to the specified maximum value.
"""
def compare(self, attribute, value):
if type(value) is not list:
value = [value]
if np.max(value) > max_value:
_logger.error(
f"{attribute.name} cannot be greater than {max_value}!",
_logger.ExceptionTypes.ValueError,
)
return compare
def is_multiple(factor):
"""Check if the attribute value is multiple of a certain factor.
This validator is most useful for checking whether an attribute related
to waveform length complies with the waveform granularity specification of
an instrument.
The validator can handle both lists and single element attributes. If it
is a list, it checks if each element is multiple of the given factor.
"""
def compare(self, attribute, value):
if type(value) is not list:
value = [value]
for i in value:
if i % factor != 0:
_logger.error(
f"{attribute.name} must be multiple of {factor}!",
_logger.ExceptionTypes.ValueError,
)
return compare
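# Editor's usage sketch (not part of zhinst-toolkit): how the validator factories
# above plug into an attrs class. The _ExampleWaveformSpec class and its fields
# are invented for illustration only.
@attr.s
class _ExampleWaveformSpec(object):
    lengths = attr.ib(
        default=attr.Factory(lambda: [32, 64]),
        validator=[is_greater_equal(32), is_multiple(16)],
    )
    amplitude = attr.ib(default=0.5, validator=is_smaller_equal(1.0))
# _ExampleWaveformSpec(lengths=[48, 80]) passes; lengths=[40, 8] is rejected by
# is_greater_equal(32) (reported through _logger.error).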
@attr.s
class Sequence(object):
"""Base class for an AWG sequence to be programmed on a :class:`AWGCore` .
Attributes:
period (double): Period in seconds at which the experiment is repeated.
trigger_mode (str or :class:`TriggerMode` enum): The trigger mode of the
sequence, i.e. if the AWG Core is used to send out the trigger signal
(*'Send Trigger'* or :class:`TriggerMode.SEND_TRIGGER`), to wait
for an external trigger signal (*'Receive Trigger'* or
:class:`TriggerMode.RECEIVE_TRIGGER`) or to wait for an external
signal to send out the trigger signal (*'Send and Receive Trigger'* or
:class:`TriggerMode.SEND_AND_RECEIVE_TRIGGER`). (default:
:class:`TriggerMode.NONE`)
trigger_samples (int): The duration of the trigger signal sent out by
the AWG Core. It is given in number of samples. (default: 32)
repetitions (int): The number of repetitions of the experiment.
alignment (str): The alignment of the played waveform with the trigger
signal, i.e. if the waveform should start with the trigger (or the
time origin `t=0` of the sequence). Waveforms can either *'Start
with Trigger'* (:class:`Alignment.START_WITH_TRIGGER`) or *'End with
Trigger'* (:class:`Alignment.END_WITH_TRIGGER`).
dead_time (double): The `dead time` of a sequence is the time in seconds
after the time origin of the sequence before the next trigger
signal is sent / expected. This time defines the maximum length of a
waveform played after the time origin, otherwise triggers can be
missed. (default: 5 us)
trigger_delay (double): The `trigger delay` is an additional delay in
seconds that shifts the time origin `t=0` with respect to the
trigger signal. (default: 0)
latency (double): The `latency` is a time in seconds that compensates
for different trigger latencies of different instruments. It works
as a constant `trigger_delay`.
latency_adjustment (int): In order to compensate for different trigger
latencies of different instrument types, it is necessary for some
instruments to wait for certain number of sequencer cycles after
receiving the trigger. This way, it is possible to align the
waveforms sent out from different instruments. The attribute
`latency_adjustment` is an additional latency given as number of
sequencer cycles that is used to increase the time an instrument
waits after receiving the trigger signal. (default: 0)
reset_phase (bool): A flag that specifies if the phase of the modulation
oscillator should be reset to 0 for every repetition of the
experiment before the waveform is played.
"""
target = attr.ib(
default=DeviceTypes.HDAWG,
validator=attr.validators.in_(
[
DeviceTypes.HDAWG,
DeviceTypes.UHFQA,
DeviceTypes.UHFLI,
DeviceTypes.SHFQA,
DeviceTypes.SHFSG,
]
),
)
clock_rate = attr.ib(default=2.4e9, validator=is_greater_equal(0))
period = attr.ib(default=100e-6, validator=is_greater_equal(0))
trigger_mode = attr.ib(
default=TriggerMode.SEND_TRIGGER,
converter=lambda m: TriggerMode.NONE if m == "None" else TriggerMode(m),
)
trigger_samples = attr.ib(
default=32,
validator=[is_greater_equal(32), is_multiple(16)],
)
repetitions = attr.ib(default=1)
alignment = attr.ib(
default=Alignment.END_WITH_TRIGGER, converter=lambda a: Alignment(a)
)
n_HW_loop = attr.ib(default=1, converter=int, validator=is_greater_equal(0))
dead_time = attr.ib(default=5e-6, validator=is_greater_equal(0))
trigger_delay = attr.ib(default=0)
latency = attr.ib(default=160e-9, validator=is_greater_equal(0))
latency_cycles = attr.ib(default=27, validator=is_greater_equal(0))
latency_adjustment = attr.ib(default=0, validator=is_greater_equal(0))
trigger_cmd_1 = attr.ib(default="//")
trigger_cmd_2 = attr.ib(default="//")
trigger_cmd_define = attr.ib(default="//\n")
trigger_cmd_send = attr.ib(default="//\n")
trigger_cmd_wait = attr.ib(default="//\n")
trigger_cmd_latency = attr.ib(default="//\n")
readout_cmd_trigger = attr.ib(default="//\n")
osc_cmd_reset = attr.ib(default="//\n")
wait_cycles = attr.ib(
default=28500, validator=is_greater_equal(0)
) # 95 us by default
dead_cycles = attr.ib(
default=1500, validator=is_greater_equal(0)
) # 5 us by default
wait_samples = attr.ib(
default=228000, validator=is_greater_equal(0)
) # 95 us by default (Assuming HDAWG)
dead_samples = attr.ib(
default=12000, validator=is_greater_equal(0)
) # 5 us by default (Assuming HDAWG)
reset_phase = attr.ib(default=False)
ct = list()
def set(self, **settings):
"""Sets attributes, updates related attributes and checks attributes."""
for key in settings:
if hasattr(self, key):
setattr(self, key, settings[key])
self.update_params()
self.check_attributes()
def get(self):
"""Updates and checks attributes, writes and returns the sequence program."""
self.update_params()
self.check_attributes()
self.write_sequence()
self.ct = list()
self.write_ct()
return [self.sequence, self.ct]
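# Editor's usage sketch (not part of zhinst-toolkit); parameter values are
# invented for illustration:
#
#     seq = Sequence()
#     seq.set(target=DeviceTypes.HDAWG, period=200e-6, repetitions=100,
#             trigger_mode=TriggerMode.SEND_TRIGGER, alignment=Alignment.END_WITH_TRIGGER)
#     sequence_str, command_table = seq.get()   # [program text, command table list]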
def write_sequence(self):
"""Create header for the sequencer program.
The header displays the sequence type, trigger mode and alignment
information of the program. Sequence type is temporarily selected as
`None` here. It will be overwritten by the child classes depending
on the actual sequence type.
"""
self.sequence = SequenceCommand.header_info(
SequenceType.NONE, self.trigger_mode, self.alignment
)
def write_ct(self):
"""generate commandtable"""
def update_params(self):
"""Update interrelated parameters."""
# Convert wait_time to number of samples
self.wait_samples = self.time_to_samples(
self.period - self.dead_time + self.trigger_delay
)
# Convert dead_time to number of samples
self.dead_samples = self.time_to_samples(self.dead_time - self.trigger_delay)
# Set the correct clock rate, trigger latency compensation
# and QA trigger command depending on the device type
if self.target in [DeviceTypes.HDAWG]:
self.clock_rate = 2.4e9
if self.trigger_mode in [TriggerMode.ZSYNC_TRIGGER]:
# Default trigger latency for HDAWG with ZSync trigger
# = 0 cycles
self.latency_cycles = 0 + self.latency_adjustment
else:
# Default trigger latency for HDAWG with Master trigger
# = 27 cycles
self.latency_cycles = 27 + self.latency_adjustment
# HDAWG has no quantum analyzer
self.readout_cmd_trigger = SequenceCommand.comment_line()
elif self.target in [DeviceTypes.UHFLI, DeviceTypes.UHFQA]:
self.clock_rate = 1.8e9
# Default trigger latency compensation for UHFQA = 0 cycles
self.latency_cycles = 0 + self.latency_adjustment
# UHFLI has no quantum analyzer; only UHFQA has one
if self.target in [DeviceTypes.UHFQA]:
self.readout_cmd_trigger = SequenceCommand.readout_trigger()
else:
self.readout_cmd_trigger = SequenceCommand.comment_line()
elif self.target in [DeviceTypes.SHFSG]:
self.clock_rate = 2e9
if self.trigger_mode in [TriggerMode.ZSYNC_TRIGGER]:
# Default trigger latency for SHFSG with ZSync trigger
# = 0 cycles
self.latency_cycles = 0 + self.latency_adjustment
else:
# Default trigger latency for SHFSG with Master trigger
# = 27 cycles
self.latency_cycles = 27 + self.latency_adjustment
# SHFSG has no quantum analyzer
self.readout_cmd_trigger = SequenceCommand.comment_line()
elif self.target in [DeviceTypes.SHFQA]:
self.clock_rate = 2e9
# Set the oscillator phase to 0 if the reset_phase option is on
if self.reset_phase:
self.osc_cmd_reset = SequenceCommand.reset_osc_phase()
else:
self.osc_cmd_reset = SequenceCommand.comment_line()
# Set the trigger latency command depending on the `latency_cycles`
if self.latency_cycles == 0:
self.trigger_cmd_latency = SequenceCommand.comment_line()
else:
# strip '\n' at the end and add an inline comment
self.trigger_cmd_latency = (
SequenceCommand.wait(self.latency_cycles).rstrip()
+ SequenceCommand.space()
+ SequenceCommand.inline_comment(
f"Wait to compensate for trigger latency"
)
)
# Set the trigger commands depending on the trigger mode
if self.trigger_mode == TriggerMode.NONE:
self.trigger_cmd_1 = SequenceCommand.comment_line()
self.trigger_cmd_2 = SequenceCommand.comment_line()
self.dead_cycles = self.time_to_cycles(self.dead_time)
self.trigger_cmd_define = SequenceCommand.comment_line()
self.trigger_cmd_send = SequenceCommand.comment_line()
self.trigger_cmd_wait = SequenceCommand.comment_line()
# No trigger latency compensation in TriggerMode.NONE
self.trigger_cmd_latency = SequenceCommand.comment_line()
elif self.trigger_mode == TriggerMode.SEND_AND_RECEIVE_TRIGGER:
# Define a waveform to send out as trigger
self.trigger_cmd_define = SequenceCommand.define_trigger(
self.trigger_samples
)
# Wait for an external clock signal before sending out the defined trigger
= ''
contig_info[contig_id]['molecule_type'] = 'chromosome'
contig_info[contig_id]['mash_nearest_neighbor'] = ''
contig_info[contig_id]['mash_neighbor_distance'] = ''
contig_info[contig_id]['mash_neighbor_identification'] = ''
return contig_info
def get_reconstructed_cluster_dists(mash_db, mash_distance, cluster_contig_links, out_dir, contig_seqs, num_threads=1):
m = mash()
cluster_dists = {}
for clust_id in cluster_contig_links:
contigs = cluster_contig_links[clust_id]
seq_dict = {}
tmp_fasta = os.path.join(out_dir, "clust_{}.fasta".format(clust_id))
for contig_id in contigs:
if contig_id in contig_seqs:
seq_dict[contig_id] = contig_seqs[contig_id]
write_fasta_dict(seq_dict, tmp_fasta)
distances = parseMash(
m.run_mash(reference_db=mash_db, input_fasta=tmp_fasta, table=False, num_threads=num_threads))
os.remove(tmp_fasta)
for query in distances:
for ref in distances[query]:
score = float(distances[query][ref])
if score <= mash_distance:
if clust_id not in cluster_dists:
cluster_dists[clust_id] = {}
cluster_dists[clust_id][ref] = score
for clust_id in cluster_dists:
cluster_dists[clust_id] = OrderedDict(
sorted(iter(list(cluster_dists[clust_id].items())), key=lambda x: x[1], reverse=False))
return cluster_dists
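def _example_sort_cluster_dists():
    """Editor's illustration (not MOB-suite code) of the threshold-and-sort step
    above, on invented mash distances: keep hits <= the cutoff, closest first.
    Assumes the module-level `from collections import OrderedDict` import used above."""
    distances = {'clustA': {'ref1': 0.01, 'ref2': 0.2, 'ref3': 0.04}}
    mash_distance = 0.05
    cluster_dists = {}
    for clust_id, hits in distances.items():
        kept = {ref: score for ref, score in hits.items() if score <= mash_distance}
        cluster_dists[clust_id] = OrderedDict(sorted(kept.items(), key=lambda x: x[1]))
    return cluster_dists  # {'clustA': OrderedDict([('ref1', 0.01), ('ref3', 0.04)])}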
def get_seq_links(contig_reference_coverage, reference_sequence_meta):
reference_clust_members = {}
for contig_id in contig_reference_coverage:
for ref_id in contig_reference_coverage[contig_id]:
if ref_id in reference_sequence_meta:
clust_id = reference_sequence_meta[ref_id]['primary_cluster_id']
if not clust_id in reference_clust_members:
reference_clust_members[clust_id] = {}
if not contig_id in reference_clust_members[clust_id]:
reference_clust_members[clust_id][contig_id] = 0
reference_clust_members[clust_id][contig_id] += 1
return reference_clust_members
def update_group_members(target_contigs, group_membership, contig_reference_coverage, reference_sequence_meta,
group_membership_key, reference_seq_key):
for contig_id in target_contigs:
types = target_contigs[contig_id]
if not contig_id in group_membership:
group_membership[contig_id] = {
'clust_id': None,
'score': 0,
'is_circular': False,
'contains_replicon': False,
'contains_relaxase': False,
'rep_type': '',
'mob_type': ''
}
group_membership[contig_id][group_membership_key] = target_contigs[contig_id]
contig_hit_scores = contig_reference_coverage[contig_id]
if group_membership[contig_id]['clust_id'] is not None:
continue
for hsp in contig_hit_scores:
hsp_score = contig_hit_scores[hsp]
if hsp not in reference_sequence_meta:
continue
clust_id = reference_sequence_meta[hsp]['primary_cluster_id']
if reference_sequence_meta[hsp][reference_seq_key] == '-':
hsp_rep_types = []
else:
hsp_rep_types = reference_sequence_meta[hsp][reference_seq_key].split(",")
for r in types:
if r in hsp_rep_types:
group_membership[contig_id]['clust_id'] = clust_id
break
return group_membership
def filter_contig_df_by_index(indicies, contig_blast_df, reference_hit_coverage):
for index in indicies:
row = contig_blast_df.iloc[index]
query = str(row['qseqid'])
pID = str(row['sseqid'])
score = float(row['bitscore'])
aln_length = int(row['length'])
total_len = int(row['slen'])
if pID not in reference_hit_coverage:
logging.warning("Seqid {} in blast results but not cluster file".format(pID))
continue
if pID in reference_hit_coverage:
reference_hit_coverage[pID]['score'] -= score
reference_hit_coverage[pID]['covered_bases'] -= aln_length
else:
logging.warning("{} not found".format(pID))
return reference_hit_coverage
def main():
args = parse_args()
if args.debug:
logger = init_console_logger(3)
else:
logger = init_console_logger(2)
logger.info("MOB-recon version {} ".format(__version__))
logger.debug("Debug log reporting set on successfully")
check_dependencies(logger)
validate_args(args, logger)
max_contig_size = args.max_contig_size
max_plasmid_size = args.max_plasmid_size
keep_tmp = args.keep_tmp
input_fasta = args.infile
out_dir = args.outdir
num_threads = args.num_threads
tmp_dir = os.path.join(out_dir, '__tmp')
fixed_fasta = os.path.join(tmp_dir, 'fixed.input.fasta')
chromosome_file = os.path.join(out_dir, 'chromosome.fasta')
replicon_blast_results = os.path.join(tmp_dir, 'replicon_blast_results.txt')
mob_blast_results = os.path.join(tmp_dir, 'mob_blast_results.txt')
mpf_blast_results = os.path.join(tmp_dir, 'mpf_blast_results.txt')
orit_blast_results = os.path.join(tmp_dir, 'orit_blast_results.txt')
repetitive_blast_results = os.path.join(tmp_dir, 'repetitive_blast_results.txt')
contig_blast_results = os.path.join(tmp_dir, 'contig_blast_results.txt')
prefix = None
if args.prefix is not None:
prefix = args.prefix
contig_report = os.path.join(out_dir, 'contig_report.txt')
if prefix is not None:
contig_report = os.path.join(out_dir, "{}.contig_report.txt".format(prefix))
chromosome_file = os.path.join(out_dir, "{}.chromosome.fasta".format(prefix))
logger.info('Processing fasta file {}'.format(args.infile))
logger.info('Analysis directory {}'.format(args.outdir))
database_dir = os.path.abspath(args.database_directory)
if database_dir == default_database_dir:
plasmid_ref_db = args.plasmid_db
mob_ref = args.plasmid_mob
mash_db = args.plasmid_mash_db
replicon_ref = args.plasmid_replicons
plasmid_meta = args.plasmid_meta
repetitive_mask_file = args.repetitive_mask
mpf_ref = args.plasmid_mpf
plasmid_orit = args.plasmid_orit
verify_init(logger, database_dir)
else:
plasmid_ref_db = os.path.join(database_dir, 'ncbi_plasmid_full_seqs.fas')
mob_ref = os.path.join(database_dir, 'mob.proteins.faa')
mash_db = os.path.join(database_dir, 'ncbi_plasmid_full_seqs.fas.msh')
replicon_ref = os.path.join(database_dir, 'rep.dna.fas')
plasmid_meta = os.path.join(database_dir, 'clusters.txt')
repetitive_mask_file = os.path.join(database_dir, 'repetitive.dna.fas')
mpf_ref = os.path.join(database_dir, 'mpf.proteins.faa')
plasmid_orit = os.path.join(database_dir, 'orit.fas')
ETE3DBTAXAFILE = os.path.abspath(database_dir + "/taxa.sqlite")
LIT_PLASMID_TAXONOMY_FILE = os.path.join(database_dir, "host_range_literature_plasmidDB.txt")
NCBI_PLASMID_TAXONOMY_FILE = plasmid_meta
if args.sample_id is None:
sample_id = re.sub(r"\.(fasta|fas|fa){1,1}", "", os.path.basename(args.infile))
else:
sample_id = args.sample_id
verify_init(logger, database_dir)
run_overhang = args.run_overhang
unicycler_contigs = args.unicycler_contigs
# initialize analysis directory
if not os.path.isdir(args.outdir):
os.mkdir(args.outdir, 0o755)
elif not args.force:
logger.error("Error output directory exists, please specify a new directory or use --force to overwrite")
sys.exit(-1)
else:
shutil.rmtree(args.outdir)
os.mkdir(args.outdir, 0o755)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir, 0o755)
else:
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir, 0o755)
# Initialize clustering distance thresholds
if not (args.primary_cluster_dist >= 0 and args.primary_cluster_dist <= 1):
logging.error('Error distance thresholds must be between 0 - 1: {}'.format(args.primary_cluster_dist))
sys.exit()
else:
primary_distance = float(args.primary_cluster_dist)
if not (args.secondary_cluster_dist >= 0 and args.secondary_cluster_dist <= 1):
logging.error('Error distance thresholds must be between 0 - 1: {}'.format(args.secondary_cluster_dist))
sys.exit()
else:
secondary_distance = float(args.secondary_cluster_dist)
# Input numeric params
min_overlapp = int(args.min_overlap)
min_length = int(args.min_length)
min_rep_ident = float(args.min_rep_ident)
min_mob_ident = float(args.min_mob_ident)
min_mpf_ident = float(args.min_mob_ident) # Left in for future if we decide to allow modification
min_con_ident = float(args.min_con_ident)
min_rpp_ident = float(args.min_rpp_ident)
min_rep_cov = float(args.min_rep_cov)
min_mob_cov = float(args.min_mob_cov)
min_mpf_cov = float(args.min_mob_cov) # Left in for future if we decide to allow modification
min_con_cov = float(args.min_con_cov)
min_rpp_cov = float(args.min_rpp_cov)
min_rep_evalue = float(args.min_rep_evalue)
min_mob_evalue = float(args.min_mob_evalue)
min_mpf_evalue = float(args.min_mob_evalue) # Left in for future if we decide to allow modification
min_con_evalue = float(args.min_con_evalue)
min_rpp_evalue = float(args.min_rpp_evalue)
# Parse reference cluster information
reference_sequence_meta = read_sequence_info(plasmid_meta, MOB_CLUSTER_INFO_HEADER)
# process input fasta
logger.info('Writing cleaned header input fasta file from {} to {}'.format(input_fasta, fixed_fasta))
id_mapping = fix_fasta_header(input_fasta, fixed_fasta)
contig_seqs = read_fasta_dict(fixed_fasta)
contig_info = {}
br = BlastRunner(fixed_fasta, tmp_dir)
br.makeblastdb(fixed_fasta, dbtype='nucl', logging=logging)
del (br)
# Detect circular sequences
circular_contigs = {}
if run_overhang:
logger.info('Running internal circular contig detection on {}'.format(fixed_fasta))
circular_contigs = circularize(fixed_fasta, tmp_dir, logging)
for id in contig_seqs:
seq = contig_seqs[id]
contig_info[id] = {}
for feature in MOB_RECON_INFO_HEADER:
contig_info[id][feature] = ''
contig_info[id]['md5'] = calc_md5(seq)
contig_info[id]['gc'] = GC(seq)
contig_info[id]['size'] = len(seq)
contig_info[id]['contig_id'] = id
contig_info[id]['sample_id'] = sample_id
contig_info[id]['molecule_type'] = 'chromosome'
contig_info[id]['filtering_reason'] = 'none'
if run_overhang:
if id in circular_contigs:
contig_info[id]['circularity_status'] = 'circular'
else:
contig_info[id]['circularity_status'] = 'incomplete'
if unicycler_contigs:
if 'circular=true' in id or id in circular_contigs:
contig_info[id]['circularity_status'] = 'circular'
elif id not in circular_contigs:
contig_info[id]['circularity_status'] = 'incomplete'
if contig_info[id]['circularity_status'] == '':
contig_info[id]['circularity_status'] = 'not tested'
# Blast reference databases
identify_biomarkers(contig_info, fixed_fasta, tmp_dir, min_length, logging, \
replicon_ref, min_rep_ident, min_rep_cov, min_rep_evalue, replicon_blast_results, \
mob_ref, min_mob_ident, min_mob_cov, min_mob_evalue, mob_blast_results, \
mpf_ref, min_mpf_ident, min_mpf_cov, min_mpf_evalue, mpf_blast_results, \
repetitive_mask_file, min_rpp_ident, min_rpp_cov, min_rpp_evalue, \
plasmid_orit, orit_blast_results, repetitive_blast_results, \
num_threads=num_threads)
# Filtering contigs against chromosome database
chrom_filter = False
if args.genome_filter_db_prefix:
chrom_filter = True
genome_filter_db_prefix = args.genome_filter_db_prefix
logger.info('Genome filter sequences provided: {}'.format(genome_filter_db_prefix))
matched = (glob.glob(genome_filter_db_prefix + "*"))
extensions = ['nsq', 'nin', 'nhr']
found = [0, 0, 0]
for f in matched:
for i in range(0, len(extensions)):
e = extensions[i]
if e in f:
found[i] += 1
for i in found:
if i == 0:
logger.error('Error blast database not found with prefix: {}'.format(genome_filter_db_prefix))
sys.exit()
if not os.path.isfile(genome_filter_db_prefix + '.msh'):
logger.error('Error mash sketch not found with prefix: {}'.format(genome_filter_db_prefix))
sys.exit()
if chrom_filter:
cutoff_distance = float(args.mash_genome_neighbor_threshold)
chr_mash_dists = os.path.join(tmp_dir, 'mash_chr_dists.txt')
chr_blast_filter = os.path.join(tmp_dir, 'chr_contig_filter_report.txt')
chr_mash_sketch = genome_filter_db_prefix + ".msh"
close_genome_reps = find_mash_genomes(chr_mash_sketch, fixed_fasta, chr_mash_dists, cutoff_distance,
num_threads)
if len(close_genome_reps) > 0:
logger.info('Found close genome matches: {}'.format(",".join(close_genome_reps)))
seq_id_file = os.path.join(tmp_dir, "seqids.txt")
sf = open(seq_id_file, 'w')
for s in close_genome_reps:
sf.write("{}\n".format(s))
sf.close()
# fix labels to match the seq id format parsed by makeblastdb
for i in range(0, len(close_genome_reps)):
close_genome_reps[i] = "ref|{}|".format(close_genome_reps[i])
blastn(input_fasta=fixed_fasta, blastdb=genome_filter_db_prefix, min_ident=min_con_ident,
min_cov=min_con_cov,
evalue=min_con_evalue, min_length=min_length, out_dir=tmp_dir,
blast_results_file=chr_blast_filter, num_threads=num_threads, logging=logging,
seq_filterfile=seq_id_file)
chromosome_filter_seqs = BlastReader(chr_blast_filter, logging).df.drop(0)['qseqid'].tolist()
for contig_id in chromosome_filter_seqs:
if contig_id in contig_info:
contig_info[contig_id]['filtering_reason'] = 'chromosome'
logger.info('Filtering contig: {} due to inclusion in genome filter {}'.format(contig_id,
genome_filter_db_prefix))
else:
logger.error('Contig: {} not found in contig_df this is likely an error'.format(contig_id))
del (chromosome_filter_seqs)
else:
logger.info('No close genome matches found')
# Filter out sequences based on user filter
if args.filter_db:
filter_db = args.filter_db
logger.info('Filter sequences provided: {}'.format(filter_db))
if not os.path.isfile(filter_db + '.nsq') or \
not os.path.isfile(filter_db + '.nin') or \
not os.path.isfile(filter_db + '.nhr'):
br = BlastRunner(filter_db, os.path.dirname(filter_db))
br.makeblastdb(filter_db, dbtype='nucl', logging=logging)
run_filter = True
else:
run_filter = False
if run_filter:
logger.info('Blasting input fasta {} against filter db {}'.format(input_fasta, filter_db))
blast_filter = os.path.join(tmp_dir, 'contig_filter_report.txt')
blastn(input_fasta=fixed_fasta, blastdb=filter_db, min_ident=min_con_ident, min_cov=min_con_cov,
evalue=min_con_evalue, min_length=min_length, out_dir=tmp_dir,
blast_results_file=blast_filter, num_threads=num_threads, logging=logging)
user_filter_seqs = BlastReader(blast_filter, logging).df
if len(user_filter_seqs) > 0:
user_filter_seqs = user_filter_seqs.drop(0)['qseqid'].tolist()
else:
user_filter_seqs = []
for contig_id in user_filter_seqs:
if contig_id in contig_info:
contig_info[contig_id]['filtering_reason'] = 'user'
logger.info(
'Filtering contig: {} due to inclusion in genome filter {}'.format(contig_id, filter_db))
else:
logger.error('Contig: {} not found in contig_df this is likely an error'.format(contig_id))
del (user_filter_seqs)
# blast plasmid database
logging.info("Blasting contigs against reference sequence db: {}".format(plasmid_ref_db))
blastn(input_fasta=fixed_fasta, blastdb=plasmid_ref_db, min_ident=min_con_ident, min_cov=min_con_cov,
evalue=min_con_evalue, min_length=min_length, out_dir=tmp_dir,
blast_results_file=contig_blast_results, num_threads=num_threads, logging=logging, seq_filterfile=None)
logging.info("Filtering contig blast results: {}".format(contig_blast_results))
contig_blast_df = BlastReader(contig_blast_results, logging).df
if len(contig_blast_df) > 0:
contig_blast_df = fixStart(contig_blast_df.drop(0)).sort_values(
['sseqid', 'qseqid', 'sstart', 'send', 'bitscore'])
contig_blast_df = filter_overlaping_records(contig_blast_df, 500, 'qseqid', 'qstart', 'qend', 'bitscore')
contig_blast_df.reset_index(drop=True)
# remove blast formatting of seq id
for index, row in contig_blast_df.iterrows():
line = row['sseqid'].split('|')
if len(line) >= 2:
contig_blast_df.at[index, 'sseqid'] = line[1]
        self.assertEqual(demjson.encode(0), '0')
self.assertEqual(demjson.encode(12345), '12345')
self.assertEqual(demjson.encode(-12345), '-12345')
# Floating point numbers must be "approximately" compared to
# allow for slight changes due to rounding errors in the
# least significant digits.
self.assertMatchesRegex(demjson.encode(1.5),
r'1.(' \
r'(5(000+[0-9])?)' \
r'|' \
r'(4999(9+[0-9])?)' \
r')' )
self.assertMatchesRegex(demjson.encode(-1.5),
r'-1.(' \
r'(5(000+[0-9])?)' \
r'|' \
r'(4999(9+[0-9])?)' \
r')' )
self.assertMatchesRegex(demjson.encode(1.2300456e78),
r'1.230045(' \
r'(6(0+[0-9])?)' r'|' \
r'(59(9+[0-9])?)' \
r')[eE][+]0*78')
self.assertMatchesRegex(demjson.encode(1.2300456e-78),
r'1.230045(' \
r'(6(0+[0-9])?)' r'|' \
r'(59(9+[0-9])?)' \
r')[eE][-]0*78')
self.assertMatchesRegex(demjson.encode(-1.2300456e78),
r'-1.230045(' \
r'(6(0+[0-9])?)' r'|' \
r'(59(9+[0-9])?)' \
r')[eE][+]0*78')
self.assertMatchesRegex(demjson.encode(-1.2300456e-78),
r'-1.230045(' \
r'(6(0+[0-9])?)' r'|' \
r'(59(9+[0-9])?)' \
r')[eE][-]0*78')
self.assertMatchesRegex(demjson.encode(0.0000043), r'4.3[0[0-9]*]?[eE]-0*6$')
self.assertMatchesRegex(demjson.encode(40000000000), r'(4[eE]+0*10)|(40000000000)$',
'Large integer not encoded properly')
def testEncodeNegativeZero(self):
self.assertTrue(demjson.encode(-0.0) in ['-0','-0.0'],
'Float -0.0 is not encoded as a negative zero')
if decimal:
self.assertTrue(demjson.encode( decimal.Decimal('-0') ) in ['-0','-0.0'],
'Decimal -0 is not encoded as a negative zero')
def testJsonInt(self):
self.assertTrue( isinstance( demjson.json_int(0), (int,long) ) )
self.assertEqual(demjson.json_int(0), 0)
self.assertEqual(demjson.json_int(555999), 555999)
self.assertEqual(demjson.json_int(-555999), -555999)
self.assertEqual(demjson.json_int(12131415161718191029282726), 12131415161718191029282726)
self.assertEqual(demjson.json_int('123'), 123)
self.assertEqual(demjson.json_int('+123'), 123)
self.assertEqual(demjson.json_int('-123'), -123)
self.assertEqual(demjson.json_int('123',8), 83)
self.assertEqual(demjson.json_int('123',16), 291)
self.assertEqual(demjson.json_int('110101',2), 53)
self.assertEqual( 123, demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_DECIMAL))
self.assertEqual( 123, demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_HEX))
self.assertEqual( 123, demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_OCTAL))
self.assertEqual( 123, demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL))
self.assertEqual( 123, demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_BINARY))
self.assertEqual(demjson.json_int(123), demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_DECIMAL))
self.assertEqual(demjson.json_int(123), demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_HEX))
self.assertEqual(demjson.json_int(123), demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_OCTAL))
self.assertEqual(demjson.json_int(123), demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL))
self.assertEqual(demjson.json_int(123), demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_BINARY))
self.assertEqual(demjson.json_int(123).json_format(), '123' )
self.assertEqual(demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_DECIMAL).json_format(), '123' )
self.assertEqual(demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_HEX).json_format(), '0x7b' )
self.assertEqual(demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_OCTAL).json_format(), '0o173' )
self.assertEqual(demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL).json_format(), '0173' )
self.assertEqual(demjson.json_int(0,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL).json_format(), '0' )
self.assertEqual(demjson.json_int(123,number_format=demjson.NUMBER_FORMAT_BINARY).json_format(), '0b1111011' )
def testEncodeDecimalIntegers(self):
self.assertEqual(demjson.encode( demjson.json_int(0)), '0')
self.assertEqual(demjson.encode( demjson.json_int(123)), '123')
self.assertEqual(demjson.encode( demjson.json_int(-123)), '-123')
self.assertEqual(demjson.encode( demjson.json_int(12345678901234567890888)), '12345678901234567890888')
def testEncodeHexIntegers(self):
self.assertEqual(demjson.encode( demjson.json_int(0x0,number_format=demjson.NUMBER_FORMAT_HEX)), '0x0')
self.assertEqual(demjson.encode( demjson.json_int(0xff,number_format=demjson.NUMBER_FORMAT_HEX)), '0xff')
self.assertEqual(demjson.encode( demjson.json_int(-0x7f,number_format=demjson.NUMBER_FORMAT_HEX)), '-0x7f')
self.assertEqual(demjson.encode( demjson.json_int(0x123456789abcdef,number_format=demjson.NUMBER_FORMAT_HEX)), '0x123456789abcdef')
def testEncodeOctalIntegers(self):
self.assertEqual(demjson.encode( demjson.json_int(0,number_format=demjson.NUMBER_FORMAT_OCTAL)), '0o0')
self.assertEqual(demjson.encode( demjson.json_int(359,number_format=demjson.NUMBER_FORMAT_OCTAL)), '0o547')
self.assertEqual(demjson.encode( demjson.json_int(-359,number_format=demjson.NUMBER_FORMAT_OCTAL)), '-0o547')
def testEncodeLegacyOctalIntegers(self):
self.assertEqual(demjson.encode( demjson.json_int(0,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL)), '0')
self.assertEqual(demjson.encode( demjson.json_int(1,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL)), '01')
self.assertEqual(demjson.encode( demjson.json_int(359,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL)), '0547')
self.assertEqual(demjson.encode( demjson.json_int(-359,number_format=demjson.NUMBER_FORMAT_LEGACYOCTAL)), '-0547')
def testIntAsFloat(self):
self.assertEqual(demjson.decode('[0,-5,600,0xFF]', int_as_float=True), [0.0,-5.0,600.0,255.0] )
if decimal:
self.assertEqual(demjson.decode('[0,-5,600,0xFF]', int_as_float=True, float_type=demjson.NUMBER_DECIMAL),
[decimal.Decimal('0.0'), decimal.Decimal('-5.0'), decimal.Decimal('600.0'), decimal.Decimal('255.0')] )
self.assertEqual([type(x) for x in demjson.decode('[0,-5,600,0xFF]', int_as_float=True, float_type=demjson.NUMBER_DECIMAL)],
[decimal.Decimal, decimal.Decimal, decimal.Decimal, decimal.Decimal] )
def testKeepFormat(self):
self.assertEqual(demjson.encode(demjson.decode( '[3,03,0o3,0x3,0b11]', keep_format=True )), '[3,03,0o3,0x3,0b11]' )
def testEncodeNaN(self):
self.assertEqual(demjson.encode( demjson.nan ), 'NaN')
self.assertEqual(demjson.encode( -demjson.nan ), 'NaN')
if decimal:
self.assertEqual(demjson.encode( decimal.Decimal('NaN') ), 'NaN')
self.assertEqual(demjson.encode( decimal.Decimal('sNaN') ), 'NaN')
def testEncodeInfinity(self):
self.assertEqual(demjson.encode( demjson.inf ), 'Infinity')
self.assertEqual(demjson.encode( -demjson.inf ), '-Infinity')
self.assertEqual(demjson.encode( demjson.neginf ), '-Infinity')
if decimal:
self.assertEqual(demjson.encode( decimal.Decimal('Infinity') ), 'Infinity')
self.assertEqual(demjson.encode( decimal.Decimal('-Infinity') ), '-Infinity')
def testDecodeString(self):
self.assertEqual(demjson.decode(r'""'), '')
self.assertEqual(demjson.decode(r'"a"'), 'a')
self.assertEqual(demjson.decode(r'"abc def"'), 'abc def')
self.assertEqual(demjson.decode(r'"\n\t\\\"\b\r\f"'), '\n\t\\"\b\r\f')
self.assertEqual(demjson.decode(r'"\abc def"'), 'abc def')
def testEncodeString(self):
self.assertEqual(demjson.encode(''), r'""')
self.assertEqual(demjson.encode('a'), r'"a"')
self.assertEqual(demjson.encode('abc def'), r'"abc def"')
self.assertEqual(demjson.encode('\n'), r'"\n"')
self.assertEqual(demjson.encode('\n\t\r\b\f'), r'"\n\t\r\b\f"')
self.assertEqual(demjson.encode('\n'), r'"\n"')
self.assertEqual(demjson.encode('"'), r'"\""')
self.assertEqual(demjson.encode('\\'), '"\\\\"')
def testDecodeStringWithNull(self):
self.assertEqual(demjson.decode('"\x00"',warnings=False), '\0')
self.assertEqual(demjson.decode('"a\x00b"',warnings=False), 'a\x00b')
def testDecodeStringUnicodeEscape(self):
self.assertEqual(demjson.decode(r'"\u0000"',warnings=False), '\0')
self.assertEqual(demjson.decode(r'"\u0061"'), 'a')
self.assertEqual(demjson.decode(r'"\u2012"'), u'\u2012')
self.assertEqual(demjson.decode(r'"\u1eDc"'), u'\u1edc')
self.assertEqual(demjson.decode(r'"\uffff"'), u'\uffff')
self.assertEqual(demjson.decode(r'"\u00a012"'), u'\u00a0' + '12')
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u041"', strict=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u041Z"', strict=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u"', strict=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\uZ"', strict=True)
def testEncodeStringUnicodeEscape(self):
self.assertEqual(demjson.encode('\0', escape_unicode=True), r'"\u0000"')
self.assertEqual(demjson.encode(u'\u00e0', escape_unicode=True), r'"\u00e0"')
self.assertEqual(demjson.encode(u'\u2012', escape_unicode=True), r'"\u2012"')
def testHtmlSafe(self):
self.assertEqual(demjson.encode('<', html_safe=True), r'"\u003c"')
self.assertEqual(demjson.encode('>', html_safe=True), r'"\u003e"')
self.assertEqual(demjson.encode('&', html_safe=True), r'"\u0026"')
self.assertEqual(demjson.encode('/', html_safe=True), r'"\/"')
self.assertEqual(demjson.encode('a<b>c&d/e', html_safe=True), r'"a\u003cb\u003ec\u0026d\/e"')
self.assertEqual(demjson.encode('a<b>c&d/e', html_safe=False), r'"a<b>c&d/e"')
def testDecodeStringExtendedUnicodeEscape(self):
self.assertEqual(demjson.decode(r'"\u{0041}"',allow_extended_unicode_escapes=True), u'A')
self.assertEqual(demjson.decode(r'"\u{1aFe}"',allow_extended_unicode_escapes=True), u'\u1afe')
self.assertEqual(demjson.decode(r'"\u{41}"',allow_extended_unicode_escapes=True), u'A')
self.assertEqual(demjson.decode(r'"\u{1}"',allow_extended_unicode_escapes=True), u'\u0001')
self.assertEqual(demjson.decode(r'"\u{00000000000041}"',allow_extended_unicode_escapes=True), u'A')
self.assertEqual(demjson.decode(r'"\u{1000a}"',allow_extended_unicode_escapes=True), u'\U0001000a')
self.assertEqual(demjson.decode(r'"\u{10ffff}"',allow_extended_unicode_escapes=True), u'\U0010FFFF')
self.assertEqual(demjson.decode(r'"\u{0000010ffff}"',allow_extended_unicode_escapes=True), u'\U0010FFFF')
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{0041}"', strict=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{110000}"', allow_extended_unicode_escapes=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{012g}"', allow_extended_unicode_escapes=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{ 0041}"', allow_extended_unicode_escapes=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{0041 }"', allow_extended_unicode_escapes=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{0041"', allow_extended_unicode_escapes=True)
self.assertRaises(demjson.JSONDecodeError, demjson.decode, r'"\u{}"', allow_extended_unicode_escapes=True)
def testAutoDetectEncodingWithCustomUTF32(self):
old_use_custom = demjson.helpers.always_use_custom_codecs
try:
demjson.helpers.always_use_custom_codecs = True
self.runTestAutoDetectEncoding()
finally:
demjson.helpers.always_use_custom_codecs = old_use_custom
def testAutoDetectEncodingWithBuiltinUTF32(self):
old_use_custom = demjson.helpers.always_use_custom_codecs
try:
demjson.helpers.always_use_custom_codecs = False
self.runTestAutoDetectEncoding()
finally:
demjson.helpers.always_use_custom_codecs = old_use_custom
def runTestAutoDetectEncoding(self):
QT = ord('"')
TAB = ord('\t')
FOUR = ord('4')
TWO = ord('2')
# Plain byte strings, without BOM
self.assertEqual(demjson.decode( rawbytes([ 0, 0, 0, FOUR ]) ), 4 ) # UTF-32BE
self.assertEqual(demjson.decode( rawbytes([ 0, 0, 0, FOUR, 0, 0, 0, TWO ]) ), 42 )
self.assertEqual(demjson.decode( rawbytes([ FOUR, 0, 0, 0 ]) ), 4 ) # UTF-32LE
self.assertEqual(demjson.decode( rawbytes([ FOUR, 0, 0, 0, TWO, 0, 0, 0 ]) ), 42 )
self.assertEqual(demjson.decode( rawbytes([ 0, FOUR, 0, TWO ]) ), 42 ) # UTF-16BE
self.assertEqual(demjson.decode( rawbytes([ FOUR, 0, TWO, 0 ]) ), 42 ) # UTF-16LE
self.assertEqual(demjson.decode( rawbytes([ 0, FOUR ]) ), 4 ) #UTF-16BE
self.assertEqual(demjson.decode( rawbytes([ FOUR, 0 ]) ), 4 ) #UTF-16LE
self.assertEqual(demjson.decode( rawbytes([ FOUR, TWO ]) ), 42 ) # UTF-8
self.assertEqual(demjson.decode( rawbytes([ TAB, FOUR, TWO ]) ), 42 ) # UTF-8
self.assertEqual(demjson.decode( rawbytes([ FOUR ]) ), 4 ) # UTF-8
# With byte-order marks (BOM)
# UTF-32BE
self.assertEqual(demjson.decode( rawbytes([ 0, 0, 0xFE, 0xFF, 0, 0, 0, FOUR ]) ), 4 )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ 0, 0, 0xFE, 0xFF, FOUR, 0, 0, 0 ]) )
# UTF-32LE
self.assertEqual(demjson.decode( rawbytes([ 0xFF, 0xFE, 0, 0, FOUR, 0, 0, 0 ]) ), 4 )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ 0xFF, 0xFE, 0, 0, 0, 0, 0, FOUR ]) )
# UTF-16BE
self.assertEqual(demjson.decode( rawbytes([ 0xFE, 0xFF, 0, FOUR ]) ), 4 )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ 0xFE, 0xFF, FOUR, 0 ]) )
# UTF-16LE
self.assertEqual(demjson.decode( rawbytes([ 0xFF, 0xFE, FOUR, 0 ]) ), 4 )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ 0xFF, 0xFE, 0, FOUR ]) )
# Invalid Unicode strings
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ 0 ]) )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ TAB, FOUR, TWO, 0 ]) )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ FOUR, 0, 0 ]) )
self.assertRaises(demjson.JSONDecodeError,
demjson.decode, rawbytes([ FOUR, 0, 0, TWO ]) )
def testDecodeStringRawUnicode(self):
QT = ord('"')
self.assertEqual(demjson.decode(rawbytes([ QT,0xC3,0xA0,QT ]),
encoding='utf-8'), u'\u00e0')
self.assertEqual(demjson.decode(rawbytes([ QT,0,0,0, 0xE0,0,0,0, QT,0,0,0 ]),
encoding='ucs4le'), u'\u00e0')
self.assertEqual(demjson.decode(rawbytes([ 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]),
encoding='ucs4be'), u'\u00e0')
self.assertEqual(demjson.decode(rawbytes([ 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]),
encoding='utf-32be'), u'\u00e0')
self.assertEqual(demjson.decode(rawbytes([ 0,0,0xFE,0xFF, 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]),
encoding='ucs4'), u'\u00e0')
def testEncodeStringRawUnicode(self):
QT = ord('"')
self.assertEqual(demjson.encode(u'\u00e0', escape_unicode=False, encoding='utf-8'),
rawbytes([ QT, 0xC3, 0xA0, QT ]) )
self.assertEqual(demjson.encode(u'\u00e0', escape_unicode=False, encoding='ucs4le'),
rawbytes([ QT,0,0,0, 0xE0,0,0,0, QT,0,0,0 ]) )
self.assertEqual(demjson.encode(u'\u00e0', escape_unicode=False, encoding='ucs4be'),
rawbytes([ 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]) )
self.assertEqual(demjson.encode(u'\u00e0', escape_unicode=False, encoding='utf-32be'),
rawbytes([ 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]) )
self.assertTrue(demjson.encode(u'\u00e0', escape_unicode=False, encoding='ucs4')
in [rawbytes([ 0,0,0xFE,0xFF, 0,0,0,QT, 0,0,0,0xE0, 0,0,0,QT ]),
rawbytes([ 0xFF,0xFE,0,0, QT,0,0,0, 0xE0,0,0,0, QT,0,0,0 ]) ])
def testEncodeStringWithSpecials(self):
# Make sure that certain characters are always \u-encoded even if the
# output encoding could have represented them in the raw.
# Test U+001B escape - a control character
self.assertEqual(demjson.encode(u'\u001B', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u001b"' ]) )
# Test U+007F delete - a control character
self.assertEqual(demjson.encode(u'\u007F', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u007f"' ]) )
# Test U+00AD soft hyphen - a format control character
self.assertEqual(demjson.encode(u'\u00AD', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u00ad"' ]) )
# Test U+200F right-to-left mark
self.assertEqual(demjson.encode(u'\u200F', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u200f"' ]) )
# Test U+2028 line separator
self.assertEqual(demjson.encode(u'\u2028', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u2028"' ]) )
# Test U+2029 paragraph separator
self.assertEqual(demjson.encode(u'\u2029', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\u2029"' ]) )
# Test U+E007F cancel tag
self.assertEqual(demjson.encode(u'\U000E007F', escape_unicode=False, encoding='utf-8'),
rawbytes([ ord(c) for c in '"\\udb40\\udc7f"' ]) )
def testDecodeSupplementalUnicode(self):
import sys
if sys.maxunicode > 65535:
self.assertEqual(demjson.decode( rawbytes([ ord(c) for c in r'"\udbc8\udf45"' ]) ),
u'\U00102345')
self.assertEqual(demjson.decode( rawbytes([ ord(c) for c in r'"\ud800\udc00"' ]) ),
u'\U00010000')
self.assertEqual(demjson.decode( rawbytes([ ord(c) for c in r'"\udbff\udfff"' ]) ),
u'\U0010ffff')
for bad_case in [r'"\ud801"', r'"\udc02"',
r'"\ud801\udbff"', r'"\ud801\ue000"',
r'"\ud801\u2345"']:
                # Pass decode and its argument separately so assertRaises performs the call
                # itself; invoking decode directly here would raise before the assertion runs.
                self.assertRaises(demjson.JSONDecodeError,
                                  demjson.decode, rawbytes([ ord(c) for c in bad_case ]) )
def testEncodeSupplementalUnicode(self):
import sys
if sys.maxunicode > 65535:
self.assertEqual(demjson.encode(u'\U00010000',encoding='ascii'),
rawbytes([ ord(c) for c in r'"\ud800\udc00"' ]) )
self.assertEqual(demjson.encode(u'\U00102345',encoding='ascii'),
rawbytes([ ord(c) for c in r'"\udbc8\udf45"' ]) )
self.assertEqual(demjson.encode(u'\U0010ffff',encoding='ascii'),
rawbytes([ ord(c) for c in r'"\udbff\udfff"' ]) )
def have_codec(self, name):
import codecs
try:
i = codecs.lookup(name)
except LookupError:
return False
else:
return True
def testDecodeWithWindows1252(self):
have_cp1252 = self.have_codec('cp1252')
if have_cp1252:
# Use Windows-1252 code page. Note character 0x8c is U+0152, which
# is different than ISO8859-1.
d = rawbytes([ ord('"'), ord('a'), 0xe0, 0x8c, ord('"') ])
self.assertEqual(demjson.decode( d, encoding='cp1252' ),
u"a\u00e0\u0152")
def testDecodeWithEBCDIC(self):
have_ebcdic = self.have_codec('ibm037')
if have_ebcdic:
# Try EBCDIC
d = rawbytes([ 0x7f, 0xc1, 0xc0, 0x7c, 0xe0, 0xa4, 0xf0, 0xf1, 0xf5, 0xf2, 0x7f ])
self.assertEqual(demjson.decode( d, encoding='ibm037' ),
u"A{@\u0152")
def testDecodeWithISO8859_1(self):
have_iso8859_1 = self.have_codec('iso8859-1')
if have_iso8859_1:
# Try ISO-8859-1
d = rawbytes([ ord('"'), ord('a'), 0xe0, ord('\\'), ord('u'), ord('0'), ord('1'), ord('5'), ord('2'), ord('"') ])
self.assertEqual(demjson.decode( d, encoding='iso8859-1' ),
u"a\u00e0\u0152")
def testDecodeWithCustomCodec(self):
# Try Rot-1
ci = rot_one.lookup('rot-1')
d = rawbytes([ ord('"'), ord('A'), ord('B'), ord('Y'), ord('Z'), ord(' '), ord('5'), ord('"') ])
self.assertEqual(demjson.decode( d, encoding=ci ),
u"ZAXY 5")
        software_config (google.cloud.orchestration.airflow.service_v1beta1.types.SoftwareConfig):
            The configuration settings for software
            inside the environment.
node_config (google.cloud.orchestration.airflow.service_v1beta1.types.NodeConfig):
The configuration used for the Kubernetes
Engine cluster.
private_environment_config (google.cloud.orchestration.airflow.service_v1beta1.types.PrivateEnvironmentConfig):
The configuration used for the Private IP
Cloud Composer environment.
web_server_network_access_control (google.cloud.orchestration.airflow.service_v1beta1.types.WebServerNetworkAccessControl):
Optional. The network-level access control
policy for the Airflow web server. If
unspecified, no network-level access
restrictions will be applied.
database_config (google.cloud.orchestration.airflow.service_v1beta1.types.DatabaseConfig):
Optional. The configuration settings for
Cloud SQL instance used internally by Apache
Airflow software.
web_server_config (google.cloud.orchestration.airflow.service_v1beta1.types.WebServerConfig):
Optional. The configuration settings for the
Airflow web server App Engine instance.
airflow_uri (str):
Output only. The URI of the Apache Airflow Web UI hosted
within this environment (see `Airflow web
interface </composer/docs/how-to/accessing/airflow-web-interface>`__).
encryption_config (google.cloud.orchestration.airflow.service_v1beta1.types.EncryptionConfig):
Optional. The encryption options for the
Cloud Composer environment and its dependencies.
Cannot be updated.
maintenance_window (google.cloud.orchestration.airflow.service_v1beta1.types.MaintenanceWindow):
Optional. The maintenance window is the
period when Cloud Composer components may
undergo maintenance. It is defined so that
maintenance is not executed during peak hours or
critical time periods.
The system will not be under maintenance for
every occurrence of this window, but when
maintenance is planned, it will be scheduled
during the window.
The maintenance window period must encompass at
least 12 hours per week. This may be split into
multiple chunks, each with a size of at least 4
hours.
If this value is omitted, Cloud Composer
components may be subject to maintenance at any
time.
workloads_config (google.cloud.orchestration.airflow.service_v1beta1.types.WorkloadsConfig):
Optional. The workloads configuration settings for the GKE
cluster associated with the Cloud Composer environment. The
GKE cluster runs Airflow scheduler, web server and workers
workloads.
This field is supported for Cloud Composer environments in
versions composer-2.\ *.*-airflow-*.*.\* and newer.
environment_size (google.cloud.orchestration.airflow.service_v1beta1.types.EnvironmentConfig.EnvironmentSize):
Optional. The size of the Cloud Composer environment.
This field is supported for Cloud Composer environments in
versions composer-2.\ *.*-airflow-*.*.\* and newer.
"""
class EnvironmentSize(proto.Enum):
r"""The size of the Cloud Composer environment."""
ENVIRONMENT_SIZE_UNSPECIFIED = 0
ENVIRONMENT_SIZE_SMALL = 1
ENVIRONMENT_SIZE_MEDIUM = 2
ENVIRONMENT_SIZE_LARGE = 3
gke_cluster = proto.Field(
proto.STRING,
number=1,
)
dag_gcs_prefix = proto.Field(
proto.STRING,
number=2,
)
node_count = proto.Field(
proto.INT32,
number=3,
)
software_config = proto.Field(
proto.MESSAGE,
number=4,
message='SoftwareConfig',
)
node_config = proto.Field(
proto.MESSAGE,
number=5,
message='NodeConfig',
)
private_environment_config = proto.Field(
proto.MESSAGE,
number=7,
message='PrivateEnvironmentConfig',
)
web_server_network_access_control = proto.Field(
proto.MESSAGE,
number=9,
message='WebServerNetworkAccessControl',
)
database_config = proto.Field(
proto.MESSAGE,
number=10,
message='DatabaseConfig',
)
web_server_config = proto.Field(
proto.MESSAGE,
number=11,
message='WebServerConfig',
)
airflow_uri = proto.Field(
proto.STRING,
number=6,
)
encryption_config = proto.Field(
proto.MESSAGE,
number=12,
message='EncryptionConfig',
)
maintenance_window = proto.Field(
proto.MESSAGE,
number=13,
message='MaintenanceWindow',
)
workloads_config = proto.Field(
proto.MESSAGE,
number=15,
message='WorkloadsConfig',
)
environment_size = proto.Field(
proto.ENUM,
number=16,
enum=EnvironmentSize,
)
class WebServerNetworkAccessControl(proto.Message):
r"""Network-level access control policy for the Airflow web
server.
Attributes:
allowed_ip_ranges (Sequence[google.cloud.orchestration.airflow.service_v1beta1.types.WebServerNetworkAccessControl.AllowedIpRange]):
A collection of allowed IP ranges with
descriptions.
"""
class AllowedIpRange(proto.Message):
r"""Allowed IP range with user-provided description.
Attributes:
value (str):
IP address or range, defined using CIDR notation, of
requests that this rule applies to. Examples:
``192.168.1.1`` or ``192.168.0.0/16`` or ``2001:db8::/32``
or ``2001:0db8:0000:0042:0000:8a2e:0370:7334``.
IP range prefixes should be properly truncated. For example,
``1.2.3.4/24`` should be truncated to ``1.2.3.0/24``.
Similarly, for IPv6, ``2001:db8::1/32`` should be truncated
to ``2001:db8::/32``.
description (str):
Optional. User-provided description. It must
contain at most 300 characters.
"""
value = proto.Field(
proto.STRING,
number=1,
)
description = proto.Field(
proto.STRING,
number=2,
)
allowed_ip_ranges = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=AllowedIpRange,
)
class SoftwareConfig(proto.Message):
r"""Specifies the selection and configuration of software inside
the environment.
Attributes:
image_version (str):
The version of the software running in the environment. This
encapsulates both the version of Cloud Composer
functionality and the version of Apache Airflow. It must
match the regular expression
``composer-([0-9]+\.[0-9]+\.[0-9]+|latest)-airflow-[0-9]+\.[0-9]+(\.[0-9]+.*)?``.
When used as input, the server also checks if the provided
version is supported and denies the request for an
unsupported version.
The Cloud Composer portion of the version is a `semantic
version <https://semver.org>`__ or ``latest``. When the
patch version is omitted, the current Cloud Composer patch
version is selected. When ``latest`` is provided instead of
an explicit version number, the server replaces ``latest``
with the current Cloud Composer version and stores that
version number in the same field.
The portion of the image version that follows *airflow-* is
an official Apache Airflow repository `release
name <https://github.com/apache/incubator-airflow/releases>`__.
See also `Version
List </composer/docs/concepts/versioning/composer-versions>`__.
airflow_config_overrides (Sequence[google.cloud.orchestration.airflow.service_v1beta1.types.SoftwareConfig.AirflowConfigOverridesEntry]):
Optional. Apache Airflow configuration properties to
override.
Property keys contain the section and property names,
separated by a hyphen, for example
"core-dags_are_paused_at_creation". Section names must not
contain hyphens ("-"), opening square brackets ("["), or
closing square brackets ("]"). The property name must not be
empty and must not contain an equals sign ("=") or semicolon
(";"). Section and property names must not contain a period
("."). Apache Airflow configuration property names must be
written in
`snake_case <https://en.wikipedia.org/wiki/Snake_case>`__.
Property values can contain any character, and can be
written in any lower/upper case format.
Certain Apache Airflow configuration property values are
`blocked </composer/docs/concepts/airflow-configurations>`__,
and cannot be overridden.
pypi_packages (Sequence[google.cloud.orchestration.airflow.service_v1beta1.types.SoftwareConfig.PypiPackagesEntry]):
Optional. Custom Python Package Index (PyPI) packages to be
installed in the environment.
Keys refer to the lowercase package name such as "numpy" and
values are the lowercase extras and version specifier such
as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2,
<1.9.2". To specify a package without pinning it to a
version specifier, use the empty string as the value.
env_variables (Sequence[google.cloud.orchestration.airflow.service_v1beta1.types.SoftwareConfig.EnvVariablesEntry]):
Optional. Additional environment variables to provide to the
Apache Airflow scheduler, worker, and webserver processes.
Environment variable names must match the regular expression
``[a-zA-Z_][a-zA-Z0-9_]*``. They cannot specify Apache
Airflow software configuration overrides (they cannot match
the regular expression ``AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+``),
and they cannot match any of the following reserved names:
- ``AIRFLOW_HOME``
- ``C_FORCE_ROOT``
- ``CONTAINER_NAME``
- ``DAGS_FOLDER``
- ``GCP_PROJECT``
- ``GCS_BUCKET``
- ``GKE_CLUSTER_NAME``
- ``SQL_DATABASE``
- ``SQL_INSTANCE``
- ``SQL_PASSWORD``
- ``SQL_PROJECT``
- ``SQL_REGION``
- ``SQL_USER``
python_version (str):
Optional. The major version of Python used to
run the Apache Airflow scheduler, worker, and
webserver processes.
Can be set to '2' or '3'. If not specified, the
default is '3'. Cannot be updated.
"""
image_version = proto.Field(
proto.STRING,
number=1,
)
airflow_config_overrides = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
pypi_packages = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
env_variables = proto.MapField(
proto.STRING,
proto.STRING,
number=4,
)
python_version = proto.Field(
proto.STRING,
number=6,
)
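# Illustrative construction of the message above (the version string and package pins are
# hypothetical examples only, not recommendations):
#   SoftwareConfig(
#       image_version='composer-1.17.7-airflow-2.1.4',
#       pypi_packages={'numpy': '==1.21.2', 'scipy': ''},
#       env_variables={'MY_SETTING': 'value'},
#   )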
class IPAllocationPolicy(proto.Message):
r"""Configuration for controlling how IPs are allocated in the
GKE cluster.
Attributes:
use_ip_aliases (bool):
Optional. Whether or not to enable Alias IPs in the GKE
cluster. If ``true``, a VPC-native cluster is created.
cluster_secondary_range_name (str):
Optional. The name of the cluster's secondary range used to
allocate IP addresses to pods. Specify either
``cluster_secondary_range_name`` or
``cluster_ipv4_cidr_block`` but not both.
This field is applicable only when ``use_ip_aliases`` is
true.
services_secondary_range_name (str):
Optional. The name of the services' secondary range used to
allocate IP addresses to the cluster. Specify either
``services_secondary_range_name`` or
``services_ipv4_cidr_block`` but not both.
This field is applicable only when ``use_ip_aliases`` is
true.
cluster_ipv4_cidr_block (str):
Optional. The IP address range used to allocate IP addresses
to pods in the cluster.
This field is applicable only when ``use_ip_aliases`` is
true.
Set to blank to have GKE choose a range with the default
size.
Set to /netmask (e.g. ``/14``) to have GKE choose a range
with a specific netmask.
Set to a
`CIDR <http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing>`__
notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private
networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``,
``192.168.0.0/16``) to pick a specific range to use. Specify
``cluster_secondary_range_name`` or
``cluster_ipv4_cidr_block`` but not both.
services_ipv4_cidr_block (str):
Optional. The IP address range of the services IP addresses
in this cluster.
This field is applicable only when ``use_ip_aliases`` is
true.
Set to blank to have GKE choose a range with the default
size.
Set to /netmask (e.g. ``/14``) to have GKE choose a range
with a specific netmask.
Set to a
`CIDR <http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing>`__
notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private
networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``,
``192.168.0.0/16``) to pick a specific range to use. Specify
``services_secondary_range_name`` or
``services_ipv4_cidr_block`` but not both.
"""
use_ip_aliases = proto.Field(
proto.BOOL,
number=1,
)
cluster_secondary_range_name = proto.Field(
proto.STRING,
number=2,
)
services_secondary_range_name = proto.Field(
proto.STRING,
number=3,
)
cluster_ipv4_cidr_block = proto.Field(
proto.STRING,
number=4,
)
services_ipv4_cidr_block = proto.Field(
proto.STRING,
number=5,
)
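# Illustrative construction of the message above (the CIDR values are hypothetical examples;
# 10.96.0.0/14 is taken from the docstring):
#   IPAllocationPolicy(
#       use_ip_aliases=True,
#       cluster_ipv4_cidr_block='10.96.0.0/14',
#       services_ipv4_cidr_block='10.100.0.0/16',
#   )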
class NodeConfig(proto.Message):
r"""The configuration information for the Kubernetes Engine | |
# Appointment/utils/utils.py
# store some functions
import requests
import json
from Appointment import *
import threading
from Appointment.models import Participant, Room, Appoint, CardCheckInfo  # database models
from django.contrib.auth.models import User
from django.db import transaction  # atomic database updates
from django.db.models import Q
from datetime import datetime, timedelta
from django.http import JsonResponse
import os
import time
'''
YWolfeee:
utils.py stores assorted pieces of informational data, such as the lookup tables below.
Sending WeChat messages and writing logs also live in this file; read the code notes before use.
Any utility function unrelated to the scheduler and to internet interaction can be implemented here.
'''
ip_room_dict = {
"152": "B104",
"155": "B104",
"132": "B106",
"153": "B106",
"131": "B107A",
"135": "B107B",
"134": "B108",
"151": "B108",
"146": "B111",
"149": "B111",
"141": "B112",
"148": "B112",
# "138": "B114", # 不准 自习室
"139": "B114",
"144": "B118",
"145": "B118",
"140": "B119",
"147": "B119",
"129": "B205",
"102": "B206",
"106": "B206",
"105": "B207",
"107": "B207",
"110": "B208",
"111": "B208",
"103": "B209",
"108": "B209",
"121": "B214",
"128": "B214", # 镜子 舞蹈室
"119": "B215",
"117": "B216",
"124": "B216", # 镜子 跑步机房
"122": "B217",
"126": "B217",
"113": "B218",
"120": "B220",
"130": "B220",
"112": "B221", # 琴房 看不到门口谱子位置
"123": "B221", # 琴房 看不到小提琴位
"118": "B222",
"125": "B222",
}
door_room_dict = {
"2020092016162884": "B104",
"2020092016370963": "B106A",
"2020092016422704": "B106B",
"2020092016464351": "B107A",
"2020092016550340": "B108A",
"2020092017010542": "B108B",
"2020092017070505": "B107B",
"2020092017084647": "B000", # 值班室
"2020092017233640": "B112A",
"2020092017234462": "B112B",
"2020092017235201": "B111",
"2020092017393941": "B114A",
"2020092017475922": "B114B",
"2020092017481264": "B118",
"2020092017482150": "B119",
"2020092018023538": "B218",
"2020092018030345": "B220",
"2020092018031303": "B221",
"2020092018032470": "B222",
"2020092018182960": "B214A",
"2020092018184631": "B214B",
"2020092018185928": "B216",
"2020092018201454": "B217",
"2020092018400410": "B209",
"2020092018521223": "B205",
"2020092018522586": "B206A",
"2020092018523750": "B206B",
"2020092018525770": "B208",
}
# Given the last three digits of a camera IP address, return the Rid of the room it watches
def iptoroom(ip):
return ip_room_dict[ip]
# Given a door-plate id, return the corresponding Rid
def doortoroom(door):
return door_room_dict[door]
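# Examples taken from the lookup tables above:
#   iptoroom("152")                 -> "B104"
#   doortoroom("2020092016162884")  -> "B104"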
############ modified by wxy ############
# Send messages to WeCom (enterprise WeChat).
# update 0309: previously returned a status code and error message; errors are now handled inside
#   this function (written to the log when they cannot be handled) and nothing is returned.
# update 0812: the response body changed and only errCode is authoritative from now on; this code
#   stays backward compatible with the old format. The redirect link is also no longer defaulted
#   and must be supplied manually, which has been updated here as well.
send_message = requests.session()
def send_wechat_message(
stuid_list,
start_time,
room,
message_type,
major_student,
usage,
announcement,
num,
reason='',
url=None,
is_admin=None,
):
    '''
    stuid_list: Iter[sid], a list of student ids, not student objects!
    start_time: datetime | Any; anything else falls back to str()
    room: str() is called on it, so it does not have to be an actual Room
    major_student: str, a person's name, not a student id!
    '''
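    # Hypothetical call for illustration only (the student id, name and texts are made up):
    #   send_wechat_message(['2100012345'], datetime.now(), 'B104', 'new',
    #                       '张三', usage='排练', announcement='', num=3)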
    # --- modify by pht: simplified the code somewhat --- #
    try:
        start_time = start_time.strftime("%Y-%m-%d %H:%M")
    except Exception:
        start_time = str(start_time)
    room = str(room)
    # The message shown to the user is assembled only from the title below plus two lists
    title = '地下室预约提醒'
    if is_admin is None:
        is_admin = 'admin' in message_type  # decides how the title is presented
    appoint_info = []
    show_time_and_place = True  # show the appointment time and place
    show_main_student = True  # show the initiator
    show_appoint_info = True  # show appointment details, including usage and headcount
    show_announcement = False  # show the announcement provided to the other participants
extra_info = []
if message_type == 'admin':
title = '管理员通知'
show_time_and_place = False
show_appoint_info = False
extra_info = ['内容:' + reason]
elif message_type == 'new':
title = '您有一条新的预约'
show_announcement = True
elif message_type == 'start':
title = '您有一条预约即将在15分钟后开始'
show_announcement = True
elif message_type == 'new&start':
title = '您有一条新的预约并即将在15分钟内开始'
show_announcement = True
elif message_type == 'violated':
        title = '您有一条新增的违约记录'  # reason
show_main_student = False
show_appoint_info = False
extra_info = ['原因:' + reason] # '当前信用分:'+str(credit)
elif message_type == 'cancel':
title = '您有一条预约被取消'
    elif message_type.startswith('longterm'):  # a long-term (recurring) appointment was created
title = f'您有一条预约被长线化'
show_announcement = True
extra_info = ['详情:' + reason]
elif message_type == 'confirm_admin_w2c': # WAITING to CONFIRMED
title = '您有一条预约已确认完成'
show_main_student = False
elif message_type == 'confirm_admin_v2j': # VIOLATED to JUDGED
title = '您有一条违约的预约申诉成功'
show_main_student = False
elif message_type == 'violate_admin': # VIOLATED
title = '您有一条预约被判定违约'
show_main_student = False
extra_info = [
'常见违规原因包括:用途不实、盗用他人信息预约、将功能房用作其他用途等',
'请尽快取消同类预约,避免再次扣分',
'如有疑问请联系管理员或咨询反馈',
]
if reason:
extra_info = [reason] + extra_info
    elif message_type == 'need_agree':  # the agreement needs to be signed
title = '您刷卡的房间需要签署协议'
show_main_student = False
show_appoint_info = False
extra_info = ['点击本消息即可快捷跳转到用户协议页面']
    elif message_type == 'temp_appointment':  # temporary appointment
title = '您发起了一条临时预约'
    elif message_type == 'temp_appointment_fail':  # temporary appointment failed
title = '您发起的临时预约失败'
show_main_student = False
show_appoint_info = False
extra_info = ['原因:' + reason]
else:
        # todo: remember to test this! Why can't the log for the earlier failure be found TAT
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} ' + "出错,原因:unknown message_type", "utils.send_wechat_message",
"Problem")
return
try:
if is_admin:
            title = f'【管理员操作】\n{title}\n'
else:
title = title + '\n'
        if show_time_and_place:  # currently every message type shows the time and place
appoint_info += [f'时间:{start_time}', f'地点:{room}']
if show_main_student:
appoint_info += [f'发起者:{major_student}']
if show_appoint_info:
appoint_info += ['用途:' + usage, f'人数:{num}']
if show_announcement and announcement:
appoint_info += ['预约通知:' + announcement]
message = title + '\n'.join(appoint_info + extra_info)
except Exception as e:
operation_writer(SYSTEM_LOG,
f"尝试整合信息时出错,原因:{e}", "utils.send_wechat_message",
"Problem")
# --- modify end(2021.9.1) --- #
secret = hash_wechat_coder.encode(message)
url = url if url is not None else '/admin-index.html'
if url.startswith('/'):
url = GLOBAL_INFO.this_url.rstrip('/') + '/' + url.lstrip('/')
post_data = {
'touser': stuid_list,
'toall': True,
'content': message,
'secret': secret,
'card': True,
'url': url,
'btntxt': '预约详情',
}
response = send_message.post(
GLOBAL_INFO.wechat_url, data=json.dumps(post_data))
    # for _ in range(0, 3):  # resend up to 3 times
    for _ in range(0, 1):  # do not resend: short-term retries are useless and failed recipients are retried internally --pht
        if _:
            time.sleep(1)  # changed so that the last attempt does not sleep --pht
        if response.status_code != 200:
            # a healthy connection always returns status code 200
            # only parse the json payload when the connection succeeded, otherwise it may raise --pht
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} '+
f"向微信发消息失败,原因:状态码{response.status_code}异常",
"utils.send_wechat_message",
"Problem")
continue
response = response.json()
if response['status'] == 200:
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} '+
"向微信发消息成功", "utils.send_wechat_message",
"OK")
return
        # else check the reason, send wechat message again
        # errMsg is no longer reliable; judge by errCode instead --pht
        code = response['data'].get('errCode')
        has_code = code is not None
        retry_enabled = (
            (200 <= code < 400 or str(code).startswith('2')) if
            has_code else
            ('部分' in response['data']['errMsg'])  # partially or entirely failed to send
        )
        # do not resend
        retry_enabled = False
if retry_enabled:
if has_code and code != 206:
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} '+
f"企业微信返回了异常的错误码:{code}",
"utils.send_wechat_message",
"Problem")
                continue  # currently only 206 means partial failure and is worth resending; 200 or anything else should not appear
stuid_list = [i[0] for i in response['data']['detail']]
post_data = {
'touser': stuid_list,
'toall': True,
'content': message,
'secret': secret,
'card': True,
'url': url,
'btntxt': '预约详情',
}
response = send_message.post(
                GLOBAL_INFO.wechat_url, data=json.dumps(post_data))  # why is this '' here?
else:
            # the earlier check was redundant: the return value only needs to say whether a resend
            # is worthwhile, and the error text can simply copy errMsg
            # usually this is malformed parameters (412); WeCom-side problems appear as the app being invisible (404)
err_msg = response['data']['errMsg']
if has_code:
err_msg = f'{code} ' + err_msg
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} '+
f"向微信发消息失败,原因:{err_msg}",
"utils.send_wechat_message",
"Problem")
return
    # every resend attempt failed
operation_writer(SYSTEM_LOG,
f'{start_time} {room} {message_type} '+
"向微信发消息失败,原因:多次发送失败. 发起者为: " +
str(major_student), "utils.send_wechat_message",
"Problem")
return
# return 1, response['data']['errMsg']
# Thread lock used for exclusive access while deducting credit points in the database
lock = threading.RLock()
# Credit point deduction system
real_credit_point = True  # if False, credit deduction is skipped entirely
def set_appoint_reason(input_appoint, reason):
    '''Lateness check during an appointment: record the reason first and move the appointment into
    the PROCESSING state; points are not necessarily deducted.'''
try:
operation_succeed = False
with transaction.atomic():
appoints = Appoint.objects.select_for_update().filter(Aid=input_appoint.Aid)
if len(appoints) != 1:
raise AssertionError
for appoint in appoints:
if appoint.Astatus == Appoint.Status.APPOINTED:
                    appoint.Astatus = Appoint.Status.PROCESSING  # avoid this function being called repeatedly
appoint.Areason = reason
appoint.save()
operation_succeed = True
# TODO: major_sid
major_sid = str(appoint.major_student.Sid_id)
aid = str(appoint.Aid)
areason = str(appoint.get_Areason_display())
if operation_succeed:
operation_writer(major_sid, f"预约{aid}出现违约:{areason}",
f"utils.set_appoint_reason{os.getpid()}", "OK")
return True, ""
except Exception as e:
return False, "in utils.set_appoint_reason: " + str(e)
def appoint_violate(input_appoint, reason):  # mark an appointment (by Aid) as violated and deduct points according to real_credit_point
try:
#lock.acquire()
operation_succeed = False
with transaction.atomic():
appoints = Appoint.objects.select_related(
'major_student').select_for_update().filter(Aid=input_appoint.Aid)
if len(appoints) != 1:
raise AssertionError
            for appoint in appoints:  # by assumption this access is atomic, so a second process blocks here
                really_deduct = False
                if real_credit_point and appoint.Astatus != Appoint.Status.VIOLATED:  # no negative scores; if already VIOLATED, do not deduct again
                    if appoint.major_student.credit > 0:  # a deduction is needed here
appoint.major_student.credit -= 1
really_deduct = True
appoint.Astatus = Appoint.Status.VIOLATED
appoint.Areason = reason
appoint.save()
appoint.major_student.save()
operation_succeed = True
# TODO: major_sid
major_sid = str(appoint.major_student.Sid_id)
astart = appoint.Astart
aroom = str(appoint.Room)
major_name = str(appoint.major_student.name)
usage = str(appoint.Ausage)
announce = str(appoint.Aannouncement)
number = str(appoint.Ayp_num+appoint.Anon_yp_num)
status = str(appoint.get_status())
aid = str(appoint.Aid)
areason = str(appoint.get_Areason_display())
credit = str(appoint.major_student.credit)
        if operation_succeed:  # this task completed successfully
send_wechat_message([major_sid],
astart,
aroom,
"violated",
major_name,
usage,
announce,
number,
status,
#appoint.major_student.credit,
) # totest: only main_student
operation_writer(major_sid, f"预约{aid}出现违约:{areason}" +
f";扣除信用分:{really_deduct}" +
f";剩余信用分:{credit}",
f"utils.appoint_violate{os.getpid()}", "OK") # str(os.getpid()),str(threading.current_thread().name()))
#lock.release()
return True, ""
except Exception as e:
return False, "in utils.appoint_violate: " + str(e)
# File operation utilities
log_root = "logstore"
if not os.path.exists(log_root):
os.mkdir(log_root)
log_root_path = os.path.join(os.getcwd(), log_root)
log_user = "user_detail"
if not os.path.exists(os.path.join(log_root_path, log_user)):
os.mkdir(os.path.join(log_root_path, log_user))
log_user_path = os.path.join(log_root_path, log_user)
# Weekly scheduled deletion of appointments, used to keep the number of appointments stored in the system down
def write_before_delete(appoint_list):
date = str(datetime.now().date())
write_path = os.path.join(log_root_path, date+".log")
log = open(write_path, mode="a") # open file
period_start = (datetime.now()-timedelta(days=7)).date()
log.write(str(period_start) + "~" + str(date) + "\n")
for appoint in appoint_list:
if appoint.Astatus != Appoint.Status.CANCELED: # not delete
log.write(str(appoint.toJson()).encode(
"gbk", 'ignore').decode("gbk", "ignore"))
log.write("\n")
log.write("end of file\n")
log.close()
# General-purpose log writer. Records the time (datetime.now()), the acting subject (Sid),
# a description (str) and the writing function (str).
# Parameters: the first is the Sid, which is also the file name; the second is the message;
# the third is the name of the source function (its category).
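# Example call (illustrative): operation_writer(SYSTEM_LOG, "weekly cleanup finished",
#                                               "utils.write_before_delete", "OK")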
def operation_writer(user, message, source, status_code="OK")-> None:
lock.acquire()
try:
if isinstance(user, User):
user = user.username
timestamp = str(datetime.now())
source = str(source).ljust(30)
status = status_code.ljust(10)
message = f"{timestamp} {source}{status}: {message}\n"
with open(os.path.join(log_user_path, f"{str(user)}.log"), mode="a") as journal:
journal.write(message)
if status_code == "Error" and GLOBAL_INFO.debug_stuids:
send_wechat_message(
stuid_list=GLOBAL_INFO.debug_stuids,
start_time=datetime.now(),
room='地下室后台',
message_type="admin",
major_student="地下室系统",
usage="发生Error错误",
announcement="",
num=1,
reason=message,
# credit=appoint.major_student.credit,
)
except Exception as e:
        # ideally an email notification should be sent when a problem occurs
        # to be added later
print(e)
lock.release()
def cardcheckinfo_writer(Participant, Room, real_status, should_status, message=None):
CardCheckInfo.objects.create(Cardroom=Room, Cardstudent=Participant,
CardStatus=real_status, ShouldOpenStatus=should_status, Message=message)
def check_temp_appoint(room):
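    # A room allows temporary appointments when its title marks it as a seminar ("研讨") room.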
return '研讨' in room.Rtitle
def get_conflict_appoints(appoint, times=1, interval=1, no_cross_day=False, lock=False):
    '''Return the conflicting appointments ordered by time; rows can be locked on request, but
    opening the transaction is the caller's responsibility.'''
    # Fetch all active appointments for this room
activate_appoints = Appoint.objects.not_canceled().filter(Room=appoint.Room)
if lock:
activate_appoints = activate_appoints.select_for_update()
    # AND/OR with an empty Q object simply yields the other operand
conditions = Q()
if no_cross_day:
conditions &= Q(
            # starts before the current appointment's finish time and finishes after its start time
Astart__time__lt=appoint.Afinish.time(),
Afinish__time__gt=appoint.Astart.time(),
)
date_range = [
appoint.Astart.date() + timedelta(days=7 * week)
for week in range(0, times * interval, interval)
]
conditions &= Q(
Astart__date__in=date_range,
Afinish__date__in=date_range,
)
else:
for week in range(0, times * interval, interval):
conditions |= Q(
                # the other appointment starts before this occurrence finishes
                Astart__lt=appoint.Afinish + timedelta(days=7 * week),
                # and finishes after this occurrence starts
                Afinish__gt=appoint.Astart + timedelta(days=7 * week),
)
conflict_appoints = activate_appoints.filter(conditions)
    return conflict_appoints.order_by('Astart', 'Afinish')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 23:12:05 2021
@author: mlampert
"""
#Core imports
import os
import copy
#Importing and setting up the FLAP environment
import flap
import flap_nstx
flap_nstx.register()
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
#Scientific library imports
from flap_nstx.tools import Polygon, FitEllipse
import cv2
import imutils
import matplotlib.pyplot as plt
from scipy import ndimage
import numpy as np
#import sys
#np.set_printoptions(threshold=sys.maxsize)
import scipy
from skimage.feature import peak_local_max
from skimage.filters import threshold_otsu
from skimage.segmentation import watershed, random_walker
def nstx_gpi_contour_structure_finder(data_object=None, #Name of the FLAP.data_object
exp_id='*', #Shot number (if data_object is not used)
time=None, #Time when the structures need to be evaluated (when exp_id is used)
sample=None, #Sample number where the structures need to be evaluated (when exp_id is used)
spatial=False, #Calculate the results in real spatial coordinates
pixel=False, #Calculate the results in pixel coordinates
mfilter_range=5, #Range of the median filter
nlevel=80//5, #The number of contours to be used for the calculation (default:ysize/mfilter_range=80//5)
levels=None, #Contour levels from an input and not from automatic calculation
threshold_level=None, #Threshold level over which it is considered to be a structure
#if set, the value is subtracted from the data and contours are found after that.
#Negative values are substituted with 0.
filter_struct=True, #Filter out the structures with less than filter_level number of contours
filter_level=None, #The number of contours threshold for structures filtering (default:nlevel//4)
remove_interlaced_structures=False, #Filter out the structures which are interlaced. Only the largest structures is preserved, others are removed.
test_result=False, #Test the result only (plot the contour and the found structures)
test=False, #Test the contours and the structures before any kind of processing
save_data_for_publication=False,
):
"""
The method calculates the radial and poloidal sizes of the structures
present in one from of the GPI image. It gathers the isosurface contour
coordinates and determines the structures based on certain criteria. In
principle no user input is necessary, the code provides a robust solution.
The sizes are determined by fitting an ellipse onto the contour at
half-height. The code returns the following list:
a[structure_index]={'Paths': [list of the paths, type: matplotlib.path.Path],
'Half path': [path at the half level of the structure]
'Levels': [levels of the paths, type: list],
'Center': [center of the ellipse in px,py or R,z coordinates, type: numpy.ndarray of two elements],
'Size': [size of the ellipse in x and y direction or R,z direction, type: ]numpy.ndarray of two elements,
'Angle': [angle of the ellipse compared to horizontal in radians, type: numpy.float64],
'Area': [area of the polygon at the half level],
('Ellipse': [the entire ellipse object, returned if test_result is True, type: flap_nstx.tools.FitEllipse])
}
"""
"""
----------------
READING THE DATA
----------------
"""
if type(data_object) is str:
data_object=flap.get_data_object_ref(data_object, exp_id=exp_id)
if len(data_object.data.shape) != 2:
            raise IOError('The input data_object is not 2D. The method only processes 2D data.')
if data_object is None:
if (exp_id is None) or ((time is None) and (sample is None)):
raise IOError('exp_id and time needs to be set if data_object is not set.')
try:
data_object=flap.get_data_object_ref('GPI', exp_id=exp_id)
except:
print('---- Reading GPI data ----')
data_object=flap.get_data('NSTX_GPI', exp_id=exp_id, name='', object_name='GPI')
if (time is not None) and (sample is not None):
raise IOError('Either time or sample can be set, not both.')
if time is not None:
data_object=data_object.slice_data(slicing={'Time':time})
if sample is not None:
data_object=data_object.slice_data(slicing={'Sample':sample})
try:
data_object.data
except:
raise IOError('The input data object should be a flap.DataObject')
if len(data_object.data.shape) != 2:
raise TypeError('The frame dataobject needs to be a 2D object without a time coordinate.')
if pixel:
x_coord_name='Image x'
y_coord_name='Image y'
if spatial:
x_coord_name='Device R'
y_coord_name='Device z'
x_coord=data_object.coordinate(x_coord_name)[0]
y_coord=data_object.coordinate(y_coord_name)[0]
"""
----------------
READING THE DATA
----------------
"""
data = scipy.ndimage.median_filter(data_object.data, mfilter_range)
if test:
plt.cla()
if threshold_level is not None:
if data.max() < threshold_level:
print('The maximum of the signal doesn\'t reach the threshold level.')
return None
data_thres = data - threshold_level
data_thres[np.where(data_thres < 0)] = 0.
if levels is None:
levels=np.arange(nlevel)/(nlevel-1)*(data_thres.max()-data_thres.min())+data_thres.min()
else:
nlevel=len(levels)
try:
structure_contours=plt.contourf(x_coord, y_coord, data_thres, levels=levels)
except:
plt.cla()
plt.close()
print('Failed to create the contours for the structures.')
return None
if not test or test_result:
plt.cla()
structures=[]
one_structure={'Paths':[None],
'Levels':[None]}
if test:
print('Plotting levels')
else:
plt.close()
#The following lines are the core of the code. It separates the structures
#from each other and stores the in the structure list.
"""
Steps of the algorithm:
1st step: Take the paths at the highest level and store them. These
create the initial structures
2nd step: Take the paths at the second highest level
2.1 step: if either of the previous paths contain either of
the paths at this level, the corresponding
path is appended to the contained structure from the
previous step.
2.2 step: if none of the previous structures contain the contour
at this level, a new structure is created.
3rd step: Repeat the second step until it runs out of levels.
4th step: Delete those structures from the list which doesn't have
enough paths to be called a structure.
(Note: a path is a matplotlib path, a structure is a processed path)
"""
for i_lev in range(len(structure_contours.collections)-1,-1,-1):
cur_lev_paths=structure_contours.collections[i_lev].get_paths()
n_paths_cur_lev=len(cur_lev_paths)
if len(cur_lev_paths) > 0:
if len(structures) == 0:
for i_str in range(n_paths_cur_lev):
structures.append(copy.deepcopy(one_structure))
structures[i_str]['Paths'][0]=cur_lev_paths[i_str]
structures[i_str]['Levels'][0]=levels[i_lev]
else:
for i_cur in range(n_paths_cur_lev):
new_path=True
cur_path=cur_lev_paths[i_cur]
for j_prev in range(len(structures)):
if cur_path.contains_path(structures[j_prev]['Paths'][-1]):
structures[j_prev]['Paths'].append(cur_path)
structures[j_prev]['Levels'].append(levels[i_lev])
new_path=False
if new_path:
structures.append(copy.deepcopy(one_structure))
structures[-1]['Paths'][0]=cur_path
structures[-1]['Levels'][0]=levels[i_lev]
if test:
x=cur_lev_paths[i_cur].to_polygons()[0][:,0]
y=cur_lev_paths[i_cur].to_polygons()[0][:,1]
plt.plot(x,y)
plt.axis('equal')
plt.pause(0.001)
#Cut the structures based on the filter level
if filter_level is None:
filter_level=nlevel//5
if filter_struct:
cut_structures=[]
for i_str in range(len(structures)):
if len(structures[i_str]['Levels']) > filter_level:
cut_structures.append(structures[i_str])
structures=cut_structures
if test:
print('Plotting structures')
plt.cla()
        plt.gca().set_aspect(1.0)
for struct in structures:
plt.contourf(x_coord, y_coord, data, levels=levels)
for path in struct['Paths']:
x=path.to_polygons()[0][:,0]
y=path.to_polygons()[0][:,1]
plt.plot(x,y)
plt.pause(0.001)
plt.cla()
#plt.axis('equal')
        plt.gca().set_aspect(1.0)
plt.contourf(x_coord, y_coord, data, levels=levels)
plt.colorbar()
#Finding the contour at the half level for each structure and
#calculating its properties
if len(structures) > 1:
#Finding the paths at FWHM
paths_at_half=[]
for i_str in range(len(structures)):
half_level=(structures[i_str]['Levels'][-1]+structures[i_str]['Levels'][0])/2.
ind_at_half=np.argmin(np.abs(structures[i_str]['Levels']-half_level))
paths_at_half.append(structures[i_str]['Paths'][ind_at_half])
#Process the structures which are embedded (cut the inner one)
if remove_interlaced_structures:
structures_to_be_removed=[]
for ind_path1 in range(len(paths_at_half)):
for ind_path2 in range(len(paths_at_half)):
if ind_path1 != ind_path2:
if paths_at_half[ind_path2].contains_path(paths_at_half[ind_path1]):
structures_to_be_removed.append(ind_path1)
structures_to_be_removed=np.unique(structures_to_be_removed)
cut_structures=[]
for i_str in range(len(structures)):
if i_str not in structures_to_be_removed:
cut_structures.append(structures[i_str])
structures=cut_structures
#Calculate the ellipse and its properties for the half level contours
for i_str in range(len(structures)):
str_levels=structures[i_str]['Levels']
half_level=(str_levels[-1]+str_levels[0])/2.
ind_at_half=np.argmin(np.abs(str_levels-half_level))
n_path=len(structures[i_str]['Levels'])
polygon_areas=np.zeros(n_path)
polygon_centroids=np.zeros([n_path,2])
polygon_intensities=np.zeros(n_path)
for i_path in range(n_path):
polygon=structures[i_str]['Paths'][i_path].to_polygons()
if polygon != []:
polygon=polygon[0]
polygon_areas[i_path]=flap_nstx.tools.Polygon(polygon[:,0],polygon[:,1]).area
polygon_centroids[i_path,:]=flap_nstx.tools.Polygon(polygon[:,0],polygon[:,1]).centroid
if i_path == 0:
polygon_intensities[i_path]=polygon_areas[i_path]*str_levels[i_path]
else:
polygon_intensities[i_path]=(polygon_areas[i_path]-polygon_areas[i_path-1])*str_levels[i_path]
intensity=np.sum(polygon_intensities)
center_of_gravity=np.asarray([np.sum(polygon_intensities*polygon_centroids[:,0])/np.sum(polygon_intensities),
np.sum(polygon_intensities*polygon_centroids[:,1])/np.sum(polygon_intensities)])
half_coords=structures[i_str]['Paths'][ind_at_half].to_polygons()[0]
half_polygon=flap_nstx.tools.Polygon(half_coords[:,0],half_coords[:,1])
structures[i_str]['Half path']=structures[i_str]['Paths'][ind_at_half]
structures[i_str]['Half level']=half_level
structures[i_str]['Centroid']=half_polygon.centroid
structures[i_str]['Area']=half_polygon.area
structures[i_str]['Intensity']=intensity
structures[i_str]['Center of gravity']=center_of_gravity
try:
ellipse=flap_nstx.tools.FitEllipse(half_coords[:,0],half_coords[:,1])
structures[i_str]['Center']=ellipse.center
size=ellipse.size
structures[i_str]['Size']=size
structures[i_str]['Angle']=ellipse.angle_of_rotation
structures[i_str]['Elongation']=(size[0]-size[1])/(size[0]+size[1])
if test_result or test:
structures[i_str]['Ellipse']=ellipse
else:
structures[i_str]['Ellipse']=None
except:
print('Ellipse fitting failed.')
structures[i_str]['Center']=None
structures[i_str]['Size']=None
structures[i_str]['Angle']=None
structures[i_str]['Elongation']=None
structures[i_str]['Ellipse']=None
fitted_structures=[]
for i_str in range(len(structures)):
if structures[i_str]['Size'] is not None:
fitted_structures.append(structures[i_str])
structures=fitted_structures
if test_result:
#fig,ax=plt.subplots(figsize=(8.5/2.54, 8.5/2.54/1.62))
fig,ax=plt.subplots(figsize=(10,10))
ax.set_aspect(1.0)
plt.contourf(x_coord, y_coord, data)
plt.colorbar()
if len(structures) > 0:
#Parametric reproduction of the Ellipse
R=np.arange(0,2*np.pi,0.01)
for i_structure in range(len(structures)):
structure=structures[i_structure]
if structure['Half path'] is not None:
phi=structure['Angle']
a,b=structure['Ellipse'].axes_length
x=structure['Half path'].to_polygons()[0][:,0]
y=structure['Half path'].to_polygons()[0][:,1]
xx = structure['Center'][0] + \
a*np.cos(R)*np.cos(phi) - \
b*np.sin(R)*np.sin(phi)
yy = structure['Center'][1] + \
a*np.cos(R)*np.sin(phi) + \
b*np.sin(R)*np.cos(phi)
plt.plot(x,y) #Plot the half path polygon
plt.plot(xx,yy) #Plot the ellipse
plt.scatter(structure['Centroid'][0],
structure['Centroid'][1], color='yellow')
plt.scatter(structure['Center of gravity'][0],
structure['Center of gravity'][1], color='red')
if save_data_for_publication:
exp_id=data_object.exp_id
time=data_object.coordinate('Time')[0][0,0]
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
filename=wd+'/'+str(exp_id)+'_'+str(time)+'_half_path_no.'+str(i_structure)+'.txt'
file1=open(filename, 'w+')
for i in range(len(x)):
file1.write(str(x[i])+'\t'+str(y[i])+'\n')
file1.close()
filename=wd+'/'+str(exp_id)+'_'+str(time)+'_fit_ellipse_no.'+str(i_structure)+'.txt'
file1=open(filename, 'w+')
for i in range(len(xx)):
file1.write(str(xx[i])+'\t'+str(yy[i])+'\n')
file1.close()
plt.xlabel('Image x')
plt.ylabel('Image y')
plt.title(str(exp_id)+' @ '+str(data_object.coordinate('Time')[0][0,0]))
plt.show()
plt.pause(0.001)
if save_data_for_publication:
exp_id=data_object.exp_id
time=data_object.coordinate('Time')[0][0,0]
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
filename=wd+'/'+str(exp_id)+'_'+str(time)+'_raw_data.txt'
file1=open(filename, 'w+')
for i in range(len(data[0,:])):
string=''
for j in range(len(data[:,0])):
string+=str(data[j,i])+'\t'
string+='\n'
file1.write(string)
file1.close()
return structures
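# A minimal, hypothetical consumer of the structure dictionaries returned above
# (not part of the original module); the key names match the ones filled in by
# the ellipse-fitting loop.
def _print_structure_summary(structures):
    for i_str in range(len(structures)):
        structure=structures[i_str]
        print('Structure '+str(i_str)+
              ': centroid='+str(structure['Centroid'])+
              ', area='+str(structure['Area'])+
              ', size='+str(structure['Size'])+
              ', angle='+str(structure['Angle'])+
              ', elongation='+str(structure['Elongation']))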
def nstx_gpi_watershed_structure_finder(data_object=None, #Name of the FLAP.data_object
exp_id='*', #Shot number (if data_object is not used)
time=None, #Time when the structures need to be evaluated (when exp_id is used)
sample=None, #Sample number where the structures need to be evaluated (when exp_id is used)
spatial=False, #Calculate the results in real spatial coordinates
pixel=False, #Calculate the results
import math
from numpy import imag
from numpy import real
from numpy import roots
class motion:
"""
Jerk-limited motion profile class.
"""
def __init__(self):
pass
def compute(self, p0, pf, v0, vf, V, A, D, J, t0=0.0):
"""
Compute the trajectory for the given parameters.
p0 - initial position;
pf - final position;
v0 - initial velocity;
vf - final velocity;
V - maximum allowed velocity;
A - maximum allowed acceleration (speed-up);
D - maximum allowed deceleration (braking);
J - maximum allowed jerk
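Example (illustrative values, not from the original source):
    profile = motion()
    profile.compute(p0=0.0, pf=1.0, v0=0.0, vf=0.0, V=0.5, A=1.0, D=1.0, J=5.0)
    profile.position(profile.t0 + profile.T)  # final position, approximately pf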
"""
# Store the initial parameters
self.p0 = p0
self.pf = pf
self.v0 = v0
self.vf = vf
self.V = V
self.A = A
self.D = D
self.J = J
self.t0 = t0
self.AFP = self.isAFP()
# Store the internal parameters, converting from DFP to AFP if necessary
self._p = [None] * 8
self._v = [None] * 8
self._t = [None] * 8
if self.AFP:
self._V = V
self._A = A
self._D = D
self._J = J
self._p[0] = p0
self._p[7] = pf
self._v[0] = v0
self._v[7] = vf
else:
self._V = V
self._A = D
self._D = A
self._J = J
self._p[0] = -p0
self._p[7] = -pf
self._v[0] = -v0
self._v[7] = -vf
self._t[0] = t0
# Compute the acceleration, deceleration and constant-velocity periods of the profile
(x, vp) = self.evaluatePeriods()
# Reduce the acceleration limits if necessary
# if vp - v0 > 0.25*J*pow(x[0],2):
if self._A > J * x[0] / 2:
self._A = 0.5 * J * x[0]
# if vp - vf > 0.25*J*pow(x[2],2):
if self._D > J * x[2] / 2:
self._D = 0.5 * J * x[2]
# Compute the time segments of the profile
self._T = [0.0]
self._T.append(self._A / self._J)
self._T.append(x[0] - 2 * self._T[1])
self._T.append(self._T[1])
self._T.append(x[1])
self._T.append(self._D / self._J)
self._T.append(x[2] - 2 * self._T[5])
self._T.append(self._T[5])
for i in range(1, 8):
self._t[i] = self._t[i - 1] + self._T[i]
self.T = self._t[7] - self._t[0]
# Compute the segment boundary conditions
self.calcIntervalBorderConditions()
def isAFP(self):
"""
Returns True if the acceleration period comes first,
False if the deceleration period comes first.
"""
v0 = self.v0
vf = self.vf
L = self.pf - self.p0
if v0 <= vf:
if vf - v0 <= pow(self.A, 2) / self.J:
Tm = 2 * math.sqrt((vf - v0) / self.J)
else:
Tm = (vf - v0) / self.A + self.A / self.J
else:
if v0 - vf <= pow(self.D, 2) / self.J:
Tm = 2 * math.sqrt((v0 - vf) / self.J)
else:
Tm = (v0 - vf) / self.D + self.D / self.J
Lm = (v0 + vf) / 2 * Tm
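# Tm is the minimum time needed for the bare velocity change v0 -> vf and Lm is the
# distance covered during it; the move is acceleration-first (AFP) when the requested
# displacement L is at least Lm, otherwise it is deceleration-first (DFP).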
return (v0 <= vf and Lm <= L) or (v0 > vf and Lm < L)
def position(self, time):
"""
Returns the profile position at time `time`.
The calculation is based on which segment of the profile
the time instant falls into.
"""
if time < self._t[0]:
p = self._p[0]
elif time < self._t[1]:
p = self._p[0] + self._v[0] * (time - self._t[0]) + \
self._J * pow(time - self._t[0], 3) / 6
elif time < self._t[2]:
p = self._p[1] + self._v[1] * (time - self._t[1]) + \
0.5 * self._A * pow(time - self._t[1], 2)
elif time < self._t[3]:
p = self._p[2] + self._v[2] * (time - self._t[2]) + \
0.5 * self._A * pow(time - self._t[2], 2) - \
self._J * pow(time - self._t[2], 3) / 6
elif time < self._t[4]:
p = self._p[3] + self._v[3] * (time - self._t[3])
elif time < self._t[5]:
p = self._p[4] + self._v[4] * (time - self._t[4]) - \
self._J * pow(time - self._t[4], 3) / 6
elif time < self._t[6]:
p = self._p[5] + self._v[5] * (time - self._t[5]) - \
0.5 * self._D * pow(time - self._t[5], 2)
elif time < self._t[7]:
p = self._p[6] + self._v[6] * (time - self._t[6]) - \
0.5 * self._D * pow(time - self._t[6], 2) + \
self._J * pow(time - self._t[6], 3) / 6
else:
p = self._p[7] + self._v[7] * (time - self._t[7])
if self.AFP:
return p
else:
return -p
def velocity(self, time):
"""
Returns the profile velocity at time `time`.
The calculation is based on which segment of the profile
the time instant falls into.
"""
if time < self._t[0]:
v = self._v[0]
elif time < self._t[1]:
v = self._v[0] + 0.5 * self._J * pow(time - self._t[0], 2)
elif time < self._t[2]:
v = self._v[1] + self._A * (time - self._t[1])
elif time < self._t[3]:
v = self._v[2] + self._A * (time - self._t[2]) \
- 0.5 * self._J * pow(time - self._t[2], 2)
elif time < self._t[4]:
v = self._v[3]
elif time < self._t[5]:
v = self._v[4] - 0.5 * self._J * pow(time - self._t[4], 2)
elif time < self._t[6]:
v = self._v[5] - self._D * (time - self._t[5])
elif time < self._t[7]:
v = self._v[6] - self._D * (time - self._t[6]) \
+ 0.5 * self._J * pow(time - self._t[6], 2)
else:
v = self._v[7]
if self.AFP:
return v
else:
return -v
def acceleration(self, time):
"""
Returns the profile acceleration at time `time`.
The calculation is based on which segment of the profile
the time instant falls into.
"""
if time < self._t[0]:
a = 0.0
elif time < self._t[1]:
a = self._J * (time - self._t[0])
elif time < self._t[2]:
a = self._A
elif time < self._t[3]:
a = self._A - self._J * (time - self._t[2])
elif time < self._t[4]:
a = 0.0
elif time < self._t[5]:
a = -self._J * (time - self._t[4])
elif time < self._t[6]:
a = -self._D
elif time < self._t[7]:
a = -self._D + self._J * (time - self._t[6])
else:
a = 0.0
if self.AFP:
return a
else:
return -a
def calcIntervalBorderConditions(self):
"""
Internal function. Computes the velocity and position values
at the boundaries of the profile's time segments.
"""
T = self._T
self._v[1] = self._v[0] + 0.5 * self._J * pow(T[1], 2)
self._v[2] = self._v[1] + self._A * T[2]
self._v[3] = self._v[2] + self._A * T[3] - 0.5 * self._J * pow(T[3], 2)
self._v[4] = self._v[3]
self._v[5] = self._v[4] - 0.5 * self._J * pow(T[5], 2)
self._v[6] = self._v[5] - self._D * T[6]
self._p[1] = self._p[0] + self._v[0] * T[1] + self._J * pow(T[1], 3) / 6
self._p[2] = self._p[1] + self._v[1] * T[2] + 0.5 * self._A * pow(T[2], 2)
self._p[3] = self._p[2] + self._v[2] * T[3] + 0.5 * self._A * pow(T[3], 2) \
- self._J * pow(T[3], 3) / 6
self._p[4] = self._p[3] + self._v[3] * T[4]
self._p[5] = self._p[4] + self._v[4] * T[5] - self._J * pow(T[5], 3) / 6
self._p[6] = self._p[5] + self._v[5] * T[6] - 0.5 * self._D * pow(T[6], 2)
def evaluatePeriods(self):
"""
Internal function. Determines the profile periods.
The cases are checked one after another until a suitable one is found.
Returns a tuple of the form ((xa, xc, xd), vp), where
xa - acceleration period
xc - constant-velocity period
xd - deceleration period
vp - peak velocity
"""
V = self._V
A = self._A
D = self._D
J = self._J
v0 = self._v[0]
vf = self._v[7]
L = self._p[7] - self._p[0]
# Cases without a constant-velocity segment
def caseNoCVelCAccCDec():
# Constant-acceleration and constant-deceleration segments exist, but no constant-velocity segment
a = A * (A / D + 1)
b = 1 / (J * D) * (A + D) * (A * D - 2 * pow(A, 2) + 2 * v0 * J)
c = -2 * L - 1 / D * (v0 + vf - pow(A, 2) / J) * (vf - v0 + (pow(A, 2) - pow(D, 2)) / J)
x = roots([a, b, c])
for xi in x:
if imag(xi) == 0 and xi >= 2 * A / J:
xa = real(xi)
xd = (v0 - vf - pow(A, 2) / J + pow(D, 2) / J + A * xa) / D
if xd >= 2 * D / J:
vp = v0 - pow(A, 2) / J + A * xa
return ((xa, 0.0, xd), vp)
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_show_players_1", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_show_players_1", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_show_players_1", pos1),
(set_container_overlay, "$g_presentation_obj_show_players_1"),
#(assign, ":cur_y", 450),
(multiplayer_get_my_player, ":my_player_no"),
(assign, ":cur_y", 10),
(get_max_players, ":num_players"),
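#First pass: only count the eligible players so ":cur_y" ends up at the title position; the second loop below creates one button per eligible player, stepping ":cur_y" back down by escape_menu_item_height.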
(try_for_range, ":player_no", 1, ":num_players"), #0 is server no need to write it
(player_is_active, ":player_no"),
(assign, ":continue", 0),
(try_begin),
(neq, "$g_multiplayer_players_list_action_type", 5),
(neq, "$g_multiplayer_players_list_action_type", 6),
(assign, ":continue", 1),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 5),
(neq, ":player_no", ":my_player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 0),
(assign, ":continue", 1),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 6),
(neq, ":player_no", ":my_player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 1),
(assign, ":continue", 1),
(try_end),
(eq, ":continue", 1),
(val_add, ":cur_y", escape_menu_item_height),
(try_end),
(create_text_overlay, reg0, "str_choose_a_player", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server no need to write it
(player_is_active, ":player_no"),
(player_set_slot, ":player_no", slot_player_button_index, -1),
(assign, ":continue", 0),
(try_begin),
(neq, "$g_multiplayer_players_list_action_type", 5),
(neq, "$g_multiplayer_players_list_action_type", 6),
(assign, ":continue", 1),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 5),
(neq, ":player_no", ":my_player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 0),
(assign, ":continue", 1),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 6),
(neq, ":player_no", ":my_player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 1),
(assign, ":continue", 1),
(try_end),
(eq, ":continue", 1),
(str_store_player_username, s0, ":player_no"),
(create_button_overlay, ":overlay_id", s0, 0),
(overlay_set_color, ":overlay_id", 0xFFFFFF),
(position_set_x, pos1, 130),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, ":overlay_id", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(player_set_slot, ":player_no", slot_player_button_index, ":overlay_id"),
(try_end),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 1, ":num_players"), #0 is server no need to write it
(player_is_active, ":player_no"),
(player_slot_eq, ":player_no", slot_player_button_index, ":object"),
(try_begin),
(is_between, "$g_multiplayer_players_list_action_type", 1, 3), #poll kick or poll ban
(try_begin),
(multiplayer_get_my_player, ":my_player_no"),
(ge, ":my_player_no", 0),
(multiplayer_send_2_int_to_server, multiplayer_event_start_new_poll, "$g_multiplayer_players_list_action_type", ":player_no"),
(store_mission_timer_a, ":mission_timer"),
(val_add, ":mission_timer", multiplayer_poll_disable_period),
(player_set_slot, ":my_player_no", slot_player_poll_disabled_until_time, ":mission_timer"),
(try_end),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 3), #admin kick
(multiplayer_send_int_to_server, multiplayer_event_admin_kick_player, ":player_no"),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 4), #admin ban
(multiplayer_send_int_to_server, multiplayer_event_admin_ban_player, ":player_no"),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 5), #mute player
(player_set_is_muted, ":player_no", 1),
(else_try),
(eq, "$g_multiplayer_players_list_action_type", 6), #unmute player
(player_set_is_muted, ":player_no", 0),
(try_end),
(assign, ":num_players", 0), #break
(presentation_set_duration, 0),
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_show_maps_list", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_show_maps_list_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_show_maps_list_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_show_maps_list_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_show_maps_list_menu_container"),
(call_script, "script_multiplayer_fill_map_game_types", "$g_multiplayer_game_type"),
(assign, ":num_maps", reg0),
(store_mul, ":cur_y", ":num_maps", escape_menu_item_height),
(val_add, ":cur_y", 10),
(create_text_overlay, reg0, "str_choose_a_map", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(assign, ":overlay_id", -1),
(try_for_range, ":i_map", 0, ":num_maps"),
(store_add, ":map_slot", ":i_map", multi_data_maps_for_game_type_begin),
(troop_get_slot, ":map_no", "trp_multiplayer_data", ":map_slot"),
(store_sub, ":string_index", ":map_no", multiplayer_scenes_begin),
(val_add, ":string_index", multiplayer_scene_names_begin),
(str_store_string, s0, ":string_index"),
(create_button_overlay, ":overlay_id", s0, 0),
(overlay_set_color, ":overlay_id", 0xFFFFFF),
(position_set_x, pos1, 100),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, ":overlay_id", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(store_add, "$g_show_maps_list_button_list_end_index", ":overlay_id", 1),
(store_sub, "$g_show_maps_list_button_list_first_index", "$g_show_maps_list_button_list_end_index", ":num_maps"),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_for_range, ":i_button", "$g_show_maps_list_button_list_first_index", "$g_show_maps_list_button_list_end_index"),
(eq, ":object", ":i_button"),
(call_script, "script_multiplayer_fill_map_game_types", "$g_multiplayer_game_type"),
(store_sub, ":map_slot", ":object", "$g_show_maps_list_button_list_first_index"),
(val_add, ":map_slot", multi_data_maps_for_game_type_begin),
(troop_get_slot, ":scene_id", "trp_multiplayer_data", ":map_slot"),
(presentation_set_duration, 0),
(try_begin),
(eq, "$g_multiplayer_maps_list_action_type", 1), #vote for map
(try_begin),
(multiplayer_get_my_player, ":my_player_no"),
(ge, ":my_player_no", 0),
(multiplayer_send_2_int_to_server, multiplayer_event_start_new_poll, 0, ":scene_id"),
(store_mission_timer_a, ":mission_timer"),
(val_add, ":mission_timer", multiplayer_poll_disable_period),
(player_set_slot, ":my_player_no", slot_player_poll_disabled_until_time, ":mission_timer"),
(try_end),
(else_try), #vote for map and factions
(assign, "$g_multiplayer_factions_list_action_type", 1), #for team 1
(assign, "$g_multiplayer_poll_for_map_and_faction_data_map", ":scene_id"),
(start_presentation, "prsnt_multiplayer_show_factions_list"),
(try_end),
(assign, "$g_show_maps_list_button_list_end_index", 0), #break;
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_show_factions_list", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_show_factions_list_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_show_factions_list_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_show_factions_list_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_show_factions_list_menu_container"),
(store_sub, ":num_factions", multiplayer_factions_end, multiplayer_factions_begin),
(try_begin),
(eq, "$g_multiplayer_factions_list_action_type", 2),
(val_sub, ":num_factions", 1),
(try_end),
(store_mul, ":cur_y", ":num_factions", escape_menu_item_height),
(val_add, ":cur_y", 10),
(assign, reg0, "$g_multiplayer_factions_list_action_type"),
(create_text_overlay, reg0, "str_choose_a_faction_for_team_reg0", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(assign, ":overlay_id", -1),
(try_for_range, ":i_faction", multiplayer_factions_begin, multiplayer_factions_end),
(this_or_next|eq, "$g_multiplayer_factions_list_action_type", 1),
(neq, "$g_multiplayer_poll_for_map_and_faction_data_faction_1", ":i_faction"),
(str_store_faction_name, s0, ":i_faction"),
(create_button_overlay, ":overlay_id", s0, 0),
(overlay_set_color, ":overlay_id", 0xFFFFFF),
(position_set_x, pos1, 100),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, ":overlay_id", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(store_add, "$g_show_factions_list_button_list_end_index", ":overlay_id", 1),
(store_sub, "$g_show_factions_list_button_list_first_index", "$g_show_factions_list_button_list_end_index", ":num_factions"),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_for_range, ":i_button", "$g_show_factions_list_button_list_first_index", "$g_show_factions_list_button_list_end_index"),
(eq, ":object", ":i_button"),
(store_sub, ":faction_no", ":object", "$g_show_factions_list_button_list_first_index"),
(val_add, ":faction_no", multiplayer_factions_begin),
(presentation_set_duration, 0),
(try_begin),
(eq, "$g_multiplayer_factions_list_action_type", 2), #vote for second team
(try_begin),
(ge, ":faction_no", "$g_multiplayer_poll_for_map_and_faction_data_faction_1"),
(val_add, ":faction_no", 1),
(try_end),
(try_begin),
(multiplayer_get_my_player, ":my_player_no"),
(ge, ":my_player_no", 0),
(multiplayer_send_4_int_to_server, multiplayer_event_start_new_poll, 3, "$g_multiplayer_poll_for_map_and_faction_data_map", "$g_multiplayer_poll_for_map_and_faction_data_faction_1", ":faction_no"),
(store_mission_timer_a, ":mission_timer"),
(val_add, ":mission_timer", multiplayer_poll_disable_period),
(player_set_slot, ":my_player_no", slot_player_poll_disabled_until_time, ":mission_timer"),
(try_end),
(else_try), #vote for first team
(assign, "$g_multiplayer_factions_list_action_type", 2), #for team 2
(assign, "$g_multiplayer_poll_for_map_and_faction_data_faction_1", ":faction_no"),
(start_presentation, "prsnt_multiplayer_show_factions_list"),
(try_end),
(assign, "$g_show_factions_list_button_list_end_index", 0), #break;
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_show_number_of_bots_list", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_show_number_of_bots_list_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_show_number_of_bots_list_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_show_number_of_bots_list_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_show_number_of_bots_list_menu_container"),
(assign, ":num_options", 0),
(store_add, ":end_cond", "$g_multiplayer_num_bots_voteable", 1),
(try_for_range, ":i_number", 0, ":end_cond"),
(assign, ":i_number_mod_5", ":i_number"),
(val_mod, ":i_number_mod_5", 5),
(this_or_next|lt, ":i_number", 10),
(eq, ":i_number_mod_5", 0),
(val_add, ":num_options", 1),
(try_end),
(store_mul, ":cur_y", ":num_options", escape_menu_item_height),
(val_add, ":cur_y", 10),
(assign, reg0, "$g_multiplayer_number_of_bots_list_action_type"),
(create_text_overlay, reg0, "str_choose_number_of_bots_for_team_reg0", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(assign, ":overlay_id", -1),
(try_for_range, ":i_number", 0, ":end_cond"),
(assign, ":i_number_mod_5", ":i_number"),
(val_mod, ":i_number_mod_5", 5),
(this_or_next|lt, ":i_number", 10),
(eq, ":i_number_mod_5", 0),
(assign, reg0, ":i_number"),
(str_store_string, s0, "str_reg0"),
(create_button_overlay, ":overlay_id", s0, 0),
(overlay_set_color, ":overlay_id", 0xFFFFFF),
(position_set_x, pos1, 100),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, ":overlay_id", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(store_add, "$g_show_number_of_bots_list_button_list_end_index", ":overlay_id", 1),
(store_sub, "$g_show_number_of_bots_list_button_list_first_index", "$g_show_number_of_bots_list_button_list_end_index", ":num_options"),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_for_range, ":i_button", "$g_show_number_of_bots_list_button_list_first_index", "$g_show_number_of_bots_list_button_list_end_index"),
(eq, ":object", ":i_button"),
(store_sub, ":value_index", ":object", "$g_show_number_of_bots_list_button_list_first_index"),
(try_begin),
(lt, ":value_index", 10),
(assign, ":used_value", ":value_index"),
(else_try),
(store_sub, ":used_value", ":value_index", 8),
(val_mul, ":used_value", 5),
(try_end),
(presentation_set_duration, 0),
(try_begin),
(eq, "$g_multiplayer_number_of_bots_list_action_type", 2), #vote for second team
(try_begin),
(multiplayer_get_my_player, ":my_player_no"),
(ge, ":my_player_no", 0),
(multiplayer_send_3_int_to_server, multiplayer_event_start_new_poll, 4, "$g_multiplayer_poll_number_of_bots_team_1", ":used_value"),
(store_mission_timer_a, ":mission_timer"),
(val_add, ":mission_timer", multiplayer_poll_disable_period),
(player_set_slot, ":my_player_no", slot_player_poll_disabled_until_time, ":mission_timer"),
(try_end),
(else_try), #vote for first team
(assign, "$g_multiplayer_number_of_bots_list_action_type", 2), #for team 2
(assign, "$g_multiplayer_poll_number_of_bots_team_1", ":used_value"),
(start_presentation, "prsnt_multiplayer_show_number_of_bots_list"),
(try_end),
(assign, "$g_show_number_of_bots_list_button_list_end_index", 0), #break;
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_poll", prsntf_read_only|prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_white_plane"),
(overlay_set_color, reg0, 0x000000),
(overlay_set_alpha, reg0, 0x44),
(position_set_x, pos1, 50),
(position_set_y, pos1, 50),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 37500),
(position_set_y, pos1, 4500),
(overlay_set_size, reg0, pos1),
(try_begin),
(eq, "$g_multiplayer_poll_to_show", 0),
(store_sub, ":string_index", "$g_multiplayer_poll_value_to_show", multiplayer_scenes_begin),
(val_add, ":string_index", multiplayer_scene_names_begin),
(str_store_string, s0, ":string_index"),
(create_text_overlay, reg0, "str_poll_change_map", tf_center_justify),
(else_try),
(eq, "$g_multiplayer_poll_to_show", 1),
(str_store_player_username, s0, "$g_multiplayer_poll_value_to_show"),
(create_text_overlay, reg0, "str_poll_kick_player", tf_center_justify),
(else_try),
(eq, "$g_multiplayer_poll_to_show", 2),
(str_store_player_username, s0, "$g_multiplayer_poll_value_to_show"),
(create_text_overlay, reg0, "str_poll_ban_player", tf_center_justify),
(else_try),
(eq, "$g_multiplayer_poll_to_show", 3),
(store_sub, ":string_index", "$g_multiplayer_poll_value_to_show", multiplayer_scenes_begin),
(val_add, ":string_index", multiplayer_scene_names_begin),
(str_store_string, s0, ":string_index"),
(str_store_faction_name, s1, "$g_multiplayer_poll_value_2_to_show"),
(str_store_faction_name, s2, "$g_multiplayer_poll_value_3_to_show"),
(create_text_overlay, reg0, "str_poll_change_map_with_faction", tf_center_justify|tf_scrollable_style_2),
(else_try),
(assign, reg0, "$g_multiplayer_poll_value_to_show"),
(assign, reg1, "$g_multiplayer_poll_value_2_to_show"),
(str_store_faction_name, s0, "$g_multiplayer_team_1_faction"),
(str_store_faction_name, s1, "$g_multiplayer_team_2_faction"),
(create_text_overlay, reg0, "str_poll_change_number_of_bots", tf_center_justify|tf_scrollable_style_2),
(try_end),
(overlay_set_color, reg0, 0xFFFFFF),
(try_begin),
(neq, "$g_multiplayer_poll_to_show", 3),
(neq, "$g_multiplayer_poll_to_show", 4),
(position_set_x, pos1, 400),
(position_set_y, pos1, 100),
(overlay_set_position, reg0, pos1),
(else_try),
(position_set_x, pos1, 50),
(position_set_y, pos1, 70),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 750),
(position_set_y, pos1, 60),
(overlay_set_area_size, reg0, pos1),
(try_end),
(store_mission_timer_a, ":mission_timer"),
(store_sub, "$g_multiplayer_poll_last_written_seconds_left", "$g_multiplayer_poll_client_end_time", ":mission_timer"),
(assign, reg0, "$g_multiplayer_poll_last_written_seconds_left"),
(create_text_overlay, "$g_presentation_obj_poll_1", "str_poll_time_left", tf_right_align|tf_single_line),
(overlay_set_color, "$g_presentation_obj_poll_1", 0xFFFFFF),
(position_set_x, pos1, 790),
(position_set_y, pos1, 60),
(overlay_set_position, "$g_presentation_obj_poll_1", pos1),
(omit_key_once, key_1),
(omit_key_once, key_2),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(this_or_next|key_clicked, key_xbox_start),
(key_clicked, key_2),
(gt, ":cur_time", 500),
(multiplayer_send_int_to_server, multiplayer_event_answer_to_poll, 0),
(clear_omitted_keys),
(presentation_set_duration, 0),
# Repository: gaxler/triton
import ast
import builtins
import functools
import inspect
import struct
import sys
import textwrap
import hashlib
import os
import pickle
import subprocess
from .tools.disasm import extract
import torch
import triton
import triton._C.libtriton.triton as _triton
from filelock import FileLock
import dbm
class CodeGenerator(ast.NodeVisitor):
def get_value(self, name):
# search node.id in local scope
ret = None
if name in self.lscope:
ret = self.lscope[name]
# search node.id in global scope
elif name in self.gscope:
ret = self.gscope[name]
# search node.id in builtins
elif name in self.builtins:
ret = self.builtins[name]
else:
raise ValueError(f'{name} is not defined')
if isinstance(ret, triton.language.block):
handle = self.module.get_value(name)
return triton.language.block(handle)
return ret
def set_value(self, name, value):
if isinstance(value, _triton.ir.value):
value = triton.language.block(value)
if isinstance(value, triton.language.block):
self.module.set_value(name, value.handle)
self.module.set_type(name, value.handle.type)
self.lscope[name] = value
def is_triton_object(self, value):
return isinstance(value, triton.language.block)
def visit_compound_statement(self, stmts):
for stmt in stmts:
self.last_ret = self.visit(stmt)
if isinstance(stmt, ast.Return):
break
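# Report whether the visited block ended with a return, i.e. already emitted a terminator.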
return stmts and isinstance(stmt, ast.Return)
def __init__(self, context, prototype, gscope, attributes, constants, kwargs):
self.builder = _triton.ir.builder(context)
self.module = _triton.ir.module('', self.builder)
self.prototype = prototype
self.gscope = gscope
self.lscope = dict()
self.attributes = attributes
self.constants = constants
self.kwargs = kwargs
self.last_node = None
self.builtins = {
'range': range,
'min': triton.language.minimum,
'float': float,
'int': int,
'print': print,
'isinstance': isinstance,
'getattr': getattr,
}
def visit_Module(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_List(self, node):
ctx = self.visit(node.ctx)
assert ctx is None
elts = [self.visit(elt) for elt in node.elts]
return elts
# By design, only non-kernel functions can return
def visit_Return(self, node):
ret = self.visit(node.value)
if ret is None:
return self.builder.ret_void()
return ret
def visit_FunctionDef(self, node, inline=False, arg_values=None):
arg_names, kwarg_names = self.visit(node.args)
# store keyword arguments in local scope
self.lscope[kwarg_names] = self.kwargs
# initialize function
if inline:
pass
else:
fn = self.module.get_or_insert_function(node.name, self.prototype)
arg_values = []
for i, arg_name in enumerate(arg_names):
if i in self.constants:
cst = triton.language.core._to_ir(self.constants[i], self.builder)
arg_values.append(cst)
else:
if i in self.attributes:
is_ptr = fn.args[i].type.is_ptr()
attr = 'aligned' if is_ptr else 'multiple_of'
attr = getattr(_triton.ir.attribute_kind, attr)
attr = _triton.ir.attribute(attr, self.attributes[i])
fn.add_attr(i + 1, attr)
fn.args[i].name = arg_name
arg_values.append(fn.args[i])
for arg_name, arg_value in zip(arg_names, arg_values):
self.set_value(arg_name, arg_value)
if inline:
self.visit_compound_statement(node.body)
return self.last_ret
else:
entry = _triton.ir.basic_block.create(self.builder.context, "entry", fn)
self.module.seal_block(entry)
self.builder.set_insert_block(entry)
# visit function body
self.visit_compound_statement(node.body)
# finalize function
self.builder.ret_void()
def visit_arguments(self, node):
arg_names = []
for arg in node.args:
arg_names += [self.visit(arg)]
kwarg_names = self.visit(node.kwarg)
return arg_names, kwarg_names
def visit_arg(self, node):
ast.NodeVisitor.generic_visit(self, node)
return node.arg
def visit_Assign(self, node):
_names = []
for target in node.targets:
_names += [self.visit(target)]
assert len(_names) == 1
names = _names[0]
values = self.visit(node.value)
if not isinstance(names, tuple):
names = [names]
if not isinstance(values, tuple):
values = [values]
for name, value in zip(names, values):
if not isinstance(value, triton.language.block):
value = triton.language.core._to_ir(value, self.builder)
self.set_value(name, value)
def visit_AugAssign(self, node):
name = node.target.id
lhs = ast.Name(id=name, ctx=ast.Load())
rhs = ast.BinOp(lhs, node.op, node.value)
assign = ast.Assign(targets=[node.target], value=rhs)
self.visit(assign)
return self.get_value(name)
def visit_Name(self, node):
if type(node.ctx) == ast.Store:
return node.id
return self.get_value(node.id)
def visit_Store(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Load(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Tuple(self, node):
args = [self.visit(x) for x in node.elts]
return tuple(args)
def visit_BinOp(self, node):
lhs = self.visit(node.left)
rhs = self.visit(node.right)
fn = {
ast.Add: '__add__',
ast.Sub: '__sub__',
ast.Mult: '__mul__',
ast.Div: '__truediv__',
ast.FloorDiv: '__floordiv__',
ast.Mod: '__mod__',
ast.Pow: '__pow__',
ast.LShift: '__lshift__',
ast.RShift: '__rshift__',
ast.BitAnd: '__and__',
ast.BitOr: '__or__',
ast.BitXor: '__xor__',
}[type(node.op)]
kws = dict()
if self.is_triton_object(lhs):
kws['_builder'] = self.builder
ret = getattr(lhs, fn)(rhs, **kws)
if ret is NotImplemented:
if self.is_triton_object(rhs):
kws['_builder'] = self.builder
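# Fall back to the reflected operator, e.g. '__add__' -> '__radd__'.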
fn = fn[:2] + 'r' + fn[2:]
ret = getattr(rhs, fn)(lhs, **kws)
return ret
def visit_If(self, node):
cond = self.visit(node.test)
if self.is_triton_object(cond):
current_bb = self.builder.get_insert_block()
then_bb = _triton.ir.basic_block.create(self.builder.context, "then", current_bb.parent)
else_bb = _triton.ir.basic_block.create(self.builder.context, "else", current_bb.parent) if node.orelse else None
endif_bb = _triton.ir.basic_block.create(self.builder.context, "endif", current_bb.parent)
self.module.seal_block(then_bb)
if else_bb:
self.module.seal_block(else_bb)
self.builder.cond_br(cond.handle, then_bb, else_bb)
else:
self.builder.cond_br(cond.handle, then_bb, endif_bb)
self.builder.set_insert_block(then_bb)
is_terminator = self.visit_compound_statement(node.body)
# TODO: last statement is a terminator?
if not is_terminator:
self.builder.br(endif_bb)
if else_bb:
self.builder.set_insert_block(else_bb)
is_terminator = self.visit_compound_statement(node.orelse)
#TODO: last statement is a terminator?
if not is_terminator:
self.builder.br(endif_bb)
self.module.seal_block(endif_bb)
self.builder.set_insert_block(endif_bb)
else:
if cond:
self.visit_compound_statement(node.body)
else:
self.visit_compound_statement(node.orelse)
def visit_IfExp(self, node):
cond = self.visit(node.test)
if cond:
return self.visit(node.body)
else:
return self.visit(node.orelse)
def visit_Pass(self, node):
pass
def visit_Compare(self, node):
assert len(node.comparators) == 1
assert len(node.ops) == 1
lhs = self.visit(node.left)
rhs = self.visit(node.comparators[0])
fn = {
ast.Eq: '__eq__',
ast.NotEq: '__ne__',
ast.Lt: '__lt__',
ast.LtE: '__le__',
ast.Gt: '__gt__',
ast.GtE: '__ge__',
ast.Is: '__eq__',
ast.IsNot: '__ne__',
}[type(node.ops[0])]
if self.is_triton_object(lhs):
return getattr(lhs, fn)(rhs, _builder=self.builder)
elif self.is_triton_object(rhs):
fn = fn[:2] + 'r' + fn[2:]
return getattr(rhs, fn)(lhs, _builder=self.builder)
else:
return getattr(lhs, fn)(rhs)
def visit_UnaryOp(self, node):
op = self.visit(node.operand)
fn = {
ast.USub: '__neg__',
ast.UAdd: '__pos__',
ast.Invert: '__invert__',
}[type(node.op)]
if self.is_triton_object(op):
return getattr(op, fn)(_builder=self.builder)
return getattr(op, fn)()
def visit_While(self, node):
current_bb = self.builder.get_insert_block()
loop_bb = _triton.ir.basic_block.create(self.module.builder.context, "loop", current_bb.parent)
next_bb = _triton.ir.basic_block.create(self.module.builder.context, "postloop", current_bb.parent)
def continue_fn():
cond = self.visit(node.test)
return self.builder.cond_br(cond.handle, loop_bb, next_bb)
continue_fn()
self.builder.set_insert_block(loop_bb)
self.visit_compound_statement(node.body)
continue_fn()
stop_bb = self.builder.get_insert_block()
self.module.seal_block(stop_bb)
self.module.seal_block(loop_bb)
self.module.seal_block(next_bb)
self.builder.set_insert_block(next_bb)
for stmt in node.orelse:
ast.NodeVisitor.generic_visit(self, stmt)
def visit_Str(self, node):
return ast.literal_eval(node)
def visit_Subscript(self, node):
assert node.ctx.__class__.__name__ == "Load"
lhs = self.visit(node.value)
slices = self.visit(node.slice)
if self.is_triton_object(lhs):
return lhs.__getitem__(slices, _builder=self.builder)
return lhs[slices]
def visit_ExtSlice(self, node):
return [self.visit(dim) for dim in node.dims]
def visit_For(self, node):
iterator = self.visit(node.iter.func)
if iterator != self.builtins['range']:
raise RuntimeError('Only `range` iterator currently supported')
# create nodes
st_target = ast.Name(id=node.target.id, ctx=ast.Store())
ld_target = ast.Name(id=node.target.id, ctx=ast.Load())
arg_0 = node.iter.args[0] if len(node.iter.args) > 1 else ast.Num(0)
arg_1 = node.iter.args[1] if len(node.iter.args) > 1 else node.iter.args[0]
arg_2 = node.iter.args[2] if len(node.iter.args) > 2 else ast.Num(1)
init_node = ast.Assign(targets=[st_target], value=arg_0)
pos_cond_node = ast.Compare(ld_target, [ast.Lt()], [arg_1])
neg_cond_node = ast.Compare(ld_target, [ast.Gt()], [arg_1])
pos_step_node = ast.Compare(arg_2, [ast.Gt()], [ast.Num(0)])
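# The loop-continue test depends on the sign of the step: compare with '<' for a
# positive step and with '>' for a negative one.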
build_cond = lambda: triton.language.where(self.visit(pos_step_node),\
self.visit(pos_cond_node),\
self.visit(neg_cond_node),\
_builder=self.builder)
#cond_node = neg_cond_node
step_node = ast.AugAssign(target=st_target, op=ast.Add(), value=arg_2)
# code generation
current_bb = self.builder.get_insert_block()
loop_bb = _triton.ir.basic_block.create(self.module.builder.context, "loop", current_bb.parent)
next_bb = _triton.ir.basic_block.create(self.module.builder.context, "postloop", current_bb.parent)
def continue_fn():
self.visit(step_node)
cond = build_cond()
return self.builder.cond_br(cond.handle, loop_bb, next_bb)
self.visit(init_node)
cond = build_cond()
self.builder.cond_br(cond.handle, loop_bb, next_bb)
self.builder.set_insert_block(loop_bb)
self.visit_compound_statement(node.body)
# TODO: handle case where body breaks control flow
continue_fn()
stop_bb = self.builder.get_insert_block()
self.module.seal_block(stop_bb)
self.module.seal_block(loop_bb)
self.module.seal_block(next_bb)
self.builder.set_insert_block(next_bb)
for stmt in node.orelse:
ast.NodeVisitor.generic_visit(self, stmt)
def visit_Slice(self, node):
lower = self.visit(node.lower)
upper = self.visit(node.upper)
step = self.visit(node.step)
return slice(lower, upper, step)
def visit_Index(self, node):
return self.visit(node.value)
def visit_NameConstant(self, node):
return node.value
def visit_keyword(self, node):
return {node.arg: self.visit(node.value)}
def visit_Call(self, node):
fn = self.visit(node.func)
kws = dict()
for keyword in node.keywords:
kws.update(self.visit(keyword))
args = [self.visit(arg) for arg in node.args]
if isinstance(fn, JITFunction):
return fn(*args, generator=self, **kws)
if hasattr(fn, '__self__') and self.is_triton_object(fn.__self__) or \
sys.modules[fn.__module__] is triton.language.core:
return fn(*args, _builder=self.builder, **kws)
return fn(*args, **kws)
def visit_Num(self, node):
return node.n
def visit_Attribute(self, node):
lhs = self.visit(node.value)
return getattr(lhs, node.attr)
def visit_Expr(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_NoneType(self, node):
return None
def visit(self, node):
if node is not None:
self.last_node = node
return super().visit(node)
def generic_visit(self, node):
typename = type(node).__name__
raise NotImplementedError("Unsupported node: {}".format(typename))
class Binary:
def __init__(self, backend, name, asm, shared_mem, num_warps):
self.backend = backend
self.name = name
self.asm = asm
self.shared_mem = shared_mem
self.num_warps = num_warps
class LoadedBinary:
def __init__(self, device: int, bin: Binary):
module, kernel = _triton.code_gen.load_binary(bin.backend,
bin.name,
bin.asm,
bin.shared_mem,
device)
self.bin = bin
self.asm = bin.asm
self.module = module
self.kernel = kernel
self.device = device
def __call__(self, stream, args, grid_0, grid_1=1, grid_2=1):
_triton.runtime.enqueue(self.bin.backend, stream, self.kernel,
grid_0, grid_1, grid_2,
self.bin.num_warps * 32, 1, 1,
args, self.bin.shared_mem)
class CompilationError(Exception):
def __init__(self, src, node, err):
self.message = '\n'.join(src.split('\n')[:node.lineno])
self.message += '\n' + ' ' * node.col_offset + '^'
self.message += '\n Error: ' + str(err)
super().__init__(self.message)
class OutOfResources(Exception):
def __init__(self, required, limit, name):
self.message = f'out of resource: {name}, '\
f'Required: {required}, '\
f'Hardware limit: {limit}'
super().__init__(self.message)
class Kernel:
@staticmethod
def _type_name(obj):
type_names = {
triton.language.float8: 'f8',
torch.bfloat16: 'bf16',
torch.float16: 'f16',
torch.float32: 'f32',
torch.float64: 'f64',
torch.bool: 'i1',
torch.int8: 'i8',
torch.int16: 'i16',
torch.int32: 'i32',
torch.int64: 'i64',
}
if hasattr(obj, 'data_ptr'):
return type_names[obj.dtype]
if isinstance(obj, int):
if abs(obj) <= 0xffffffff:
return 'I'
return 'L'
if isinstance(obj, float):
return 'f'
if isinstance(obj, bool):
return 'B'
assert False
@staticmethod
def _to_triton_ir(context, obj):
type_map = {
'I': _triton.ir.type.get_int32,
'L': _triton.ir.type.get_int64,
'f': _triton.ir.type.get_fp32,
'B': _triton.ir.type.get_int1,
'f8': _triton.ir.type.get_fp8,
'f16': _triton.ir.type.get_fp16,
'bf16': _triton.ir.type.get_bf16,
'f32': _triton.ir.type.get_fp32,
'f64': _triton.ir.type.get_fp64,
'i1': | |
box_pred, index = self.detect(0, conf_preds, decoded_boxes, mask_data, use_fastnms=False)
scores = box_pred[:, 1]
boxes = box_pred[:, 2:6]
classes = box_pred[:, 0]
masks = fluid.layers.gather(masks, index)
masks = fluid.layers.matmul(proto_data[0], masks, transpose_x=False, transpose_y=True)
masks = fluid.layers.sigmoid(masks)
masks = crop_tensor(masks, boxes)
masks = fluid.layers.transpose(masks, [2, 0, 1])
maskiou_p = self.mask_iou_net([8, 16, 32, 64, 128, 80], fluid.layers.unsqueeze(masks, 1))
idx_s = fluid.layers.range(0, fluid.layers.shape(classes)[0], 1, 'int32')
idx_s = fluid.layers.reshape(idx_s, (-1,1))
classes = fluid.layers.reshape(classes, (-1,1))
transform_idx_t = fluid.layers.concat([idx_s, fluid.layers.cast(classes, 'int32')], -1)
maskiou_p = fluid.layers.gather_nd(maskiou_p, transform_idx_t)
maskiou_p = fluid.layers.reshape(maskiou_p, shape=(-1,1))
bbox_scores = fluid.layers.reshape(scores, [-1,1])
mask_scores = bbox_scores * maskiou_p
mask_scores, idx = fluid.layers.argsort(mask_scores, axis=0, descending=True)
classes = fluid.layers.gather(classes, idx)
classes = fluid.layers.cast(classes, 'float32')
masks = fluid.layers.cast(masks, 'float32')
masks = fluid.layers.gather(masks, idx)
boxes = fluid.layers.gather(boxes, idx)
box_pred = fluid.layers.concat([classes, mask_scores, boxes], -1)
return {'bbox': box_pred, 'mask': masks}
def get_loss(self, body_feats, spatial_scale, im_info, gt_box, gt_class, gt_mask, is_crowd, gt_num):
output = self._get_output(body_feats, spatial_scale)
bbox_pred_reshape_list = output['loc']
conf_pred_reshape_list = output['conf']
mask_pred_reshape_list = output['mask']
anchor_reshape_list = output['anchor']
proto_data = output['proto']
segm_pred_reshape_list = output['segm']
loc_data = fluid.layers.concat(bbox_pred_reshape_list, axis=1)
conf_data = fluid.layers.concat(conf_pred_reshape_list, axis=1)
mask_data = fluid.layers.concat(mask_pred_reshape_list, axis=1)
priors = anchor_reshape_list
priors = fluid.layers.cast(priors, 'float32')
priors.stop_gradient = True
batch_size = self.batch_size
num_priors = self.num_priors
num_classes = self.num_classes
loc_t, gt_box_t, conf_t, idx_t, pos = get_target_tensor(gt_box, priors, gt_class, is_crowd, gt_num, loc_data, batch_size, num_priors)
labels = gt_class
losses = {}
out = fluid.layers.where(pos)
out.stop_gradient=True
loc_pt = fluid.layers.gather_nd(loc_data, out)
loc_tt = fluid.layers.gather_nd(loc_t, out)
loc_tt.stop_gradient = True
loss_bbox = fluid.layers.smooth_l1(
x=fluid.layers.cast(loc_pt,'float32'),
y=fluid.layers.cast(loc_tt,'float32'))
losses['B'] = fluid.layers.reduce_sum(loss_bbox) * 1.5
losses['M'], maskiou_targets = self.lincomb_mask_loss(pos, idx_t, mask_data, proto_data, gt_mask, gt_box_t, labels, gt_num, batch_size, num_priors)
losses['C'] = self.ohem_conf_loss(conf_data, conf_t, pos, batch_size, num_priors)
losses['S'] = self.semantic_segmentation_loss(segm_pred_reshape_list, gt_mask, labels, batch_size, gt_num)
losses['I'] = self.mask_iou_loss([8, 16, 32, 64, 128, 80], maskiou_targets)
total_num_pos = fluid.layers.reduce_sum(fluid.layers.cast(pos, 'int32'))
for k in losses:
if k not in ('P', 'E', 'S'):
losses[k] /= total_num_pos
else:
losses[k] /= batch_size
return losses
def mask_iou_net(self, net, maskiou_net_input):
stride_list = [2,2,2,2,2,1]
kernel_list = [3,3,3,3,3,1]
subnet_blob_in = maskiou_net_input
for i in range(len(net)):
conv_name = 'maskiou_conv_n{}'.format(i)
subnet_blob = fluid.layers.conv2d(
input=subnet_blob_in,
num_filters=net[i],
filter_size=kernel_list[i],
stride=stride_list[i],
padding="SAME",
act='relu',
name=conv_name,
param_attr=ParamAttr(
name=conv_name + '_w',
initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name=conv_name + '_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
subnet_blob_in = subnet_blob
maskiou_p = subnet_blob_in
maskiou_p = fluid.layers.pool2d(maskiou_p, global_pooling=True)
maskiou_p = fluid.layers.squeeze(maskiou_p, axes=[2,3])
return maskiou_p
def mask_iou_loss(self, net, maskiou_targets):
maskiou_net_input, maskiou_t, label_t = maskiou_targets
maskiou_p = self.mask_iou_net(net, maskiou_net_input)
idx_s = fluid.layers.range(0, fluid.layers.shape(label_t)[0], 1, 'int32')
idx_s = fluid.layers.reshape(idx_s, (-1,1))
label_t = fluid.layers.reshape(label_t, (-1,1))
transform_idx_t = fluid.layers.concat([idx_s, label_t], -1)
transform_idx_t.stop_gradient=True
maskiou_p = fluid.layers.gather_nd(maskiou_p, transform_idx_t)
maskiou_p = fluid.layers.reshape(maskiou_p, shape=(-1,1))
maskiou_t = fluid.layers.reshape(maskiou_t, shape=(-1,1))
maskiou_t.stop_gradient=True
loss = fluid.layers.smooth_l1(maskiou_p,maskiou_t)
loss = fluid.layers.reduce_sum(loss) * 25
return loss
def semantic_segmentation_loss(self, segment_data, mask_t, class_t, batch_size, gt_num):
num_classes = 80
mask_h = 69
mask_w = 69
loss_s = 0
for idx in range(batch_size):
cur_segment = segment_data[idx]
cur_class_t = class_t[idx]
cur_num = gt_num[idx]
segment_t = get_segment_t_tensor(mask_t[idx], cur_class_t, cur_num, mask_w, mask_h)
segment_t.stop_gradient = True
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=fluid.layers.reshape(cur_segment, shape=(80,-1)),
label=fluid.layers.reshape(segment_t, shape=(80,-1)))
loss_s += fluid.layers.reduce_sum(loss)
loss_s = loss_s / mask_h / mask_w
return loss_s
def ohem_conf_loss(self, conf_data, conf_t, pos, num, num_priors):
batch_conf = fluid.layers.reshape(conf_data, shape=(-1, 81))
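# Numerically stable log-sum-exp over the class logits, minus the background logit,
# gives the per-prior loss used below to rank (OHEM) the negative examples.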
x_max = fluid.layers.reduce_max(batch_conf)
loss_c = fluid.layers.log(fluid.layers.reduce_sum(fluid.layers.exp(batch_conf - x_max), dim=1)) + x_max
loss_c = loss_c - batch_conf[:, 0]
batch_size = fluid.layers.shape(conf_data)[0]
loss_c = fluid.layers.reshape(loss_c, (batch_size, -1))
pos = fluid.layers.cast(pos, 'int32')
loss_c *= -(pos - 1)
loss_c *= -(fluid.layers.cast(conf_t < 0, 'int32') - 1)
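# Positives and ignored priors were zeroed out above; the double argsort below gives each
# prior's rank by descending loss, and only the hardest negatives (at most 3x the number
# of positives per image) are kept.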
_, loss_idx = fluid.layers.argsort(loss_c, 1, descending=True)
_, idx_rank = fluid.layers.argsort(loss_idx, 1)
idx_rank = fluid.layers.cast(idx_rank, 'int32')
num_pos = fluid.layers.reduce_sum(pos, dim=1, keep_dim=True)
negpos_ratio = 3
num_neg = fluid.layers.elementwise_min(fluid.layers.elementwise_max(negpos_ratio * num_pos, fluid.layers.zeros([1], dtype='int32')), fluid.layers.shape(pos)[1]-1)
neg = idx_rank < fluid.layers.expand_as(num_neg, idx_rank)
neg = fluid.layers.cast(neg, 'int32')
neg *= -(pos - 1)
neg *= -(fluid.layers.cast(conf_t < 0, 'int32') - 1)
posneg = (pos + neg) > 0
out = fluid.layers.where(posneg)
out.stop_gradient = True
conf_data_t = fluid.layers.gather_nd(conf_data, out)
conf_tt = fluid.layers.gather_nd(conf_t, out)
conf_tt = fluid.layers.reshape(conf_tt, shape=(-1,1))
conf_tt.stop_gradient=True
loss = fluid.layers.softmax_with_cross_entropy(
logits=fluid.layers.cast(conf_data_t,'float32'),
label=fluid.layers.cast(conf_tt,'int64'))
loss = fluid.layers.reduce_sum(loss)
return loss
def lincomb_mask_loss(self, pos, idx_t, mask_data, proto_data, masks, gt_box_t, labels, gt_num, batch_size, num_priors, interpolation_mode='bilinear'):
mask_h = 138
mask_w = 138
flag = create_tmp_var('flag','bool',[1])
maskiou_t_list = []
maskiou_net_input_list = []
label_t_list = []
for idx in range(batch_size):
downsampled_masks = fluid.layers.squeeze(
fluid.layers.resize_bilinear(
fluid.layers.unsqueeze(input=masks[idx], axes=[0]),
out_shape=[mask_h, mask_w], align_corners=False),
axes=[0])
downsampled_masks = fluid.layers.cast(downsampled_masks > 0.5,'float32')
cur_pos = fluid.layers.cast(pos,'int32')[idx]
out = fluid.layers.where(fluid.layers.cast(cur_pos,'bool'))
out.stop_gradient = True
pos_idx_t = fluid.layers.gather_nd(idx_t[idx], out)
pos_gt_box_t = fluid.layers.gather_nd(gt_box_t[idx], out)
proto_masks = proto_data[idx]
proto_coef = fluid.layers.gather_nd(mask_data[idx], out)
old_num_pos = fluid.layers.shape(proto_coef)[0]
select = create_tmp_var('select','int32',[-1,1])
fluid.layers.py_func(func=get_mast, x=proto_coef ,out=select)
masks_to_train = fluid.layers.fill_constant(shape=[1], dtype='int32', value=100)
out = fluid.layers.cond(old_num_pos > masks_to_train, lambda: masks_to_train, lambda: old_num_pos)
select = select[:out,]
select.stop_gradient=True
proto_coef = fluid.layers.gather(proto_coef, select, overwrite=True)
pos_idx_t = fluid.layers.gather(pos_idx_t, select, overwrite=True)
pos_gt_box_t = fluid.layers.gather(pos_gt_box_t, select, overwrite=True)
pos_idx_t.stop_gradient=True
pos_gt_box_t.stop_gradient=True
downsampled_masks.stop_gradient=True
downsampled_masks = fluid.layers.gather(downsampled_masks, pos_idx_t, overwrite=True)
label_t = fluid.layers.gather(labels[idx], pos_idx_t, overwrite=True)
mask_t = fluid.layers.transpose(downsampled_masks, perm=[1, 2, 0])
pred_masks = fluid.layers.matmul(proto_masks, proto_coef, transpose_x=False, transpose_y=True)
pred_masks = fluid.layers.sigmoid(pred_masks)
pred_masks = crop_tensor(pred_masks, pos_gt_box_t)
mask_t.stop_gradient=True
pred_masks = fluid.layers.clip(pred_masks, 0, 1)
pre_loss = - (fluid.layers.log(pred_masks+1e-10) * mask_t + (1 - mask_t) * fluid.layers.log(1 - pred_masks+1e-10))
pre_loss = crop_tensor(pre_loss, pos_gt_box_t)
pred_masks = crop_tensor(pred_masks, pos_gt_box_t)
weight = mask_h * mask_w
gt_box_width, gt_box_height = get_box_height_width_tensor(pos_gt_box_t, mask_w, mask_h)
gt_box_width.stop_gradient=True
gt_box_height.stop_gradient=True
pre_loss = fluid.layers.reduce_sum(pre_loss, dim=[0, 1]) / (gt_box_width) / (gt_box_height) * weight
num_pos = fluid.layers.shape(proto_coef)[0]
rate = old_num_pos / num_pos
rate.stop_gradient = True
if idx == 0:
loss_m = fluid.layers.reduce_sum(pre_loss) * fluid.layers.cast(rate, 'float32')
else:
loss_m += fluid.layers.reduce_sum(pre_loss) * fluid.layers.cast(rate, 'float32')
gt_mask_area = fluid.layers.reduce_sum(mask_t, dim=[0, 1])
discard_mask_area = 25
new_select = gt_mask_area > discard_mask_area
out = fluid.layers.where(new_select)
one = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
pred = fluid.layers.less_than(fluid.layers.shape(out)[0], one)
out1 = fluid.layers.reshape(fluid.layers.range(0, fluid.layers.shape(pos_gt_box_t)[0], 1, 'int32'), (-1,1))
out_res = fluid.layers.cond(pred, lambda: out1, lambda: out)
out_res.stop_gradient = True
pos_gt_box_t = fluid.layers.gather(pos_gt_box_t, out_res)
pred_masks = fluid.layers.transpose(pred_masks, perm=[2, 0, 1])
pred_masks = fluid.layers.gather(pred_masks, out_res)
mask_t = fluid.layers.transpose(mask_t, perm=[2, 0, 1])
mask_t = fluid.layers.gather(mask_t, out_res)
mask_t = fluid.layers.transpose(mask_t, perm=[1, 2, 0])
label_t = fluid.layers.gather(label_t, out_res)
maskiou_net_input = fluid.layers.unsqueeze(input=pred_masks, axes=[1])
pred_masks = fluid.layers.transpose(pred_masks, perm=[1, 2, 0])
pred_masks = fluid.layers.cast(pred_masks > 0.5,'float32')
maskiou_t = self._mask_iou(pred_masks, mask_t)
maskiou_net_input_list.append(maskiou_net_input)
maskiou_t_list.append(maskiou_t)
label_t_list.append(label_t)
mask_alpha = 6.125
losses = loss_m * mask_alpha / mask_h / mask_w
maskiou_t = fluid.layers.concat(maskiou_t_list, axis=0)
label_t = fluid.layers.concat(label_t_list, axis=0)
maskiou_net_input = fluid.layers.concat(maskiou_net_input_list, axis=0)
return losses, [maskiou_net_input, maskiou_t, label_t]
def _mask_iou(self, mask1, mask2):
intersection = fluid.layers.reduce_sum(mask1*mask2, dim=[0, 1])
area1 = fluid.layers.reduce_sum(mask1, dim=[0, 1])
area2 = fluid.layers.reduce_sum(mask2, dim=[0, 1])
union = (area1 + area2) - intersection
ret = intersection / (union + 1e-10)
return ret
def judge_discard_mask(self, new_select, pred_masks, pos_gt_box_t, mask_t, label_t):
out = fluid.layers.where(new_select)
one = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
pred = fluid.layers.less_than(fluid.layers.shape(out)[0], one)
def noop():
pred_masks_output = fluid.layers.transpose(pred_masks, perm=[2, 0, 1])
fluid.layers.assign(input=pred_masks_output, output=pred_masks)
def select_right():
pos_gt_box_t_output = fluid.layers.gather(pos_gt_box_t, out)
pred_masks_output = fluid.layers.transpose(pred_masks, perm=[2, 0, 1])
pred_masks_output = fluid.layers.gather(pred_masks_output, out)
mask_t_output = fluid.layers.transpose(mask_t, perm=[2, 0, 1])
mask_t_output = fluid.layers.gather(mask_t_output, out)
mask_t_output = fluid.layers.transpose(mask_t_output, perm=[1, 2, 0])
label_t_output = fluid.layers.gather(label_t, out)
fluid.layers.assign(input=pred_masks_output, output=pred_masks)
fluid.layers.assign(input=pos_gt_box_t_output, output=pos_gt_box_t)
fluid.layers.assign(input=mask_t_output, output=mask_t)
fluid.layers.assign(input=label_t_output, output=label_t)
fluid.layers.cond(pred, noop, select_right)
return pred_masks, pos_gt_box_t, mask_t, label_t
def transform_index(idx):
idx = np.array(idx)
res = []
for i in range(len(idx)):
res.append(np.array([i,idx[i]]))
return np.array(res)
def debug_shape(x):
x = np.array(x)
print('debug_shape:',np.shape(x))
def get_select(gt_mask_area):
gt_mask_area = np.array(gt_mask_area)
discard_mask_area = 10
select = gt_mask_area > discard_mask_area
return select
def get_box_height_width_tensor(pos_gt_box_t, mask_w, mask_h):
pos_gt_csize = center_size_tensor(pos_gt_box_t)
gt_box_width = pos_gt_csize[:, 2] * mask_w
gt_box_height = pos_gt_csize[:, 3] * mask_h
return fluid.layers.cast(gt_box_width, 'float32'), fluid.layers.cast(gt_box_height, 'float32')
def center_size_tensor(boxes):
return fluid.layers.concat(input=[ (boxes[:, 2:] + boxes[:, :2])/2,
boxes[:, 2:] - boxes[:, :2] ], axis=1)
def get_box_height_width(pos_gt_box_t):
mask_w = 144
mask_h = 144
pos_gt_box_t = np.array(pos_gt_box_t)
pos_gt_csize = center_size(pos_gt_box_t)
gt_box_width = pos_gt_csize[:, 2] * mask_w
gt_box_height = pos_gt_csize[:, 3] * mask_h
return gt_box_width.astype('float32'), gt_box_height.astype('float32')
def get_mast_to_train(x):
x = np.array(x)
masks_to_train = 100
if np.shape(x)[0] > masks_to_train:
perm = np.random.permutation(np.shape(x)[0])
select = perm[:masks_to_train]
return select
return np.random.permutation(np.shape(x)[0])
def get_mast(x):
x = np.array(x)
perm = | |
import logging
from inflection import underscore
from collections import OrderedDict as _o
import gilda
from indra.sources import trips
from indra.ontology.standardize import \
    standardize_agent_name
from indra.statements.statements import *
from indra.assemblers.english.assembler import _assemble_agent_str, \
    EnglishAssembler, statement_base_verb, statement_present_verb
from indra.explanation.model_checker.pysb import _add_activity_to_agent, \
    _add_modification_to_agent
from bioagents.tra.tra import MolecularQuantity, TemporalPattern, TimeInterval
from .util import get_class_from_name


logger = logging.getLogger(__name__)


class Query(object):
    """The parent class of all query types."""

    @classmethod
    def _from_json(cls, json_dict):
        query_type = json_dict.get('type')
        query_cls = get_class_from_name(query_type, Query)
        query = query_cls._from_json(json_dict)
        return query

    def matches(self, other):
        return self.matches_key() == other.matches_key()

    def matches_key(self):
        pass

    def get_hash(self):
        return make_hash(self.matches_key(), 14)

    def get_hash_with_model(self, model_name):
        key = (self.matches_key(), model_name)
        return make_hash(mk_str(key), 14)

    def get_type(self):
        return underscore(type(self).__name__)


class StructuralProperty(Query):
    pass

class PathProperty(Query):
    """This type of query requires finding a mechanistic, causally consistent
    path that satisfies the query statement.

    Parameters
    ----------
    path_stmt : indra.statements.Statement
        The path to look for in the model, represented as an INDRA Statement.
    entity_constraints : dict(list(indra.statements.Agent))
        A dictionary containing lists of Agents to be included in or excluded
        from the path.
    relationship_constraints : dict(list(str))
        A dictionary containing lists of Statement types to include in or
        exclude from the path.
    """
    def __init__(self, path_stmt, entity_constraints=None,
                 relationship_constraints=None):
        self.path_stmt = path_stmt
        if entity_constraints:
            self.include_entities = entity_constraints.get('include', [])
            self.exclude_entities = entity_constraints.get('exclude', [])
        else:
            self.include_entities = []
            self.exclude_entities = []
        if relationship_constraints:
            self.include_rels = relationship_constraints.get('include', [])
            self.exclude_rels = relationship_constraints.get('exclude', [])
        else:
            self.include_rels = []
            self.exclude_rels = []
        self.entities = self.get_entities()

    def to_json(self):
        query_type = self.get_type()
        json_dict = _o(type=query_type)
        json_dict['path'] = self.path_stmt.to_json()
        json_dict['entity_constraints'] = {}
        if self.include_entities:
            json_dict['entity_constraints']['include'] = [
                ec.to_json() for ec in self.include_entities]
        if self.exclude_entities:
            json_dict['entity_constraints']['exclude'] = [
                ec.to_json() for ec in self.exclude_entities]
        json_dict['relationship_constraints'] = {}
        if self.include_rels:
            json_dict['relationship_constraints']['include'] = [
                {'type': rel} for rel in self.include_rels]
        if self.exclude_rels:
            json_dict['relationship_constraints']['exclude'] = [
                {'type': rel} for rel in self.exclude_rels]
        return json_dict

    @classmethod
    def _from_json(cls, json_dict):
        path_stmt_json = json_dict.get('path')
        path_stmt = Statement._from_json(path_stmt_json)
        ent_constr_json = json_dict.get('entity_constraints')
        entity_constraints = None
        if ent_constr_json:
            entity_constraints = {}
            for key, value in ent_constr_json.items():
                entity_constraints[key] = [Agent._from_json(ec) for ec
                                           in value]
        rel_constr_json = json_dict.get('relationship_constraints')
        relationship_constraints = None
        if rel_constr_json:
            relationship_constraints = {}
            for key, value in rel_constr_json.items():
                relationship_constraints[key] = [
                    rel_type['type'] for rel_type in value]
        query = cls(path_stmt, entity_constraints, relationship_constraints)
        return query

    def get_entities(self):
        """Return entities from the path statement and the inclusion list."""
        path_entities = self.path_stmt.agent_list()
        return path_entities + self.include_entities

    def matches_key(self):
        key = self.path_stmt.matches_key()
        if self.include_entities:
            for ent in sorted(self.include_entities,
                              key=lambda x: x.matches_key()):
                key += ent.matches_key()
        if self.exclude_entities:
            for ent in sorted(self.exclude_entities,
                              key=lambda x: x.matches_key()):
                key += ent.matches_key()
        if self.include_rels:
            for rel in sorted(self.include_rels):
                key += rel
        if self.exclude_rels:
            for rel in sorted(self.exclude_rels):
                key += rel
        return mk_str(key)

    def __str__(self):
        parts = [f'PathPropertyQuery(stmt={str(self.path_stmt)}.']
        if self.include_entities:
            inents = ', '.join([str(e) for e in self.include_entities])
            parts.append(f' Include entities: {inents}.')
        if self.exclude_entities:
            exents = ', '.join([str(e) for e in self.exclude_entities])
            parts.append(f' Exclude entities: {exents}.')
        if self.include_rels:
            inrels = ', '.join(self.include_rels)
            parts.append(f' Include relations: {inrels}.')
        if self.exclude_rels:
            exrels = ', '.join(self.exclude_rels)
            parts.append(f' Exclude relations: {exrels}.')
        return ''.join(parts)

    def __repr__(self):
        return str(self)

    def to_english(self):
        ea = EnglishAssembler([self.path_stmt])
        return ea.make_model()
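
# Hedged usage sketch (not part of the original module): how a PathProperty
# query might be built and round-tripped through JSON. The statement, agents
# and constraints below are illustrative values only.
def _example_path_property():
    stmt = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
    query = PathProperty(
        stmt,
        entity_constraints={'exclude': [Agent('BRAF')]},
        relationship_constraints={'exclude': ['Inhibition']})
    # to_json/_from_json are symmetric, so the query survives a round trip.
    assert PathProperty._from_json(query.to_json()).matches(query)
    return query.to_english()  # e.g. 'MAP2K1 phosphorylates MAPK1.'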

class SimpleInterventionProperty(Query):
    """This type of query requires dynamic simulation of the model to observe
    the behavior under perturbation.
    """
    def __init__(self, condition_entity, target_entity, direction):
        self.condition_entity = condition_entity
        self.target_entity = target_entity
        self.direction = direction

    @classmethod
    def from_stmt(cls, stmt):
        if not isinstance(stmt, (Modification, RegulateAmount,
                                 RegulateActivity, Influence)):
            logger.info('Statement type %s not handled' %
                        stmt.__class__.__name__)
            return
        # Get the polarity for the statement
        if isinstance(stmt, Modification):
            dir = 'dn' if isinstance(stmt, RemoveModification) else 'up'
        elif isinstance(stmt, RegulateActivity):
            dir = 'up' if stmt.is_activation else 'dn'
        elif isinstance(stmt, RegulateAmount):
            dir = 'dn' if isinstance(stmt, DecreaseAmount) else 'up'
        elif isinstance(stmt, Influence):
            dir = 'dn' if stmt.overall_polarity() == -1 else 'up'
        # Get condition and target agents
        # Modification
        if isinstance(stmt, Modification):
            # TODO use Modification's _get_mod_condition when
            # _add_modification_to_agent is refactored in INDRA
            condition_entity = stmt.enz
            # Add the mod for the agent
            mod_condition_name = modclass_to_modtype[stmt.__class__]
            if isinstance(stmt, RemoveModification):
                mod_condition_name = modtype_to_inverse[
                    mod_condition_name]
            # Add modification to substrate agent
            target_entity = _add_modification_to_agent(
                stmt.sub, mod_condition_name, stmt.residue, stmt.position)
        # Activation/Inhibition
        elif isinstance(stmt, RegulateActivity):
            condition_entity = stmt.subj
            # Add activity to object agent
            target_entity = _add_activity_to_agent(
                stmt.obj, stmt.obj_activity, stmt.is_activation)
        # Increase/Decrease amount
        elif isinstance(stmt, (RegulateAmount, Influence)):
            condition_entity, target_entity = stmt.agent_list()
        query = cls(condition_entity, target_entity, dir)
        return query

    def matches_key(self):
        condition_key = self.condition_entity.matches_key()
        target_key = self.target_entity.matches_key()
        key = (condition_key, self.direction, target_key)
        return str(key)

    def to_json(self):
        query_type = self.get_type()
        json_dict = _o(type=query_type)
        json_dict['condition_entity'] = self.condition_entity.to_json()
        json_dict['target_entity'] = self.target_entity.to_json()
        json_dict['direction'] = self.direction
        return json_dict

    @classmethod
    def _from_json(cls, json_dict):
        cond_ent_json = json_dict.get('condition_entity')
        condition_entity = Agent._from_json(cond_ent_json)
        target_ent_json = json_dict.get('target_entity')
        target_entity = Agent._from_json(target_ent_json)
        direction = json_dict.get('direction')
        query = cls(condition_entity, target_entity, direction)
        return query

    def __str__(self):
        descr = (f'SimpleInterventionPropertyQuery'
                 f'(condition={self.condition_entity}, '
                 f'target={self.target_entity}, '
                 f'direction={self.direction})')
        return descr

    def __repr__(self):
        return str(self)

    def to_english(self):
        cond = _assemble_agent_str(self.condition_entity).agent_str
        target = _assemble_agent_str(self.target_entity).agent_str
        if self.direction == 'up':
            dir_verb = 'increases'
        else:
            dir_verb = 'decreases'
        return f'{cond} {dir_verb} {target}.'
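
# Hedged usage sketch (not part of the original module): deriving an
# intervention query from an INDRA Activation statement. The agents are
# illustrative values only.
def _example_simple_intervention():
    stmt = Activation(Agent('BRAF'), Agent('MAPK1'))
    query = SimpleInterventionProperty.from_stmt(stmt)
    # BRAF becomes the condition entity, MAPK1 (with an activity condition
    # attached) the target, and the direction is 'up' for an activation.
    return query.to_english()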

class ComparativeInterventionProperty(Query):
    pass


class DynamicProperty(Query):
    """This type of query requires dynamic simulation of the model to check
    whether the queried temporal pattern is satisfied.

    Parameters
    ----------
    entity : indra.statements.Agent
        An entity to simulate the model for.
    pattern_type : str
        Type of temporal pattern. Accepted values: 'always_value', 'no_change',
        'eventual_value', 'sometime_value', 'sustained', 'transient'.
    quant_value : str or float
        Value of the molecular quantity of the entity of interest. Can be
        'high', 'low', or a specific number.
    quant_type : str
        Type of the molecular quantity of the entity of interest.
        Default: qualitative.
    """
    def __init__(self, entity, pattern_type, quant_value=None,
                 quant_type='qualitative'):
        self.entity = entity
        self.pattern_type = pattern_type
        self.quant_value = quant_value
        self.quant_type = quant_type

    def get_temporal_pattern(self, time_limit=None):
        """Return a TemporalPattern object created with the query properties."""
        mq = None
        if self.quant_value:
            mq = MolecularQuantity(self.quant_type, self.quant_value)
        t = None
        if time_limit:
            t = TimeInterval(0, time_limit, 'second')
        tp = TemporalPattern(self.pattern_type, [self.entity], t, value=mq)
        return tp

    def matches_key(self):
        ent_matches_key = self.entity.matches_key()
        key = (ent_matches_key, self.pattern_type, self.quant_type,
               str(self.quant_value))
        return str(key)

    def to_json(self):
        query_type = self.get_type()
        json_dict = _o(type=query_type)
        json_dict['entity'] = self.entity.to_json()
        json_dict['pattern_type'] = self.pattern_type
        json_dict['quantity'] = {}
        json_dict['quantity']['type'] = self.quant_type
        json_dict['quantity']['value'] = self.quant_value
        return json_dict

    @classmethod
    def _from_json(cls, json_dict):
        ent_json = json_dict.get('entity')
        entity = Agent._from_json(ent_json)
        pattern_type = json_dict.get('pattern_type')
        quant_json = json_dict.get('quantity')
        quant_type = quant_json.get('type')
        quant_value = quant_json.get('value')
        query = cls(entity, pattern_type, quant_value, quant_type)
        return query

    def __str__(self):
        descr = (f'DynamicPropertyQuery(entity={self.entity}, '
                 f'pattern={self.pattern_type}, '
                 f'molecular quantity={(self.quant_type, self.quant_value)})')
        return descr

    def __repr__(self):
        return str(self)

    def to_english(self):
        agent = _assemble_agent_str(self.entity).agent_str
        agent = agent[0].upper() + agent[1:]
        if self.pattern_type == 'always_value':
            pattern = 'always'
        elif self.pattern_type == 'eventual_value':
            pattern = 'eventually'
        elif self.pattern_type == 'sometime_value':
            pattern = 'sometimes'
        elif self.pattern_type == 'no_change':
            pattern = 'not changing'
        else:
            pattern = self.pattern_type
        if self.quant_value:
            return f'{agent} is {pattern} {self.quant_value}.'
        return f'{agent} is {pattern}.'
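
# Hedged usage sketch (not part of the original module): a dynamic query asking
# whether MAPK1 eventually reaches a high level, and the temporal pattern it
# produces for the TRA. The entity and time limit are illustrative values only.
def _example_dynamic_property():
    query = DynamicProperty(Agent('MAPK1'), 'eventual_value', 'high')
    # A TemporalPattern over a 0-100 second interval carrying a qualitative
    # 'high' MolecularQuantity for MAPK1.
    pattern = query.get_temporal_pattern(time_limit=100)
    return query.to_english(), pattern  # 'MAPK1 is eventually high.'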

class OpenSearchQuery(Query):
    """This type of query requires doing an open-ended breadth-first search
    to find paths satisfying the query.

    Parameters
    ----------
    entity : indra.statements.Agent
        An entity to start the open search from.
    stmt_type : str
        Name of the statement type.
    entity_role : str
        The role the entity should play in the statement (subject or object).
    terminal_ns : list[str]
        Force a path to terminate when any of the namespaces in this list
        are encountered, and only yield paths that terminate at these
        namespaces.

    Attributes
    ----------
    path_stmt : indra.statements.Statement
        An INDRA Statement having its subject or object set to None to
        represent the open search query.
    """
    def __init__(self, entity, stmt_type, entity_role, terminal_ns=None):
        self.entity = entity
        self.stmt_type = stmt_type
        self.entity_role = entity_role
        self.terminal_ns = [ns.lower() for ns in terminal_ns] if terminal_ns \
            else None
        self.path_stmt = self.make_stmt()

    def make_stmt(self):
        stmt_type = self.stmt_type
        if self.entity_role == 'subject':
            if self.stmt_type == 'IncreaseAmount':
                stmt_type = 'Activation'
            elif self.stmt_type == 'DecreaseAmount':
                stmt_type = 'Inhibition'
        stmt_class = get_statement_by_name(stmt_type)
        if self.entity_role == 'subject':
            subj = self.entity
            obj = None
        elif self.entity_role == 'object':
            subj = None
            obj = self.entity
        stmt = stmt_class(subj, obj)
        return stmt

    def get_sign(self, mc_type):
        if mc_type == 'unsigned_graph' or self.entity_role == 'object':
            sign = 0
        elif isinstance(self.path_stmt, RegulateActivity):
            sign = 0 if self.path_stmt.is_activation else 1
        elif isinstance(self.path_stmt, RegulateAmount):
            sign = 1 if isinstance(self.path_stmt, DecreaseAmount) else 0
        else:
            raise ValueError('Could not determine sign')
        return sign
    def matches_key(self):
        key = self.entity.matches_key()
        key += self.stmt_type
        # Assumed completion: entity_role (and terminal namespaces, if any)
        # round out the key.
        key += self.entity_role
        if self.terminal_ns:
            key += str(self.terminal_ns)
        return mk_str(key)
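
# Hedged usage sketch (not part of the original module): an open search for
# targets activated by BRAF, terminating at small-molecule (CHEBI) nodes.
# The entity, statement type and namespaces are illustrative values only.
def _example_open_search():
    query = OpenSearchQuery(Agent('BRAF'), 'Activation', 'subject',
                            terminal_ns=['CHEBI'])
    # The underlying path statement has an open object slot, Activation(BRAF(), None),
    # and the sign on a signed graph is 0 because the statement is an activation.
    return query.path_stmt, query.get_sign('signed_graph')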