ext | sha | content
---|---|---|
py | b40efda50fa461e668e2f99e3c7d60cae963acc8 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import server_external_events \
as server_external_events_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
fake_instances = {
'00000000-0000-0000-0000-000000000001': objects.Instance(
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': objects.Instance(
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': objects.Instance(
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': objects.Instance(
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
def fake_get_by_uuid(cls, context, uuid):
try:
return fake_instances[uuid]
except KeyError:
raise exception.InstanceNotFound(instance_id=uuid)
@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTestV21(test.NoDBTestCase):
server_external_events = server_external_events_v21
invalid_error = exception.ValidationError
def setUp(self):
super(ServerExternalEventsTestV21, self).setUp()
self.api = \
self.server_external_events.ServerExternalEventsController()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0],
'status': 'completed'}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
def _assert_call(self, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(self.req, body=body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
for inst in api_method.call_args_list[0][0][1]:
expected_uuids.remove(inst.uuid)
self.assertEqual([], expected_uuids)
for event in api_method.call_args_list[0][0][2]:
expected_events.remove(event.name)
self.assertEqual([], expected_events)
return result, code
def test_create(self):
result, code = self._assert_call(self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
result, code = self._assert_call(body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
# the instance without host should not be passed to the compute layer
result, code = self._assert_call(body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, self.req, body=body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_events(self):
body = {'events': 'foo'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
    def test_create_unknown_events(self):
        self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
|
py | b40efe000d15ea5abc0e8575280c8d5721c8df1c | def self_bin_search(arr, s, e, key):
    # Check the midpoint once, then scan inward from both ends.
    # Note: despite the name, this is a two-ended linear scan, not a true
    # binary search (the input list is not sorted).
    mid = int(s + (e - s) / 2)
    if arr[mid] == key:
        return mid
    while s < e:
        if arr[s] == key:
            return s
        if arr[e] == key:
            return e
        s += 1
        e -= 1
    return -1


arr = [33, 44, 16, 1, 14, 6, 2, 5, 7, 9, 10]
x = self_bin_search(arr, 0, len(arr) - 1, 9)
print(x)
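
# For contrast, a standard iterative binary search sketch; it is not part of
# the original exercise and assumes the input list is sorted in ascending order.
def binary_search(sorted_arr, key):
    lo, hi = 0, len(sorted_arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if sorted_arr[mid] == key:
            return mid
        if sorted_arr[mid] < key:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1


print(binary_search(sorted(arr), 9))  # index of 9 in a sorted copy of arr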
|
py | b40efe23053ce0ebefdd8e5c8a7e83df9323952e | #!/usr/bin/env python
"""Configuration parameters for the client."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.rdfvalues import crypto
# General Client options.
config_lib.DEFINE_string("Client.name", "GRR",
"The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [], "Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string("Client.deploy_time", "Unknown",
"The time the client was deployed.")
config_lib.DEFINE_string("Client.build_environment", None,
"The output of Uname.FromCurrentSystem.signature() "
"on the system the client was built on.")
config_lib.DEFINE_integer("Client.rsa_key_length", 2048,
"The key length of the client keys in bits.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(Template.version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_string(
name="Client.component_path",
default=r"%(Client.install_path)/components",
help="Where the client components are installed on the client.")
config_lib.DEFINE_string(
name="Client.component_url_stem",
default="%(Frontend.static_url_path_prefix)components/",
help="A URL path where components will be served from.")
config_lib.DEFINE_semantic(
rdfvalue.RDFURN,
"Client.component_aff4_stem",
default="%(Frontend.static_aff4_prefix)/components/",
description="A common AFF4 stem where components will be served from.")
config_lib.DEFINE_string(
name="Client.rekall_profile_cache_path",
default=r"%(Client.install_path)\\rekall_profiles",
help="Where GRR stores cached Rekall profiles needed for memory analysis")
config_lib.DEFINE_list(
name="Client.server_urls", default=[], help="Base URL for client control.")
config_lib.DEFINE_list("Client.control_urls", [],
"DEPRECATED List of URLs of the controlling server. "
"Use server_urls instead.")
config_lib.DEFINE_integer("Client.http_timeout", 100,
"Timeout for HTTP requests.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/com.google.code.grrd.plist",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", None,
"Filename of launchctl plist.")
config_lib.DEFINE_string("Client.plist_label", None,
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", None,
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 600,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float("Client.error_poll_min", 60,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_float("Client.poll_slew", 1.15, "Slew of poll time.")
config_lib.DEFINE_integer("Client.connection_error_limit", 60 * 24,
"If the client encounters this many connection "
"errors, it exits and restarts. Retries are one "
"minute apart.")
config_lib.DEFINE_integer("Client.retry_error_limit", 10,
"If the client encounters this many connection "
"errors, it searches for a new proxy/server url "
"combination.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 40000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 51200000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer("Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float("Client.rss_max", 1000,
"Maximum memory footprint in MB (soft limit). "
"Exceeding this will result in an orderly shutdown.")
config_lib.DEFINE_float("Client.rss_max_hard", 2000,
"Maximum memory footprint in MB (hard limit). "
"Exceeding this will result in aborting the current "
"client action and restarting.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_list(
name="Client.tempdir_roots",
help="List of temporary directories to use on the client.",
default=["/var/tmp/"])
config_lib.DEFINE_string(
name="Client.grr_tempdir",
help="Default subdirectory in the temp directory to use for GRR.",
default="%(Client.name)")
config_lib.DEFINE_list(
name="Client.vfs_virtualroots",
help=("If this is set for a VFS type, client VFS operations will always be"
" relative to the given root. Format is os:/mount/disk."),
default=[])
# Windows client specific options.
config_lib.DEFINE_string(
"Client.config_hive",
r"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string(
"Client.config_key",
r"Software\\GRR",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options. Here we define defaults for key values.
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"Client.private_key",
description="Client private key in pem format. If not provided this "
"will be generated by the enrollment process.",)
config_lib.DEFINE_semantic(
crypto.RDFX509Cert,
"CA.certificate",
description="Trusted CA certificate in X509 pem format",)
config_lib.DEFINE_semantic(
crypto.RSAPublicKey,
"Client.executable_signing_public_key",
description="public key for verifying executable signing.")
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"PrivateKeys.executable_signing_private_key",
description="Private keys for signing executables. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_integer("Client.server_serial_number", 0,
"Minimal serial number we accept for server cert.")
config_lib.DEFINE_integer(
"Client.gc_frequency", 10,
"Defines how often the client calls garbage collection (seconds).")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string(
"Nanny.child_binary", "GRR.exe", help="The location to the client binary.")
config_lib.DEFINE_string(
"Nanny.child_command_line",
"%(Nanny.child_binary)",
help="The command line to launch the client binary.")
config_lib.DEFINE_string("Nanny.logfile", "%(Logging.path)/nanny.log",
"The file where we write the nanny transaction log.")
config_lib.DEFINE_string(
"Nanny.service_name", "GRR Service", help="The name of the nanny.")
config_lib.DEFINE_string(
"Nanny.service_description",
"GRR Service",
help="The description of the nanny service.")
config_lib.DEFINE_string(
"Nanny.service_key",
r"%(Client.config_key)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string(
"Nanny.service_key_hive",
r"%(Client.config_hive)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.statusfile", "%(Logging.path)/nanny.status",
"The file where we write the nanny status.")
config_lib.DEFINE_string("Nanny.status", "",
"The regkey where we write the nanny status.")
config_lib.DEFINE_string(
"Nanny.binary",
r"%(Client.install_path)\\%(service_binary_name)",
help="The full location to the nanny binary.")
config_lib.DEFINE_string(
"Nanny.service_binary_name",
"%(Client.name)service.exe",
help="The executable name of the nanny binary.")
config_lib.DEFINE_integer("Nanny.unresponsive_kill_period", 60,
"The time in seconds after which the nanny kills us.")
config_lib.DEFINE_integer("Network.api", 3,
"The version of the network protocol the client "
"uses.")
config_lib.DEFINE_string(
"Network.compression",
default="ZCOMPRESS",
help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Installer options.
config_lib.DEFINE_string(
name="Installer.logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help=("A specific log file which is used for logging the "
"installation process."))
config_lib.DEFINE_list("Installer.old_key_map", [
"HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate->Client.private_key",
"HKEY_LOCAL_MACHINE\\Software\\GRR\\server_serial_number"
"->Client.server_serial_number",
], """
A mapping of old registry values which will be copied to new values. The old
value location must start with a valid hive name, followed by a key name, and
end with the value name. The source location must be separated from the new
parameter name by a -> symbol.
This setting allows settings from obsolete client installations to be carried
over to newer versions of the client, which may store the same information in
other locations.
For example:
HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate -> Client.private_key
""")
|
py | b40efef25c9e1b117f533c6d89206fc90ce6bb1b | """[Practice: Ice Cream]

Class:
    keyword   class name
    class     Ice:

Instantiation:
    variable    class name   ("instance")
    ice       = Ice()

Method (an action or behavior). To add a method, simply define a function
inside the class:
    # the method name is (eat)
    def eat(self):
        print("hi")  # this line is the method content

Dot Expression (to call/test the method):
    instance   . method name ()
    IceCream   . eat         ()

Assigning an attribute is very similar to defining a variable:
    # self attaches the attribute    attribute value
    self.cubes                     = 3
"""
class IceCream:
    def __init__(self):
        # Start with zero scoops so eat()/add() have an attribute to work with.
        self.scoops = 0
        print("Created ice cream")

    def eat(self, scoops):
        if self.scoops < scoops:
            print("Not enough bites left!")
            return  # don't let the scoop count go negative
        self.scoops -= scoops

    def add(self, scoops):
        self.scoops += scoops
# IceCream.eat()  # calling the method on the class (no instance) raises:
# Traceback (most recent call last):
# File "/home/rich/Desktop/CarlsHub/Coding101-OOP/Classiles/ice_cream.py", line 37, in <module>
# IceCream.eat()
# TypeError: eat() missing 2 required positional arguments: 'self' and 'scoops'
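
# Usage sketch (not part of the original exercise): methods are called on an
# instance via dot expression, as the notes above describe.
ice = IceCream()   # prints "Created ice cream"
ice.add(3)         # the ice cream now holds 3 scoops
ice.eat(1)         # 2 scoops left
ice.eat(5)         # prints "Not enough bites left!"
print(ice.scoops)  # -> 2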
|
py | b40eff231ad5ba756a33239af1e202f49f04c95c | from minicps.devices import PLC
from utils import *
import logging
import time
SENSOR_ADDR = IP['lit103']
LIT103 = ('LIT103', 1)
class Lit103(PLC):
def pre_loop(self, sleep=0.1):
logging.basicConfig(filename=LOG_LIT103_FILE, level=logging.DEBUG)
time.sleep(sleep)
    def main_loop(self):
        print 'DEBUG: sensor enters main_loop'
        count = 0
        while count <= PLC_SAMPLES:
            self.level = float(self.get(LIT103))
            logging.debug('LIT103 level %s', self.level)
            self.send(LIT103, self.level, IP['lit103'])
            count += 1  # advance the sample counter so the loop terminates
            time.sleep(PLC_PERIOD_SEC)
if __name__ == '__main__':
    lit103 = Lit103(
        name='lit103', state=STATE, protocol=LIT103_PROTOCOL,
        memory=GENERIC_DATA, disk=GENERIC_DATA)
|
py | b40f00901ccd6c153bd0750caa483f791c9e5fe7 | """
byceps.services.webhooks.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Any, Optional
from ...database import db
from .dbmodels import OutgoingWebhook as DbOutgoingWebhook
from .transfer.models import EventFilters, OutgoingWebhook, WebhookID
def create_outgoing_webhook(
event_types: set[str],
event_filters: EventFilters,
format: str,
url: str,
enabled: bool,
*,
text_prefix: Optional[str] = None,
extra_fields: Optional[dict[str, Any]] = None,
description: Optional[str] = None,
) -> OutgoingWebhook:
"""Create an outgoing webhook."""
webhook = DbOutgoingWebhook(
event_types,
event_filters,
format,
url,
enabled,
text_prefix=text_prefix,
extra_fields=extra_fields,
description=description,
)
db.session.add(webhook)
db.session.commit()
return _db_entity_to_outgoing_webhook(webhook)
def update_outgoing_webhook(
webhook_id: WebhookID,
event_types: set[str],
event_filters: EventFilters,
format: str,
text_prefix: Optional[str],
extra_fields: Optional[dict[str, Any]],
url: str,
description: Optional[str],
enabled: bool,
) -> OutgoingWebhook:
"""Update an outgoing webhook."""
webhook = _find_db_webhook(webhook_id)
if webhook is None:
raise ValueError(f'Unknown webhook ID "{webhook_id}"')
webhook.event_types = event_types
webhook.event_filters = event_filters
webhook.format = format
webhook.text_prefix = text_prefix
webhook.extra_fields = extra_fields
webhook.url = url
webhook.description = description
webhook.enabled = enabled
db.session.commit()
return _db_entity_to_outgoing_webhook(webhook)
def delete_outgoing_webhook(webhook_id: WebhookID) -> None:
"""Delete the outgoing webhook."""
db.session.query(DbOutgoingWebhook) \
.filter_by(id=webhook_id) \
.delete()
db.session.commit()
def find_webhook(webhook_id: WebhookID) -> Optional[OutgoingWebhook]:
"""Return the webhook with that ID, if found."""
webhook = _find_db_webhook(webhook_id)
if webhook is None:
return None
return _db_entity_to_outgoing_webhook(webhook)
def _find_db_webhook(webhook_id: WebhookID) -> Optional[DbOutgoingWebhook]:
"""Return the webhook database entity with that ID, if found."""
return db.session.query(DbOutgoingWebhook).get(webhook_id)
def get_all_webhooks() -> list[OutgoingWebhook]:
"""Return all webhooks."""
webhooks = db.session.query(DbOutgoingWebhook).all()
return [_db_entity_to_outgoing_webhook(webhook) for webhook in webhooks]
def get_enabled_outgoing_webhooks(event_type: str) -> list[OutgoingWebhook]:
"""Return the configurations for enabled outgoing webhooks for that
event type.
"""
webhooks = db.session.query(DbOutgoingWebhook) \
.filter(DbOutgoingWebhook._event_types.contains([event_type])) \
.filter_by(enabled=True) \
.all()
return [_db_entity_to_outgoing_webhook(webhook) for webhook in webhooks]
def _db_entity_to_outgoing_webhook(
webhook: DbOutgoingWebhook,
) -> OutgoingWebhook:
event_filters = (
dict(webhook.event_filters)
if (webhook.event_filters is not None)
else {}
)
extra_fields = (
dict(webhook.extra_fields) if (webhook.extra_fields is not None) else {}
)
return OutgoingWebhook(
id=webhook.id,
event_types=webhook.event_types,
event_filters=event_filters,
format=webhook.format,
text_prefix=webhook.text_prefix,
extra_fields=extra_fields,
url=webhook.url,
description=webhook.description,
enabled=webhook.enabled,
)
|
py | b40f0111eb5c01d66c277188f7ceaa58b487cb67 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from msrest.exceptions import ValidationError
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import get_subscription_id
from ._client_factory import cf_acr_tokens, cf_acr_scope_maps
from ._utils import (
build_token_id,
create_default_scope_map,
get_registry_by_name,
get_scope_map_from_id,
get_token_from_id,
parse_scope_map_actions,
user_confirmation,
validate_managed_registry
)
class ConnectedRegistryModes(Enum):
MIRROR = 'Mirror'
REGISTRY = 'Registry'
class ConnectedRegistryActivationStatus(Enum):
ACTIVE = 'Active'
INACTIVE = 'Inactive'
DEFAULT_GATEWAY_SCOPE = ['config/read', 'config/write', 'message/read', 'message/write']
REPO_SCOPES_BY_MODE = {
ConnectedRegistryModes.MIRROR.value: ['content/read', 'metadata/read'],
ConnectedRegistryModes.REGISTRY.value: ['content/read', 'content/write', 'content/delete',
'metadata/read', 'metadata/write']
}
SYNC_SCOPE_MAP_NAME = "{}-sync-scope-map"
SYNC_TOKEN_NAME = "{}-sync-token"
REPOSITORY = "repositories/"
GATEWAY = "gateway/"
logger = get_logger(__name__)
def acr_connected_registry_create(cmd, # pylint: disable=too-many-locals, too-many-statements
client,
registry_name,
connected_registry_name,
repositories=None,
sync_token_name=None,
client_token_list=None,
resource_group_name=None,
mode=None,
parent_name=None,
sync_schedule=None,
sync_message_ttl=None,
sync_window=None,
log_level=None,
sync_audit_logs_enabled=False):
if bool(sync_token_name) == bool(repositories):
raise CLIError("usage error: you need to provide either --sync-token-name or --repository, but not both.")
registry, resource_group_name = get_registry_by_name(cmd.cli_ctx, registry_name, resource_group_name)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not registry.data_endpoint_enabled:
raise CLIError("Can't create the connected registry '{}' ".format(connected_registry_name) +
"because the cloud registry '{}' data endpoint is disabled. ".format(registry_name) +
"Enabling the data endpoint might affect your firewall rules.\nTo enable data endpoint run:" +
"\n\taz acr update -n {} --data-endpoint-enabled true".format(registry_name))
ErrorResponseException = cmd.get_models('ErrorResponseException')
parent = None
mode = mode.capitalize()
if parent_name:
try:
parent = acr_connected_registry_show(cmd, client, parent_name, registry_name, resource_group_name)
connected_registry_list = list(client.list(resource_group_name, registry_name))
family_tree, _ = _get_family_tree(connected_registry_list, None)
except ErrorResponseException as ex:
if ex.response.status_code == 404:
raise CLIError("The parent connected registry '{}' could not be found.".format(parent_name))
raise CLIError(ex)
if parent.mode != ConnectedRegistryModes.REGISTRY.value and parent.mode != mode:
raise CLIError("Can't create the registry '{}' with mode '{}' ".format(connected_registry_name, mode) +
"when the connected registry parent '{}' mode is '{}'. ".format(parent_name, parent.mode) +
"For more information on connected registries " +
"please visit https://aka.ms/acr/connected-registry.")
_update_ancestor_permissions(cmd, family_tree, resource_group_name, registry_name, parent.id,
connected_registry_name, repositories, mode, False)
if sync_token_name:
sync_token_id = build_token_id(subscription_id, resource_group_name, registry_name, sync_token_name)
else:
sync_token_id = _create_sync_token(cmd, resource_group_name, registry_name,
connected_registry_name, repositories, mode)
if client_token_list is not None:
for i, client_token_name in enumerate(client_token_list):
client_token_list[i] = build_token_id(
subscription_id, resource_group_name, registry_name, client_token_name)
ConnectedRegistry, LoggingProperties, SyncProperties, ParentProperties = cmd.get_models(
'ConnectedRegistry', 'LoggingProperties', 'SyncProperties', 'ParentProperties')
connected_registry_create_parameters = ConnectedRegistry(
provisioning_state=None,
mode=mode,
parent=ParentProperties(
id=parent.id if parent else None,
sync_properties=SyncProperties(
token_id=sync_token_id,
schedule=sync_schedule,
message_ttl=sync_message_ttl,
sync_window=sync_window
)
),
client_token_ids=client_token_list,
logging=LoggingProperties(
log_level=log_level,
audit_log_status='Enabled' if sync_audit_logs_enabled else 'Disabled'
)
)
try:
return client.create(subscription_id=subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
connected_registry_name=connected_registry_name,
connected_registry_create_parameters=connected_registry_create_parameters)
except ValidationError as e:
raise CLIError(e)
def acr_connected_registry_update(cmd, # pylint: disable=too-many-locals, too-many-statements
client,
registry_name,
connected_registry_name,
add_client_token_list=None,
remove_client_token_list=None,
resource_group_name=None,
sync_schedule=None,
sync_window=None,
log_level=None,
sync_message_ttl=None,
sync_audit_logs_enabled=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
subscription_id = get_subscription_id(cmd.cli_ctx)
current_connected_registry = acr_connected_registry_show(
cmd, client, connected_registry_name, registry_name, resource_group_name)
# Add or remove from the current client token id list
if add_client_token_list is not None:
for i, client_token_name in enumerate(add_client_token_list):
add_client_token_list[i] = build_token_id(
subscription_id, resource_group_name, registry_name, client_token_name)
add_client_token_set = set(add_client_token_list)
else:
add_client_token_set = set()
if remove_client_token_list is not None:
for i, client_token_name in enumerate(remove_client_token_list):
remove_client_token_list[i] = build_token_id(
subscription_id, resource_group_name, registry_name, client_token_name)
remove_client_token_set = set(remove_client_token_list)
else:
remove_client_token_set = set()
duplicate_client_token = set.intersection(add_client_token_set, remove_client_token_set)
if duplicate_client_token:
errors = sorted(map(lambda action: action[action.rfind('/') + 1:], duplicate_client_token))
raise CLIError(
'Update ambiguity. Duplicate client token ids were provided with ' +
'--add-client-tokens and --remove-client-tokens arguments.\n{}'.format(errors))
current_client_token_set = set(current_connected_registry.client_token_ids) \
if current_connected_registry.client_token_ids else set()
client_token_set = current_client_token_set.union(add_client_token_set).difference(remove_client_token_set)
client_token_list = list(client_token_set) if client_token_set != current_client_token_set else None
ConnectedRegistryUpdateParameters, SyncUpdateProperties, LoggingProperties = cmd.get_models(
'ConnectedRegistryUpdateParameters', 'SyncUpdateProperties', 'LoggingProperties')
connected_registry_update_parameters = ConnectedRegistryUpdateParameters(
sync_properties=SyncUpdateProperties(
token_id=current_connected_registry.parent.sync_properties.token_id,
schedule=sync_schedule,
message_ttl=sync_message_ttl,
sync_window=sync_window
),
logging=LoggingProperties(
log_level=log_level,
audit_log_status=sync_audit_logs_enabled
),
client_token_ids=client_token_list
)
try:
return client.update(resource_group_name=resource_group_name,
registry_name=registry_name,
connected_registry_name=connected_registry_name,
connected_registry_update_parameters=connected_registry_update_parameters)
except ValidationError as e:
raise CLIError(e)
def acr_connected_registry_delete(cmd,
client,
connected_registry_name,
registry_name,
cleanup=False,
yes=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
user_confirmation("Are you sure you want to delete the connected registry '{}' in '{}'?".format(
connected_registry_name, registry_name), yes)
try:
connected_registry = acr_connected_registry_show(
cmd, client, connected_registry_name, registry_name, resource_group_name)
result = client.delete(resource_group_name, registry_name, connected_registry_name)
sync_token = get_token_from_id(cmd, connected_registry.parent.sync_properties.token_id)
sync_token_name = sync_token.name
sync_scope_map_name = sync_token.scope_map_id.split('/scopeMaps/')[1]
if cleanup:
from .token import acr_token_delete
from .scope_map import acr_scope_map_delete
token_client = cf_acr_tokens(cmd.cli_ctx)
scope_map_client = cf_acr_scope_maps(cmd.cli_ctx)
# Delete target sync scope map and token.
acr_token_delete(cmd, token_client, registry_name, sync_token_name, yes, resource_group_name)
acr_scope_map_delete(cmd, scope_map_client, registry_name, sync_scope_map_name, yes, resource_group_name)
# Cleanup gateway permissions from ancestors
connected_registry_list = list(client.list(resource_group_name, registry_name))
family_tree, _ = _get_family_tree(connected_registry_list, None)
_update_ancestor_permissions(cmd, family_tree, resource_group_name, registry_name,
connected_registry.parent.id, connected_registry_name, remove_access=True)
else:
msg = "Connected registry successfully deleted. Please cleanup your sync tokens and scope maps. " + \
"Run the following commands for cleanup: \n\t" + \
"az acr token delete -n {} -r {} --yes\n\t".format(sync_token_name, registry_name) + \
"az acr scope-map delete -n {} -r {} --yes\n".format(sync_scope_map_name, registry_name) + \
"Run the following command on all ascendency to remove the deleted registry gateway access: \n\t" + \
"az acr scope-map update -n <scope-map-name> -r {} --remove-gateway {} --yes".format(
registry_name, " ".join([connected_registry_name] + DEFAULT_GATEWAY_SCOPE))
logger.warning(msg)
return result
except ValidationError as e:
raise CLIError(e)
def acr_connected_registry_deactivate(cmd,
client,
connected_registry_name,
registry_name,
yes=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
subscription_id = get_subscription_id(cmd.cli_ctx)
user_confirmation("Are you sure you want to deactivate the connected registry '{}' in '{}'?".format(
connected_registry_name, registry_name), yes)
return client.deactivate(subscription_id=subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
connected_registry_name=connected_registry_name)
def acr_connected_registry_list(cmd,
client,
registry_name,
parent_name=None,
no_children=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
connected_registry_list = list(client.list(resource_group_name, registry_name))
result = []
if no_children:
if parent_name:
result = [registry for registry in connected_registry_list
if registry.parent.id is not None and registry.parent.id.endswith(parent_name)]
else:
result = [registry for registry in connected_registry_list if not registry.parent.id]
elif parent_name:
family_tree, parent = _get_family_tree(connected_registry_list, parent_name)
if parent is None:
raise CLIError("Parent connected registry '{}' doesn't exist.".format(parent_name))
result = _get_descendants(family_tree, parent.id)
else:
result = connected_registry_list
return result
def acr_connected_registry_show(cmd,
client,
connected_registry_name,
registry_name,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
return client.get(resource_group_name, registry_name, connected_registry_name)
def acr_connected_registry_list_client_tokens(cmd,
client,
connected_registry_name,
registry_name,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
current_connected_registry = acr_connected_registry_show(
cmd, client, connected_registry_name, registry_name, resource_group_name)
result = []
if current_connected_registry.client_token_ids is None:
return result
for token_id in current_connected_registry.client_token_ids:
token = get_token_from_id(cmd, token_id)
result.append(token)
return result
def _create_sync_token(cmd,
resource_group_name,
registry_name,
connected_registry_name,
repositories,
mode):
token_client = cf_acr_tokens(cmd.cli_ctx)
mode = mode.capitalize()
if not any(option for option in ConnectedRegistryModes if option.value == mode):
raise CLIError("usage error: --mode supports only 'registry' and 'mirror' values.")
repository_actions_list = [[repo] + REPO_SCOPES_BY_MODE[mode] for repo in repositories]
gateway_actions_list = [[connected_registry_name.lower()] + DEFAULT_GATEWAY_SCOPE]
try:
message = "Created by connected registry sync token: {}"
sync_scope_map_name = SYNC_SCOPE_MAP_NAME.format(connected_registry_name)
logger.warning("If sync scope map '%s' already exists, its actions will be overwritten", sync_scope_map_name)
sync_scope_map = create_default_scope_map(cmd, resource_group_name, registry_name, sync_scope_map_name,
repository_actions_list, gateway_actions_list,
scope_map_description=message.format(connected_registry_name),
force=True)
sync_token_name = SYNC_TOKEN_NAME.format(connected_registry_name)
logger.warning("If sync token '%s' already exists, it properties will be overwritten", sync_token_name)
Token = cmd.get_models('Token')
poller = token_client.create(
resource_group_name,
registry_name,
sync_token_name,
Token(
scope_map_id=sync_scope_map.id,
status="enabled"
)
)
token = LongRunningOperation(cmd.cli_ctx)(poller)
return token.id
except ValidationError as e:
raise CLIError(e)
def _get_family_tree(connected_registry_list, target_connected_registry_name):
    family_tree = {}
    target_connected_registry = None
    # Populate the dictionary
    for connected_registry in connected_registry_list:
        family_tree[connected_registry.id] = {
            "connectedRegistry": connected_registry,
            "children": []
        }
        if connected_registry.name == target_connected_registry_name:
            target_connected_registry = connected_registry
    # Populate children dependencies
    for connected_registry in connected_registry_list:
        parent_id = connected_registry.parent.id
        if parent_id and not parent_id.isspace():
            family_tree[parent_id]["children"].append(connected_registry.id)
    return family_tree, target_connected_registry
def _get_descendants(family_tree, parent_id):
    children = family_tree[parent_id]['children']
    result = []
    for child_id in children:
        # Append (instead of re-assigning) so earlier siblings are not lost.
        result.append(family_tree[child_id]["connectedRegistry"])
        descendants = _get_descendants(family_tree, child_id)
        if descendants:
            result.extend(descendants)
    return result
# region connected-registry install subgroup
def acr_connected_registry_install_info(cmd,
client,
connected_registry_name,
registry_name,
resource_group_name=None):
return _get_install_info(cmd, client, connected_registry_name, registry_name, False, resource_group_name)
def acr_connected_registry_install_renew_credentials(cmd,
client,
connected_registry_name,
registry_name,
resource_group_name=None):
return _get_install_info(cmd, client, connected_registry_name, registry_name, True, resource_group_name)
def _get_install_info(cmd,
client,
connected_registry_name,
registry_name,
regenerate_credentials,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
connected_registry = acr_connected_registry_show(
cmd, client, connected_registry_name, registry_name, resource_group_name)
parent_gateway_endpoint = connected_registry.parent.sync_properties.gateway_endpoint
if parent_gateway_endpoint is None or parent_gateway_endpoint == '':
parent_gateway_endpoint = "<parent gateway endpoint>"
parent_id = connected_registry.parent.id
# if parent_id is not none, parent is a connected registry
if parent_id:
parent_endpoint_protocol = "<http or https>"
# if parent_id is none, parent is a cloud registry
else:
parent_endpoint_protocol = "https"
sync_token_name = connected_registry.parent.sync_properties.token_id.split('/tokens/')[1]
connected_registry_login_server = "<Optional: connected registry login server. " + \
"More info at https://aka.ms/acr/connected-registry>"
if regenerate_credentials:
from ._client_factory import cf_acr_token_credentials
from .token import acr_token_credential_generate
cred_client = cf_acr_token_credentials(cmd.cli_ctx)
poller = acr_token_credential_generate(
cmd, cred_client, registry_name, sync_token_name,
password1=True, password2=False, resource_group_name=resource_group_name)
credentials = LongRunningOperation(cmd.cli_ctx)(poller)
sync_username = credentials.username
sync_password = credentials.passwords[0].value
logger.warning('Please store your generated credentials safely.')
else:
sync_username = sync_token_name
sync_password = "<sync token password>"
connection_string = "ConnectedRegistryName=%s;" % connected_registry_name + \
"SyncTokenName=%s;SyncTokenPassword=%s;" % (sync_username, sync_password) + \
"ParentGatewayEndpoint=%s;ParentEndpointProtocol=%s" % (parent_gateway_endpoint, parent_endpoint_protocol)
return {
"ACR_REGISTRY_CONNECTION_STRING": connection_string,
"ACR_REGISTRY_LOGIN_SERVER": connected_registry_login_server
}
# endregion
def _update_ancestor_permissions(cmd,
family_tree,
resource_group_name,
registry_name,
parent_id,
gateway,
repositories=None,
mode=None,
remove_access=False):
    gateway_actions_list = [[gateway.lower()] + DEFAULT_GATEWAY_SCOPE]
    # Default these so the permission update and log message below also work
    # when no repositories were passed in (e.g. on removal).
    repository_actions_list = []
    repo_msg = ""
    if repositories is not None:
        repository_actions_list = [[repo] + REPO_SCOPES_BY_MODE[mode] for repo in repositories]
        repo_msg = ", ".join(repositories)
        repo_msg = " and repo(s) '{}' {} permissions".format(repo_msg, mode)
if remove_access:
action_txt = "Removing"
add_actions_set = set()
remove_actions_set = set(parse_scope_map_actions(gateway_actions_list=gateway_actions_list))
else:
action_txt = "Adding"
add_actions_set = set(parse_scope_map_actions(repository_actions_list, gateway_actions_list))
remove_actions_set = set()
while parent_id and not parent_id.isspace():
ancestor = family_tree[parent_id]["connectedRegistry"]
msg = "{} '{}' gateway permissions{} to connected registry '{}' sync scope map.".format(
action_txt, gateway, repo_msg, ancestor.name)
_update_repo_permissions(cmd, resource_group_name, registry_name,
ancestor, add_actions_set, remove_actions_set, msg=msg)
parent_id = ancestor.parent.id
# region connected-registry repo update
def _update_repo_permissions(cmd,
resource_group_name,
registry_name,
connected_registry,
add_actions_set,
remove_actions_set,
msg=None,
description=None):
scope_map_client = cf_acr_scope_maps(cmd.cli_ctx)
sync_token = get_token_from_id(cmd, connected_registry.parent.sync_properties.token_id)
sync_scope_map = get_scope_map_from_id(cmd, sync_token.scope_map_id)
sync_scope_map_name = sync_scope_map.name
current_actions_set = set(sync_scope_map.actions)
final_actions_set = current_actions_set.union(add_actions_set).difference(remove_actions_set)
if final_actions_set == current_actions_set:
return None
current_actions = list(final_actions_set)
logger.warning(msg)
return scope_map_client.update(
resource_group_name,
registry_name,
sync_scope_map_name,
description,
current_actions
)
def _get_scope_map_actions_set(repos, actions):
for i, repo_name in enumerate(repos):
repos[i] = [repo_name] + actions
return set(parse_scope_map_actions(repos))
def acr_connected_registry_repo(cmd,
client,
connected_registry_name,
registry_name,
add_repos=None,
remove_repos=None,
resource_group_name=None):
if not (add_repos or remove_repos):
raise CLIError('No repository permissions to update.')
_, resource_group_name = validate_managed_registry(
cmd, registry_name, resource_group_name)
add_repos_set = set(add_repos) if add_repos is not None else set()
remove_repos_set = set(remove_repos) if remove_repos is not None else set()
duplicate_repos = set.intersection(add_repos_set, remove_repos_set)
if duplicate_repos:
errors = sorted(map(lambda action: action[action.rfind('/') + 1:], duplicate_repos))
raise CLIError(
'Update ambiguity. Duplicate repository names were provided with ' +
'--add and --remove arguments.\n{}'.format(errors))
connected_registry_list = list(client.list(resource_group_name, registry_name))
family_tree, target_connected_registry = _get_family_tree(connected_registry_list, connected_registry_name)
if target_connected_registry is None:
raise CLIError("Connected registry '{}' doesn't exist.".format(connected_registry_name))
# remove repo permissions from connected registry descendants.
remove_actions = REPO_SCOPES_BY_MODE[ConnectedRegistryModes.REGISTRY.value]
if remove_repos is not None:
remove_repos_txt = ", ".join(remove_repos)
remove_repos_set = _get_scope_map_actions_set(remove_repos, remove_actions)
descendants = _get_descendants(family_tree, target_connected_registry.id)
for connected_registry in descendants:
msg = "Removing '{}' permissions from {}".format(remove_repos_txt, connected_registry.name)
_update_repo_permissions(cmd, resource_group_name, registry_name,
connected_registry, set(), remove_repos_set, msg=msg)
else:
remove_repos_set = set()
# add repo permissions to ancestors.
add_actions = REPO_SCOPES_BY_MODE[target_connected_registry.mode]
if add_repos is not None:
add_repos_txt = ", ".join(add_repos)
add_repos_set = _get_scope_map_actions_set(add_repos, add_actions)
parent_id = target_connected_registry.parent.id
while parent_id and not parent_id.isspace():
connected_registry = family_tree[parent_id]["connectedRegistry"]
msg = "Adding '{}' permissions to {}".format(add_repos_txt, connected_registry.name)
_update_repo_permissions(cmd, resource_group_name, registry_name,
connected_registry, add_repos_set, set(), msg=msg)
parent_id = connected_registry.parent.id
else:
add_repos_set = set()
# update target connected registry repo permissions.
if add_repos and remove_repos:
msg = "Adding '{}' and removing '{}' permissions in {}".format(
add_repos_txt, remove_repos_txt, target_connected_registry.name)
elif add_repos:
msg = "Adding '{}' permissions to {}".format(add_repos_txt, target_connected_registry.name)
else:
msg = "Removing '{}' permissions from {}".format(remove_repos_txt, target_connected_registry.name)
_update_repo_permissions(cmd, resource_group_name, registry_name,
target_connected_registry, add_repos_set, remove_repos_set, msg=msg)
# endregion
|
py | b40f0141a37fc0f0872f8da04900f87ea24025f7 | """Bridges between the `asyncio` module and Tornado IOLoop.
This is a work in progress and interfaces are subject to change.
To test:
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOMainLoop
(the tests log a few warnings with AsyncIOMainLoop because they leave some
unfinished callbacks on the event loop that fail when it resumes)
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
from tornado.ioloop import IOLoop
from tornado import stack_context
from tornado.util import timedelta_to_seconds
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False):
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
self.asyncio_loop.call_soon(self.make_current)
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
self._setup_logging()
self.asyncio_loop.run_forever()
def stop(self):
self.asyncio_loop.stop()
def call_at(self, when, callback, *args, **kwargs):
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()), self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
def initialize(self):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False)
class AsyncIOLoop(BaseAsyncIOLoop):
def initialize(self):
super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(),
close_loop=True)
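

if __name__ == '__main__':
    # Usage sketch (not part of the original module): install the bridge so
    # Tornado runs on the asyncio (or trollius) event loop, then start it.
    # This assumes Tornado 3.x/4.x semantics, where the IOLoop must be
    # installed explicitly.
    AsyncIOMainLoop().install()
    asyncio.get_event_loop().run_forever()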
|
py | b40f027e5139f0f80176bc5e8c97052b7304ef2d | from django.contrib import admin
from ledger.accounts.models import EmailUser
from commercialoperator.components.organisations import models
from django.contrib.admin import actions
# Register your models here.
@admin.register(models.Organisation)
class OrganisationAdmin(admin.ModelAdmin):
list_display = ['organisation','admin_pin_one', 'admin_pin_two', 'user_pin_one', 'user_pin_two']
search_fields = ('organisation__name','admin_pin_one', 'admin_pin_two', 'user_pin_one', 'user_pin_two' )
@admin.register(models.OrganisationRequest)
class OrganisationRequestAdmin(admin.ModelAdmin):
list_display = ['name','requester', 'abn', 'status']
@admin.register(models.OrganisationAccessGroup)
class OrganisationAccessGroupAdmin(admin.ModelAdmin):
filter_horizontal = ('members',)
exclude = ('site',)
actions = None
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == "members":
#kwargs["queryset"] = EmailUser.objects.filter(email__icontains='@dbca.wa.gov.au')
kwargs["queryset"] = EmailUser.objects.filter(is_staff=True)
return super(OrganisationAccessGroupAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def has_add_permission(self, request):
        return models.OrganisationAccessGroup.objects.count() == 0
def has_delete_permission(self, request, obj=None):
return False
|
py | b40f03077d0dce14fe60a123069a423847436f50 | """
Run Flask dev server.
Allows us to run with `python3 -m nunaserver`.
"""
from nunaserver.server import app
if __name__ == "__main__":
app.run(port=5000)
|
py | b40f031b1f57065fe81730eba5595e6b1ed896ce | import base64
import errno
import os
import sys
from flask import (Flask, abort, make_response, redirect, render_template,
request)
from flask_wtf import Form
from wtforms import HiddenField, StringField, PasswordField
from wtforms.validators import DataRequired
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
AUTH_PORT = 8000
if app.debug is True:
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
else:
try:
secret_key_path = os.path.join(THIS_DIR, 'secret_key')
app.secret_key = open(secret_key_path, 'rb').read()
except IOError as exc:
if errno.ENOENT == exc.errno:
print('authenticator.py cannot find {}.'.format(secret_key_path))
print('Create it with \npython -c '
"'import os; print(os.urandom(32))' > {}".format(secret_key_path))
sys.exit(1)
raise exc
class LoginForm(Form):
login = StringField('Login', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
target = HiddenField('Target', validators=[DataRequired()])
def EncodeToken(user, password):
    # base64 operates on bytes, so encode the credentials and return a str token.
    return base64.b64encode('{}:{}'.format(user, password).encode('utf-8')).decode('ascii')


def DecodeToken(token):
    auth_decoded = base64.b64decode(token).decode('utf-8')
    # Split only on the first ':' so passwords containing colons stay intact.
    user, password = auth_decoded.split(':', 1)
    return user, password
def ValidUser(user, password):
if user == 'admin':
enc = EncodeToken(user, password)
return enc
@app.route('/', methods=['GET'])
def authenticate():
token = request.cookies.get('token')
if token is None:
abort(401)
username, password = DecodeToken(token)
if ValidUser(username, password) is not None:
# Add headers to be authenticated with services
resp = make_response()
resp.headers['REMOTE_USER'] = username
resp.headers['X-WEBAUTH-USER'] = username
return resp
abort(401)
@app.route('/login', methods=['GET', 'POST'])
def login():
target = request.headers.get('X-Original-URI', '/')
form = LoginForm(target=target)
if form.validate_on_submit():
username = form.login.data
password = form.password.data
target = form.target.data
auth_token = ValidUser(username, password)
if auth_token:
resp = make_response(redirect(target))
            secure = not app.debug
# Secure limits cookies to HTTPS traffic only.
# HttpOnly prevents JavaScript from reading the cookie
resp.set_cookie('token', auth_token,
secure=secure,
httponly=True,
)
# Set headers that will be received by the service for this request
resp.headers['REMOTE_USER'] = username
resp.headers['X-WEBAUTH-USER'] = username
resp.headers['X-Forwarded-User'] = username
return resp
return render_template('login.html', form=form)
if __name__ == '__main__':
app.run(port=AUTH_PORT)
|
py | b40f032b4bb48607e3ba24fc528c869c62ecbed1 | """Custom docutils writer for Texinfo."""
import re
import textwrap
import warnings
from os import path
from typing import (TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, Pattern, Set,
Tuple, Union, cast)
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import __display_version__, addnodes
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.domains import IndexEntry
from sphinx.domains.index import IndexDomain
from sphinx.errors import ExtensionError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.i18n import format_date
from sphinx.writers.latex import collected_footnote
if TYPE_CHECKING:
from sphinx.builders.texinfo import TexinfoBuilder
logger = logging.getLogger(__name__)
COPYING = """\
@quotation
%(project)s %(release)s, %(date)s
%(author)s
Copyright @copyright{} %(copyright)s
@end quotation
"""
TEMPLATE = """\
\\input texinfo @c -*-texinfo-*-
@c %%**start of header
@setfilename %(filename)s
@documentencoding UTF-8
@ifinfo
@*Generated by Sphinx """ + __display_version__ + """.@*
@end ifinfo
@settitle %(title)s
@defindex ge
@paragraphindent %(paragraphindent)s
@exampleindent %(exampleindent)s
@finalout
%(direntry)s
@definfoenclose strong,`,'
@definfoenclose emph,`,'
@c %%**end of header
@copying
%(copying)s
@end copying
@titlepage
@title %(title)s
@insertcopying
@end titlepage
@contents
@c %%** start of user preamble
%(preamble)s
@c %%** end of user preamble
@ifnottex
@node Top
@top %(title)s
@insertcopying
@end ifnottex
@c %%**start of body
%(body)s
@c %%**end of body
@bye
"""
def find_subsections(section: Element) -> List[nodes.section]:
"""Return a list of subsections for the given ``section``."""
result = []
for child in section:
if isinstance(child, nodes.section):
result.append(child)
continue
elif isinstance(child, nodes.Element):
result.extend(find_subsections(child))
return result
def smart_capwords(s: str, sep: str = None) -> str:
"""Like string.capwords() but does not capitalize words that already
contain a capital letter."""
words = s.split(sep)
for i, word in enumerate(words):
if all(x.islower() for x in word):
words[i] = word.capitalize()
return (sep or ' ').join(words)
class TexinfoWriter(writers.Writer):
"""Texinfo writer for generating Texinfo documents."""
supported = ('texinfo', 'texi')
settings_spec: Tuple[str, Any, Tuple[Tuple[str, List[str], Dict[str, str]], ...]] = (
'Texinfo Specific Options', None, (
("Name of the Info file", ['--texinfo-filename'], {'default': ''}),
('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
('Description', ['--texinfo-dir-description'], {'default': ''}),
('Category', ['--texinfo-dir-category'], {'default':
'Miscellaneous'})))
settings_defaults: Dict = {}
output: str = None
visitor_attributes = ('output', 'fragment')
def __init__(self, builder: "TexinfoBuilder") -> None:
super().__init__()
self.builder = builder
def translate(self) -> None:
visitor = self.builder.create_translator(self.document, self.builder)
self.visitor = cast(TexinfoTranslator, visitor)
self.document.walkabout(visitor)
self.visitor.finish()
for attr in self.visitor_attributes:
setattr(self, attr, getattr(self.visitor, attr))
class TexinfoTranslator(SphinxTranslator):
builder: "TexinfoBuilder" = None
ignore_missing_images = False
default_elements = {
'author': '',
'body': '',
'copying': '',
'date': '',
'direntry': '',
'exampleindent': 4,
'filename': '',
'paragraphindent': 0,
'preamble': '',
'project': '',
'release': '',
'title': '',
}
def __init__(self, document: nodes.document, builder: "TexinfoBuilder") -> None:
super().__init__(document, builder)
self.init_settings()
self.written_ids: Set[str] = set() # node names and anchors in output
# node names and anchors that should be in output
self.referenced_ids: Set[str] = set()
self.indices: List[Tuple[str, str]] = [] # (node name, content)
self.short_ids: Dict[str, str] = {} # anchors --> short ids
self.node_names: Dict[str, str] = {} # node name --> node's name to display
self.node_menus: Dict[str, List[str]] = {} # node name --> node's menu entries
self.rellinks: Dict[str, List[str]] = {} # node name --> (next, previous, up)
self.collect_indices()
self.collect_node_names()
self.collect_node_menus()
self.collect_rellinks()
self.body: List[str] = []
self.context: List[str] = []
self.descs: List[addnodes.desc] = []
self.previous_section: nodes.section = None
self.section_level = 0
self.seen_title = False
self.next_section_ids: Set[str] = set()
self.escape_newlines = 0
self.escape_hyphens = 0
self.curfilestack: List[str] = []
self.footnotestack: List[Dict[str, List[Union[collected_footnote, bool]]]] = [] # NOQA
self.in_footnote = 0
self.in_samp = 0
self.handled_abbrs: Set[str] = set()
self.colwidths: List[int] = None
def finish(self) -> None:
if self.previous_section is None:
self.add_menu('Top')
for index in self.indices:
name, content = index
pointers = tuple([name] + self.rellinks[name])
self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
self.body.append('@unnumbered %s\n\n%s\n' % (name, content))
while self.referenced_ids:
# handle xrefs with missing anchors
r = self.referenced_ids.pop()
if r not in self.written_ids:
self.body.append('@anchor{%s}@w{%s}\n' % (r, ' ' * 30))
self.ensure_eol()
self.fragment = ''.join(self.body)
self.elements['body'] = self.fragment
self.output = TEMPLATE % self.elements
# -- Helper routines
def init_settings(self) -> None:
elements = self.elements = self.default_elements.copy()
elements.update({
# if empty, the title is set to the first section title
'title': self.settings.title,
'author': self.settings.author,
# if empty, use basename of input file
'filename': self.settings.texinfo_filename,
'release': self.escape(self.config.release),
'project': self.escape(self.config.project),
'copyright': self.escape(self.config.copyright),
'date': self.escape(self.config.today or
format_date(self.config.today_fmt or _('%b %d, %Y'),
language=self.config.language))
})
# title
title: str = self.settings.title
if not title:
title_node = self.document.next_node(nodes.title)
title = title_node.astext() if title_node else '<untitled>'
elements['title'] = self.escape_id(title) or '<untitled>'
# filename
if not elements['filename']:
elements['filename'] = self.document.get('source') or 'untitled'
if elements['filename'][-4:] in ('.txt', '.rst'): # type: ignore
elements['filename'] = elements['filename'][:-4] # type: ignore
elements['filename'] += '.info' # type: ignore
# direntry
if self.settings.texinfo_dir_entry:
entry = self.format_menu_entry(
self.escape_menu(self.settings.texinfo_dir_entry),
'(%s)' % elements['filename'],
self.escape_arg(self.settings.texinfo_dir_description))
elements['direntry'] = ('@dircategory %s\n'
'@direntry\n'
'%s'
'@end direntry\n') % (
self.escape_id(self.settings.texinfo_dir_category), entry)
elements['copying'] = COPYING % elements
# allow the user to override them all
elements.update(self.settings.texinfo_elements)
def collect_node_names(self) -> None:
"""Generates a unique id for each section.
Assigns the attribute ``node_name`` to each section."""
def add_node_name(name: str) -> str:
node_id = self.escape_id(name)
nth, suffix = 1, ''
while node_id + suffix in self.written_ids or \
node_id + suffix in self.node_names:
nth += 1
suffix = '<%s>' % nth
node_id += suffix
self.written_ids.add(node_id)
self.node_names[node_id] = name
return node_id
# must have a "Top" node
self.document['node_name'] = 'Top'
add_node_name('Top')
add_node_name('top')
# each index is a node
self.indices = [(add_node_name(name), content)
for name, content in self.indices]
# each section is also a node
for section in self.document.findall(nodes.section):
title = cast(nodes.TextElement, section.next_node(nodes.Titular))
name = title.astext() if title else '<untitled>'
section['node_name'] = add_node_name(name)
def collect_node_menus(self) -> None:
"""Collect the menu entries for each "node" section."""
node_menus = self.node_menus
targets: List[Element] = [self.document]
targets.extend(self.document.findall(nodes.section))
for node in targets:
assert 'node_name' in node and node['node_name']
entries = [s['node_name'] for s in find_subsections(node)]
node_menus[node['node_name']] = entries
# try to find a suitable "Top" node
title = self.document.next_node(nodes.title)
top = title.parent if title else self.document
if not isinstance(top, (nodes.document, nodes.section)):
top = self.document
if top is not self.document:
entries = node_menus[top['node_name']]
entries += node_menus['Top'][1:]
node_menus['Top'] = entries
del node_menus[top['node_name']]
top['node_name'] = 'Top'
# handle the indices
for name, _content in self.indices:
node_menus[name] = []
node_menus['Top'].append(name)
def collect_rellinks(self) -> None:
"""Collect the relative links (next, previous, up) for each "node"."""
rellinks = self.rellinks
node_menus = self.node_menus
for id in node_menus:
rellinks[id] = ['', '', '']
# up's
for id, entries in node_menus.items():
for e in entries:
rellinks[e][2] = id
# next's and prev's
for id, entries in node_menus.items():
for i, id in enumerate(entries):
# First child's prev is empty
if i != 0:
rellinks[id][1] = entries[i - 1]
# Last child's next is empty
if i != len(entries) - 1:
rellinks[id][0] = entries[i + 1]
# top's next is its first child
try:
first = node_menus['Top'][0]
except IndexError:
pass
else:
rellinks['Top'][0] = first
rellinks[first][1] = 'Top'
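    # Illustrative example (not part of the original source): for a document whose 'Top'
    # node has two child sections A and B (and no deeper nesting), the mapping built above,
    # with entries of the form [next, previous, up], works out to:
    #   rellinks['Top'] == ['A', '', '']        # Top's next is its first child
    #   rellinks['A']   == ['B', 'Top', 'Top']
    #   rellinks['B']   == ['',  'A',   'Top']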
# -- Escaping
# Which characters to escape depends on the context. In some cases,
# namely menus and node names, it's not possible to escape certain
# characters.
def escape(self, s: str) -> str:
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
s = s.replace('}', '@}')
# prevent `` and '' quote conversion
s = s.replace('``', "`@w{`}")
s = s.replace("''", "'@w{'}")
return s
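    # Illustrative example (not part of the original source):
    #   escape('use @code{x}')  ->  'use @@code@{x@}'
    #   escape("``quoted''")    ->  "`@w{`}quoted'@w{'}"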
def escape_arg(self, s: str) -> str:
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
# commas are the argument delimiters
s = s.replace(',', '@comma{}')
# normalize white space
s = ' '.join(s.split()).strip()
return s
def escape_id(self, s: str) -> str:
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:()'
for bc in bad_chars:
s = s.replace(bc, ' ')
if re.search('[^ .]', s):
# remove DOTs if name contains other characters
s = s.replace('.', ' ')
s = ' '.join(s.split()).strip()
return self.escape(s)
def escape_menu(self, s: str) -> str:
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
s = ' '.join(s.split()).strip()
return s
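    # Illustrative examples (not part of the original source) of the three variants above:
    #   escape_arg('a, b')                  ->  'a@comma{} b'        (commas delimit command arguments)
    #   escape_id('module.func() (alias)')  ->  'module func alias'  (',:()' replaced, dots removed)
    #   escape_menu('See: details')         ->  'See; details'       (colons would end a menu entry name)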
def ensure_eol(self) -> None:
"""Ensure the last line in body is terminated by new line."""
if self.body and self.body[-1][-1:] != '\n':
self.body.append('\n')
def format_menu_entry(self, name: str, node_name: str, desc: str) -> str:
if name == node_name:
s = '* %s:: ' % (name,)
else:
s = '* %s: %s. ' % (name, node_name)
offset = max((24, (len(name) + 4) % 78))
wdesc = '\n'.join(' ' * offset + l for l in
textwrap.wrap(desc, width=78 - offset))
return s + wdesc.strip() + '\n'
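    # Illustrative examples (not part of the original source):
    #   format_menu_entry('Intro', 'intro', 'Short description')
    #       ->  '* Intro: intro. Short description\n'
    #   format_menu_entry('Usage', 'Usage', 'How to use it')
    #       ->  '* Usage:: How to use it\n'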
def add_menu_entries(self, entries: List[str], reg: Pattern = re.compile(r'\s+---?\s+')
) -> None:
for entry in entries:
name = self.node_names[entry]
# special formatting for entries that are divided by an em-dash
try:
parts = reg.split(name, 1)
except TypeError:
# could be a gettext proxy
parts = [name]
if len(parts) == 2:
name, desc = parts
else:
desc = ''
name = self.escape_menu(name)
desc = self.escape(desc)
self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name: str) -> None:
entries = self.node_menus[node_name]
if not entries:
return
self.body.append('\n@menu\n')
self.add_menu_entries(entries)
if (node_name != 'Top' or
not self.node_menus[entries[0]] or
self.config.texinfo_no_detailmenu):
self.body.append('\n@end menu\n')
return
def _add_detailed_menu(name: str) -> None:
entries = self.node_menus[name]
if not entries:
return
self.body.append('\n%s\n\n' % (self.escape(self.node_names[name],)))
self.add_menu_entries(entries)
for subentry in entries:
_add_detailed_menu(subentry)
self.body.append('\n@detailmenu\n'
' --- The Detailed Node Listing ---\n')
for entry in entries:
_add_detailed_menu(entry)
self.body.append('\n@end detailmenu\n'
'@end menu\n')
def tex_image_length(self, width_str: str) -> str:
match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
return width_str
res = width_str
amount, unit = match.groups()[:2]
if not unit or unit == "px":
# pixels: let TeX alone
return ''
elif unit == "%":
# a4paper: textwidth=418.25368pt
res = "%d.0pt" % (float(amount) * 4.1825368)
return res
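    # Illustrative examples (not part of the original source):
    #   tex_image_length('100%')  ->  '418.0pt'  (percentage of the a4paper text width)
    #   tex_image_length('72px')  ->  ''         (pixel sizes are left to TeX, i.e. dropped)
    #   tex_image_length('3cm')   ->  '3cm'      (other units pass through unchanged)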
def collect_indices(self) -> None:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> str:
ret = ['\n@menu\n']
for _letter, entries in content:
for entry in entries:
if not entry[3]:
continue
name = self.escape_menu(entry[0])
sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
desc = self.escape_arg(entry[6])
me = self.format_menu_entry(name, sid, desc)
ret.append(me)
ret.append('@end menu\n')
return ''.join(ret)
indices_config = self.config.texinfo_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
self.indices.append((indexcls.localname,
generate(content, collapsed)))
# only add the main Index if it's not empty
domain = cast(IndexDomain, self.builder.env.get_domain('index'))
for docname in self.builder.docnames:
if domain.entries[docname]:
self.indices.append((_('Index'), '\n@printindex ge\n'))
break
# this is copied from the latex writer
# TODO: move this to sphinx.util
def collect_footnotes(self, node: Element) -> Dict[str, List[Union[collected_footnote, bool]]]: # NOQA
def footnotes_under(n: Element) -> Iterator[nodes.footnote]:
if isinstance(n, nodes.footnote):
yield n
else:
for c in n.children:
if isinstance(c, addnodes.start_of_file):
continue
elif isinstance(c, nodes.Element):
yield from footnotes_under(c)
fnotes: Dict[str, List[Union[collected_footnote, bool]]] = {}
for fn in footnotes_under(node):
label = cast(nodes.label, fn[0])
num = label.astext().strip()
fnotes[num] = [collected_footnote('', *fn.children), False]
return fnotes
# -- xref handling
def get_short_id(self, id: str) -> str:
"""Return a shorter 'id' associated with ``id``."""
        # Shorter ids improve paragraph filling in places
        # where the id is hidden by Emacs.
try:
sid = self.short_ids[id]
except KeyError:
sid = hex(len(self.short_ids))[2:]
self.short_ids[id] = sid
return sid
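    # Illustrative example (not part of the original source): ids are handed out as hex
    # strings without the '0x' prefix, in order of first use, and cached afterwards:
    #   first anchor -> '0', second -> '1', ..., sixteenth -> 'f', seventeenth -> '10'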
def add_anchor(self, id: str, node: Node) -> None:
if id.startswith('index-'):
return
id = self.curfilestack[-1] + ':' + id
eid = self.escape_id(id)
sid = self.get_short_id(id)
for id in (eid, sid):
if id not in self.written_ids:
self.body.append('@anchor{%s}' % id)
self.written_ids.add(id)
def add_xref(self, id: str, name: str, node: Node) -> None:
name = self.escape_menu(name)
sid = self.get_short_id(id)
if self.config.texinfo_cross_references:
self.body.append('@ref{%s,,%s}' % (sid, name))
self.referenced_ids.add(sid)
self.referenced_ids.add(self.escape_id(id))
else:
self.body.append(name)
# -- Visiting
def visit_document(self, node: Element) -> None:
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if 'docname' in node:
self.add_anchor(':doc', node)
def depart_document(self, node: Element) -> None:
self.footnotestack.pop()
self.curfilestack.pop()
def visit_Text(self, node: Text) -> None:
s = self.escape(node.astext())
if self.escape_newlines:
s = s.replace('\n', ' ')
if self.escape_hyphens:
# prevent "--" and "---" conversion
s = s.replace('-', '@w{-}')
self.body.append(s)
def depart_Text(self, node: Text) -> None:
pass
def visit_section(self, node: Element) -> None:
self.next_section_ids.update(node.get('ids', []))
if not self.seen_title:
return
if self.previous_section:
self.add_menu(self.previous_section['node_name'])
else:
self.add_menu('Top')
node_name = node['node_name']
pointers = tuple([node_name] + self.rellinks[node_name])
self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
for id in sorted(self.next_section_ids):
self.add_anchor(id, node)
self.next_section_ids.clear()
self.previous_section = cast(nodes.section, node)
self.section_level += 1
def depart_section(self, node: Element) -> None:
self.section_level -= 1
headings = (
'@unnumbered',
'@chapter',
'@section',
'@subsection',
'@subsubsection',
)
rubrics = (
'@heading',
'@subheading',
'@subsubheading',
)
def visit_title(self, node: Element) -> None:
if not self.seen_title:
self.seen_title = True
raise nodes.SkipNode
parent = node.parent
if isinstance(parent, nodes.table):
return
if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)):
raise nodes.SkipNode
elif not isinstance(parent, nodes.section):
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.visit_rubric(node)
else:
try:
heading = self.headings[self.section_level]
except IndexError:
heading = self.headings[-1]
self.body.append('\n%s ' % heading)
def depart_title(self, node: Element) -> None:
self.body.append('\n\n')
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
try:
rubric = self.rubrics[self.section_level]
except IndexError:
rubric = self.rubrics[-1]
self.body.append('\n%s ' % rubric)
self.escape_newlines += 1
def depart_rubric(self, node: Element) -> None:
self.escape_newlines -= 1
self.body.append('\n\n')
def visit_subtitle(self, node: Element) -> None:
self.body.append('\n\n@noindent\n')
def depart_subtitle(self, node: Element) -> None:
self.body.append('\n\n')
# -- References
def visit_target(self, node: Element) -> None:
# postpone the labels until after the sectioning command
parindex = node.parent.index(node)
try:
try:
next = node.parent[parindex + 1]
except IndexError:
# last node in parent, look at next after parent
# (for section of equal level)
next = node.parent.parent[node.parent.parent.index(node.parent)]
if isinstance(next, nodes.section):
if node.get('refid'):
self.next_section_ids.add(node['refid'])
self.next_section_ids.update(node['ids'])
return
except (IndexError, AttributeError):
pass
if 'refuri' in node:
return
if node.get('refid'):
self.add_anchor(node['refid'], node)
for id in node['ids']:
self.add_anchor(id, node)
def depart_target(self, node: Element) -> None:
pass
def visit_reference(self, node: Element) -> None:
# an xref's target is displayed in Info so we ignore a few
# cases for the sake of appearance
if isinstance(node.parent, (nodes.title, addnodes.desc_type)):
return
if isinstance(node[0], nodes.image):
return
name = node.get('name', node.astext()).strip()
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if not uri:
return
if uri.startswith('mailto:'):
uri = self.escape_arg(uri[7:])
name = self.escape_arg(name)
if not name or name == uri:
self.body.append('@email{%s}' % uri)
else:
self.body.append('@email{%s,%s}' % (uri, name))
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.add_xref(id, name, node)
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.add_xref(id, name, node)
elif uri.startswith('info:'):
# references to an external Info file
uri = uri[5:].replace('_', ' ')
uri = self.escape_arg(uri)
id = 'Top'
if '#' in uri:
uri, id = uri.split('#', 1)
id = self.escape_id(id)
name = self.escape_menu(name)
if name == id:
self.body.append('@ref{%s,,,%s}' % (id, uri))
else:
self.body.append('@ref{%s,,%s,%s}' % (id, name, uri))
else:
uri = self.escape_arg(uri)
name = self.escape_arg(name)
show_urls = self.config.texinfo_show_urls
if self.in_footnote:
show_urls = 'inline'
if not name or uri == name:
self.body.append('@indicateurl{%s}' % uri)
elif show_urls == 'inline':
self.body.append('@uref{%s,%s}' % (uri, name))
elif show_urls == 'no':
self.body.append('@uref{%s,,%s}' % (uri, name))
else:
self.body.append('%s@footnote{%s}' % (name, uri))
raise nodes.SkipNode
def depart_reference(self, node: Element) -> None:
pass
def visit_number_reference(self, node: Element) -> None:
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_title_reference(self, node: Element) -> None:
text = node.astext()
self.body.append('@cite{%s}' % self.escape_arg(text))
raise nodes.SkipNode
# -- Blocks
def visit_paragraph(self, node: Element) -> None:
self.body.append('\n')
def depart_paragraph(self, node: Element) -> None:
self.body.append('\n')
def visit_block_quote(self, node: Element) -> None:
self.body.append('\n@quotation\n')
def depart_block_quote(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end quotation\n')
def visit_literal_block(self, node: Element) -> None:
self.body.append('\n@example\n')
def depart_literal_block(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end example\n')
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line_block(self, node: Element) -> None:
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
self.body.append('@display\n')
def depart_line_block(self, node: Element) -> None:
self.body.append('@end display\n')
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
def visit_line(self, node: Element) -> None:
self.escape_newlines += 1
def depart_line(self, node: Element) -> None:
self.body.append('@w{ }\n')
self.escape_newlines -= 1
# -- Inline
def visit_strong(self, node: Element) -> None:
self.body.append('@strong{')
def depart_strong(self, node: Element) -> None:
self.body.append('}')
def visit_emphasis(self, node: Element) -> None:
element = 'emph' if not self.in_samp else 'var'
self.body.append('@%s{' % element)
def depart_emphasis(self, node: Element) -> None:
self.body.append('}')
def is_samp(self, node: Element) -> bool:
return 'samp' in node['classes']
def visit_literal(self, node: Element) -> None:
if self.is_samp(node):
self.in_samp += 1
self.body.append('@code{')
def depart_literal(self, node: Element) -> None:
if self.is_samp(node):
self.in_samp -= 1
self.body.append('}')
def visit_superscript(self, node: Element) -> None:
self.body.append('@w{^')
def depart_superscript(self, node: Element) -> None:
self.body.append('}')
def visit_subscript(self, node: Element) -> None:
self.body.append('@w{[')
def depart_subscript(self, node: Element) -> None:
self.body.append(']}')
# -- Footnotes
def visit_footnote(self, node: Element) -> None:
raise nodes.SkipNode
def visit_collected_footnote(self, node: Element) -> None:
self.in_footnote += 1
self.body.append('@footnote{')
def depart_collected_footnote(self, node: Element) -> None:
self.body.append('}')
self.in_footnote -= 1
def visit_footnote_reference(self, node: Element) -> None:
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
except (KeyError, IndexError) as exc:
raise nodes.SkipNode from exc
# footnotes are repeated for each reference
footnode.walkabout(self) # type: ignore
raise nodes.SkipChildren
def visit_citation(self, node: Element) -> None:
self.body.append('\n')
for id in node.get('ids'):
self.add_anchor(id, node)
self.escape_newlines += 1
def depart_citation(self, node: Element) -> None:
self.escape_newlines -= 1
def visit_citation_reference(self, node: Element) -> None:
self.body.append('@w{[')
def depart_citation_reference(self, node: Element) -> None:
self.body.append(']}')
# -- Lists
def visit_bullet_list(self, node: Element) -> None:
bullet = node.get('bullet', '*')
self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_enumerated_list(self, node: Element) -> None:
# doesn't support Roman numerals
enum = node.get('enumtype', 'arabic')
starters = {'arabic': '',
'loweralpha': 'a',
'upperalpha': 'A'}
start = node.get('start', starters.get(enum, ''))
self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end enumerate\n')
def visit_list_item(self, node: Element) -> None:
self.body.append('\n@item ')
def depart_list_item(self, node: Element) -> None:
pass
# -- Option List
def visit_option_list(self, node: Element) -> None:
self.body.append('\n\n@table @option\n')
def depart_option_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end table\n')
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.at_item_x = '@item'
def depart_option_group(self, node: Element) -> None:
pass
def visit_option(self, node: Element) -> None:
self.escape_hyphens += 1
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_option(self, node: Element) -> None:
self.escape_hyphens -= 1
def visit_option_string(self, node: Element) -> None:
pass
def depart_option_string(self, node: Element) -> None:
pass
def visit_option_argument(self, node: Element) -> None:
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_description(self, node: Element) -> None:
self.body.append('\n')
def depart_description(self, node: Element) -> None:
pass
# -- Definitions
def visit_definition_list(self, node: Element) -> None:
self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end table\n')
def visit_definition_list_item(self, node: Element) -> None:
self.at_item_x = '@item'
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
for id in node.get('ids'):
self.add_anchor(id, node)
# anchors and indexes need to go in front
for n in node[::]:
if isinstance(n, (addnodes.index, nodes.target)):
n.walkabout(self)
node.remove(n)
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_term(self, node: Element) -> None:
pass
def visit_classifier(self, node: Element) -> None:
self.body.append(' : ')
def depart_classifier(self, node: Element) -> None:
pass
def visit_definition(self, node: Element) -> None:
self.body.append('\n')
def depart_definition(self, node: Element) -> None:
pass
# -- Tables
def visit_table(self, node: Element) -> None:
self.entry_sep = '@item'
def depart_table(self, node: Element) -> None:
self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node: Element) -> None:
pass
def depart_tabular_col_spec(self, node: Element) -> None:
pass
def visit_colspec(self, node: Element) -> None:
self.colwidths.append(node['colwidth'])
if len(self.colwidths) != self.n_cols:
return
self.body.append('\n\n@multitable ')
for n in self.colwidths:
self.body.append('{%s} ' % ('x' * (n + 2)))
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
self.colwidths = []
self.n_cols = node['cols']
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
self.entry_sep = '@headitem'
def depart_thead(self, node: Element) -> None:
pass
def visit_tbody(self, node: Element) -> None:
pass
def depart_tbody(self, node: Element) -> None:
pass
def visit_row(self, node: Element) -> None:
pass
def depart_row(self, node: Element) -> None:
self.entry_sep = '@item'
def visit_entry(self, node: Element) -> None:
self.body.append('\n%s\n' % self.entry_sep)
self.entry_sep = '@tab'
def depart_entry(self, node: Element) -> None:
for _i in range(node.get('morecols', 0)):
self.body.append('\n@tab\n')
# -- Field Lists
def visit_field_list(self, node: Element) -> None:
pass
def depart_field_list(self, node: Element) -> None:
pass
def visit_field(self, node: Element) -> None:
self.body.append('\n')
def depart_field(self, node: Element) -> None:
self.body.append('\n')
def visit_field_name(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@*')
def depart_field_name(self, node: Element) -> None:
self.body.append(': ')
def visit_field_body(self, node: Element) -> None:
pass
def depart_field_body(self, node: Element) -> None:
pass
# -- Admonitions
def visit_admonition(self, node: Element, name: str = '') -> None:
if not name:
title = cast(nodes.title, node[0])
name = self.escape(title.astext())
self.body.append('\n@cartouche\n@quotation %s ' % name)
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append('\n@cartouche\n@quotation %s ' % label)
def depart_admonition(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end quotation\n'
'@end cartouche\n')
visit_attention = _visit_named_admonition
depart_attention = depart_admonition
visit_caution = _visit_named_admonition
depart_caution = depart_admonition
visit_danger = _visit_named_admonition
depart_danger = depart_admonition
visit_error = _visit_named_admonition
depart_error = depart_admonition
visit_hint = _visit_named_admonition
depart_hint = depart_admonition
visit_important = _visit_named_admonition
depart_important = depart_admonition
visit_note = _visit_named_admonition
depart_note = depart_admonition
visit_tip = _visit_named_admonition
depart_tip = depart_admonition
visit_warning = _visit_named_admonition
depart_warning = depart_admonition
# -- Misc
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
def visit_generated(self, node: Element) -> None:
raise nodes.SkipNode
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_container(self, node: Element) -> None:
if node.get('literal_block'):
self.body.append('\n\n@float LiteralBlock\n')
def depart_container(self, node: Element) -> None:
if node.get('literal_block'):
self.body.append('\n@end float\n\n')
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
def visit_topic(self, node: Element) -> None:
# ignore TOC's since we have to have a "menu" anyway
if 'contents' in node.get('classes', []):
raise nodes.SkipNode
title = cast(nodes.title, node[0])
self.visit_rubric(title)
self.body.append('%s\n' % self.escape(title.astext()))
self.depart_rubric(title)
def depart_topic(self, node: Element) -> None:
pass
def visit_transition(self, node: Element) -> None:
self.body.append('\n\n%s\n\n' % ('_' * 66))
def depart_transition(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
self.body.append('\n\n@center --- ')
def depart_attribution(self, node: Element) -> None:
self.body.append('\n\n')
def visit_raw(self, node: Element) -> None:
format = node.get('format', '').split()
if 'texinfo' in format or 'texi' in format:
self.body.append(node.astext())
raise nodes.SkipNode
def visit_figure(self, node: Element) -> None:
self.body.append('\n\n@float Figure\n')
def depart_figure(self, node: Element) -> None:
self.body.append('\n@end float\n\n')
def visit_caption(self, node: Element) -> None:
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('\n@caption{')
else:
logger.warning(__('caption not inside a figure.'),
location=node)
def depart_caption(self, node: Element) -> None:
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('}\n')
def visit_image(self, node: Element) -> None:
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
name, ext = path.splitext(uri)
# width and height ignored in non-tex output
width = self.tex_image_length(node.get('width', ''))
height = self.tex_image_length(node.get('height', ''))
alt = self.escape_arg(node.get('alt', ''))
filename = "%s-figures/%s" % (self.elements['filename'][:-5], name) # type: ignore
self.body.append('\n@image{%s,%s,%s,%s,%s}\n' %
(filename, width, height, alt, ext[1:]))
def depart_image(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_sidebar(self, node: Element) -> None:
self.visit_topic(node)
def depart_sidebar(self, node: Element) -> None:
self.depart_topic(node)
def visit_label(self, node: Element) -> None:
# label numbering is automatically generated by Texinfo
if self.in_footnote:
raise nodes.SkipNode
else:
self.body.append('@w{(')
def depart_label(self, node: Element) -> None:
self.body.append(')} ')
def visit_legend(self, node: Element) -> None:
pass
def depart_legend(self, node: Element) -> None:
pass
def visit_substitution_reference(self, node: Element) -> None:
pass
def depart_substitution_reference(self, node: Element) -> None:
pass
def visit_substitution_definition(self, node: Element) -> None:
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
self.body.append('\n@verbatim\n'
'<SYSTEM MESSAGE: %s>\n'
'@end verbatim\n' % node.astext())
raise nodes.SkipNode
def visit_comment(self, node: Element) -> None:
self.body.append('\n')
for line in node.astext().splitlines():
self.body.append('@c %s\n' % line)
raise nodes.SkipNode
def visit_problematic(self, node: Element) -> None:
self.body.append('>>')
def depart_problematic(self, node: Element) -> None:
self.body.append('<<')
def unimplemented_visit(self, node: Element) -> None:
logger.warning(__("unimplemented node type: %r"), node,
location=node)
def unknown_departure(self, node: Node) -> None:
pass
# -- Sphinx specific
def visit_productionlist(self, node: Element) -> None:
self.visit_literal_block(None)
names = []
productionlist = cast(Iterable[addnodes.production], node)
for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
for production in productionlist:
if production['tokenname']:
for id in production.get('ids'):
self.add_anchor(id, production)
s = production['tokenname'].ljust(maxlen) + ' ::='
else:
s = '%s ' % (' ' * maxlen)
self.body.append(self.escape(s))
self.body.append(self.escape(production.astext() + '\n'))
self.depart_literal_block(None)
raise nodes.SkipNode
def visit_production(self, node: Element) -> None:
pass
def depart_production(self, node: Element) -> None:
pass
def visit_literal_emphasis(self, node: Element) -> None:
self.body.append('@code{')
def depart_literal_emphasis(self, node: Element) -> None:
self.body.append('}')
def visit_literal_strong(self, node: Element) -> None:
self.body.append('@code{')
def depart_literal_strong(self, node: Element) -> None:
self.body.append('}')
def visit_index(self, node: Element) -> None:
# terminate the line but don't prevent paragraph breaks
if isinstance(node.parent, nodes.paragraph):
self.ensure_eol()
else:
self.body.append('\n')
for entry in node['entries']:
typ, text, tid, text2, key_ = entry
text = self.escape_menu(text)
self.body.append('@geindex %s\n' % text)
def visit_versionmodified(self, node: Element) -> None:
self.body.append('\n')
def depart_versionmodified(self, node: Element) -> None:
self.body.append('\n')
def visit_start_of_file(self, node: Element) -> None:
# add a document target
self.next_section_ids.add(':doc')
self.curfilestack.append(node['docname'])
self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
self.footnotestack.pop()
def visit_centered(self, node: Element) -> None:
txt = self.escape_arg(node.astext())
self.body.append('\n\n@center %s\n\n' % txt)
raise nodes.SkipNode
def visit_seealso(self, node: Element) -> None:
self.body.append('\n\n@subsubheading %s\n\n' %
admonitionlabels['seealso'])
def depart_seealso(self, node: Element) -> None:
self.body.append('\n')
def visit_meta(self, node: Element) -> None:
raise nodes.SkipNode
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_acks(self, node: Element) -> None:
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
#############################################################
# Domain-specific object descriptions
#############################################################
# Top-level nodes for descriptions
##################################
def visit_desc(self, node: addnodes.desc) -> None:
self.descs.append(node)
self.at_deffnx = '@deffn'
def depart_desc(self, node: addnodes.desc) -> None:
self.descs.pop()
self.ensure_eol()
self.body.append('@end deffn\n')
def visit_desc_signature(self, node: Element) -> None:
self.escape_hyphens += 1
objtype = node.parent['objtype']
if objtype != 'describe':
for id in node.get('ids'):
self.add_anchor(id, node)
# use the full name of the objtype for the category
try:
domain = self.builder.env.get_domain(node.parent['domain'])
name = domain.get_type_name(domain.object_types[objtype],
self.config.primary_domain == domain.name)
except (KeyError, ExtensionError):
name = objtype
# by convention, the deffn category should be capitalized like a title
category = self.escape_arg(smart_capwords(name))
self.body.append('\n%s {%s} ' % (self.at_deffnx, category))
self.at_deffnx = '@deffnx'
self.desc_type_name = name
def depart_desc_signature(self, node: Element) -> None:
self.body.append("\n")
self.escape_hyphens -= 1
self.desc_type_name = None
def visit_desc_signature_line(self, node: Element) -> None:
pass
def depart_desc_signature_line(self, node: Element) -> None:
pass
def visit_desc_content(self, node: Element) -> None:
pass
def depart_desc_content(self, node: Element) -> None:
pass
def visit_desc_inline(self, node: Element) -> None:
pass
def depart_desc_inline(self, node: Element) -> None:
pass
# Nodes for high-level structure in signatures
##############################################
def visit_desc_name(self, node: Element) -> None:
pass
def depart_desc_name(self, node: Element) -> None:
pass
def visit_desc_addname(self, node: Element) -> None:
pass
def depart_desc_addname(self, node: Element) -> None:
pass
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(' -> ')
def depart_desc_returns(self, node: Element) -> None:
pass
def visit_desc_parameterlist(self, node: Element) -> None:
self.body.append(' (')
self.first_param = 1
def depart_desc_parameterlist(self, node: Element) -> None:
self.body.append(')')
def visit_desc_parameter(self, node: Element) -> None:
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
text = self.escape(node.astext())
# replace no-break spaces with normal ones
        text = text.replace('\xa0', '@w{ }')
self.body.append(text)
raise nodes.SkipNode
def visit_desc_optional(self, node: Element) -> None:
self.body.append('[')
def depart_desc_optional(self, node: Element) -> None:
self.body.append(']')
def visit_desc_annotation(self, node: Element) -> None:
# Try to avoid duplicating info already displayed by the deffn category.
# e.g.
# @deffn {Class} Foo
# -- instead of --
# @deffn {Class} class Foo
txt = node.astext().strip()
if ((self.descs and txt == self.descs[-1]['objtype']) or
(self.desc_type_name and txt in self.desc_type_name.split())):
raise nodes.SkipNode
def depart_desc_annotation(self, node: Element) -> None:
pass
##############################################
def visit_inline(self, node: Element) -> None:
pass
def depart_inline(self, node: Element) -> None:
pass
def visit_abbreviation(self, node: Element) -> None:
abbr = node.astext()
self.body.append('@abbr{')
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append(',%s}' % self.escape_arg(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
return self.visit_literal_emphasis(node)
def depart_manpage(self, node: Element) -> None:
return self.depart_literal_emphasis(node)
def visit_download_reference(self, node: Element) -> None:
pass
def depart_download_reference(self, node: Element) -> None:
pass
def visit_hlist(self, node: Element) -> None:
self.visit_bullet_list(node)
def depart_hlist(self, node: Element) -> None:
self.depart_bullet_list(node)
def visit_hlistcol(self, node: Element) -> None:
pass
def depart_hlistcol(self, node: Element) -> None:
pass
def visit_pending_xref(self, node: Element) -> None:
pass
def depart_pending_xref(self, node: Element) -> None:
pass
def visit_math(self, node: Element) -> None:
self.body.append('@math{' + self.escape_arg(node.astext()) + '}')
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
self.add_anchor(node['label'], node)
self.body.append('\n\n@example\n%s\n@end example\n\n' %
self.escape_arg(node.astext()))
raise nodes.SkipNode
@property
def desc(self) -> Optional[addnodes.desc]:
warnings.warn('TexinfoWriter.desc is deprecated.', RemovedInSphinx50Warning)
if len(self.descs):
return self.descs[-1]
else:
return None
|
py | b40f03ab67ee991fbbe570912e486b90990a28fe | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors and others (see LICENSE.txt)
#
# Licensed under the terms of the MIT and other licenses where noted
# (see LICENSE.txt in this directory and NOTICE.txt in the root for details)
# -----------------------------------------------------------------------------
"""
spyder.plugins.help.utils
=========================
Configuration files for the Help plugin rich text mode.
See their headers, LICENSE.txt in this directory or NOTICE.txt for licenses.
"""
import sys
from spyder.config.base import get_module_source_path
sys.path.insert(0, get_module_source_path(__name__))
|
py | b40f045c16a72fe9126fc9fdc36addde57f51e4a | # The following comment should be removed at some point in the future.
# It's included for now because without it InstallCommand.run() has a
# couple errors where we have to know req.name is str rather than
# Optional[str] for the InstallRequirement req.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
import site
from optparse import SUPPRESS_HELP
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand, with_cleanup
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import distutils_scheme
from pip._internal.operations.check import check_install_conflicts
from pip._internal.req import install_given_reqs
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.distutils_args import parse_distutils_args
from pip._internal.utils.filesystem import test_writable_dir
from pip._internal.utils.misc import (
ensure_dir,
get_installed_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import virtualenv_no_global
from pip._internal.wheel_builder import build, should_build_for_install_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, Iterable, List, Optional
from pip._internal.models.format_control import FormatControl
from pip._internal.req.req_install import InstallRequirement
from pip._internal.wheel_builder import BinaryAllowedPredicate
from pip._internal.locations import running_under_virtualenv
logger = logging.getLogger(__name__)
def get_check_binary_allowed(format_control):
# type: (FormatControl) -> BinaryAllowedPredicate
def check_binary_allowed(req):
# type: (InstallRequirement) -> bool
if req.use_pep517:
return True
canonical_name = canonicalize_name(req.name)
allowed_formats = format_control.get_allowed_formats(canonical_name)
return "binary" in allowed_formats
return check_binary_allowed
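# Illustrative note (not part of the original source): the predicate returned here is later
# passed to should_build_for_install_command() in run(). For example, with '--no-binary SomePkg'
# in the format control, a non-PEP 517 requirement named 'SomePkg' yields False, so pip skips
# building a wheel for it and falls back to a direct (setup.py) install.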
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmdoptions.add_target_python_options(cmd_opts)
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.) On Debian systems, this is the "
"default when running outside of a virtual environment "
"and not as root.")
cmd_opts.add_option(
'--no-user',
dest='use_system_location',
action='store_true',
help=SUPPRESS_HELP)
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(
'--system',
dest='use_system_location',
action='store_true',
help="Install using the system scheme (overrides --user on "
"Debian systems)")
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. The handling of dependencies depends on the '
'upgrade-strategy used.'
)
cmd_opts.add_option(
'--upgrade-strategy',
dest='upgrade_strategy',
default='only-if-needed',
choices=['only-if-needed', 'eager'],
help='Determines how dependency upgrading should be handled '
'[default: %default]. '
'"eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the '
'requirements of the upgraded package(s). '
'"only-if-needed" - are upgraded only when they do not '
'satisfy the requirements of the upgraded package(s).'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='Reinstall all packages even if they are already '
'up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages, overwriting them. '
'This can break your system if the existing package '
'is of a different version or was installed '
'with a different package manager!'
)
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.use_pep517())
cmd_opts.add_option(cmdoptions.no_use_pep517())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.prefer_binary())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
@with_cleanup
def run(self, options, args):
# type: (Values, List[Any]) -> int
if options.use_user_site and options.target_dir is not None:
raise CommandError("Can not combine '--user' and '--target'")
cmdoptions.check_install_build_global(options)
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
cmdoptions.check_dist_restriction(options, check_target=True)
if options.python_version:
python_versions = [options.python_version]
else:
python_versions = None
# compute install location defaults
if (not options.use_user_site and not options.prefix_path and not
options.target_dir and not options.use_system_location):
if not running_under_virtualenv() and os.geteuid() != 0:
options.use_user_site = True
if options.use_system_location:
options.use_user_site = False
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
options.use_user_site = decide_user_install(
options.use_user_site,
prefix_path=options.prefix_path,
target_dir=options.target_dir,
root_path=options.root_path,
isolated_mode=options.isolated_mode,
)
target_temp_dir = None # type: Optional[TempDirectory]
target_temp_dir_path = None # type: Optional[str]
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
# Create a target directory for using with the target option
target_temp_dir = TempDirectory(kind="target")
target_temp_dir_path = target_temp_dir.path
global_options = options.global_options or []
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
req_tracker = self.enter_context(get_requirement_tracker())
directory = TempDirectory(
options.build_dir,
delete=build_delete,
kind="install",
globally_managed=True,
)
try:
reqs = self.get_requirements(
args, options, finder, session,
check_supported_wheels=not options.target_dir,
)
warn_deprecated_install_options(
reqs, options.install_options
)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
req_tracker=req_tracker,
session=session,
finder=finder,
use_user_site=options.use_user_site,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
ignore_installed=options.ignore_installed,
ignore_requires_python=options.ignore_requires_python,
force_reinstall=options.force_reinstall,
upgrade_strategy=upgrade_strategy,
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(
reqs, check_supported_wheels=not options.target_dir
)
try:
pip_req = requirement_set.get_requirement("pip")
except KeyError:
modifying_pip = None
else:
# If we're not replacing an already installed pip,
# we're not modifying it.
modifying_pip = pip_req.satisfied_by is None
protect_pip_from_modification_on_windows(
modifying_pip=modifying_pip
)
check_binary_allowed = get_check_binary_allowed(
finder.format_control
)
reqs_to_build = [
r for r in requirement_set.requirements.values()
if should_build_for_install_command(
r, check_binary_allowed
)
]
_, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
build_options=[],
global_options=[],
)
# If we're using PEP 517, we cannot do a direct install
# so we fail here.
# We don't care about failures building legacy
# requirements, as we'll fall through to a direct
# install for those.
pep517_build_failures = [
r for r in build_failures if r.use_pep517
]
if pep517_build_failures:
raise InstallationError(
"Could not build wheels for {} which use"
" PEP 517 and cannot be installed directly".format(
", ".join(r.name for r in pep517_build_failures)))
to_install = resolver.get_installation_order(
requirement_set
)
# Consistency Checking of the package set we're installing.
should_warn_about_conflicts = (
not options.ignore_dependencies and
options.warn_about_conflicts
)
if should_warn_about_conflicts:
self._warn_about_conflicts(to_install)
# Don't warn about script install locations if
# --target has been specified
warn_script_location = options.warn_script_location
if options.target_dir:
warn_script_location = False
installed = install_given_reqs(
to_install,
install_options,
global_options,
root=options.root_path,
home=target_temp_dir_path,
prefix=options.prefix_path,
pycompile=options.compile,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
)
lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir_path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
working_set = pkg_resources.WorkingSet(lib_locations)
installed.sort(key=operator.attrgetter('name'))
items = []
for result in installed:
item = result.name
try:
installed_version = get_installed_version(
result.name, working_set=working_set
)
if installed_version:
item += '-' + installed_version
except Exception:
pass
items.append(item)
installed_desc = ' '.join(items)
if installed_desc:
write_output(
'Successfully installed %s', installed_desc,
)
except EnvironmentError as error:
show_traceback = (self.verbosity >= 1)
message = create_env_error_message(
error, show_traceback, options.use_user_site,
)
logger.error(message, exc_info=show_traceback)
return ERROR
if options.target_dir:
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
return SUCCESS
def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
with target_temp_dir:
scheme = distutils_scheme('', home=target_temp_dir.path)
purelib_dir = scheme['purelib']
platlib_dir = scheme['platlib']
data_dir = scheme['data']
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
def _warn_about_conflicts(self, to_install):
try:
package_set, _dep_info = check_install_conflicts(to_install)
except Exception:
logger.error("Error checking for conflicts.", exc_info=True)
return
missing, conflicting = _dep_info
# NOTE: There is some duplication here from pip check
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
logger.critical(
"%s %s requires %s, which is not installed.",
project_name, version, dependency[1],
)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
logger.critical(
"%s %s has requirement %s, but you'll have %s %s which is "
"incompatible.",
project_name, version, req, dep_name, dep_version,
)
def get_lib_location_guesses(*args, **kwargs):
scheme = distutils_scheme('', *args, **kwargs)
return [scheme['purelib'], scheme['platlib']]
def site_packages_writable(**kwargs):
return all(
test_writable_dir(d) for d in set(get_lib_location_guesses(**kwargs))
)
def decide_user_install(
use_user_site, # type: Optional[bool]
prefix_path=None, # type: Optional[str]
target_dir=None, # type: Optional[str]
root_path=None, # type: Optional[str]
isolated_mode=False, # type: bool
):
# type: (...) -> bool
"""Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
"""
# In some cases (config from tox), use_user_site can be set to an integer
# rather than a bool, which 'use_user_site is False' wouldn't catch.
if (use_user_site is not None) and (not use_user_site):
logger.debug("Non-user install by explicit request")
return False
if use_user_site:
if prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
logger.debug("User install by explicit request")
return True
# If we are here, user installs have not been explicitly requested/avoided
assert use_user_site is None
# user install incompatible with --prefix/--target
if prefix_path or target_dir:
logger.debug("Non-user install due to --prefix or --target option")
return False
# If user installs are not enabled, choose a non-user install
if not site.ENABLE_USER_SITE:
logger.debug("Non-user install because user site-packages disabled")
return False
# If we have permission for a non-user install, do that,
# otherwise do a user install.
if site_packages_writable(root=root_path, isolated=isolated_mode):
logger.debug("Non-user install because site-packages writeable")
return False
logger.info("Defaulting to user installation because normal site-packages "
"is not writeable")
return True
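# Illustrative summary (not part of the original source) of the decision above, assuming user
# site-packages are enabled and no isolation is in effect:
#   decide_user_install(False)                          -> False (explicit non-user request)
#   decide_user_install(True, prefix_path='/opt/x')     -> raises CommandError (conflicts with --prefix)
#   decide_user_install(None, target_dir='build/pkgs')  -> False (--target implies a non-user install)
#   decide_user_install(None), writable site-packages   -> False
#   decide_user_install(None), read-only site-packages  -> True  (falls back to a user install)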
def warn_deprecated_install_options(requirements, options):
# type: (List[InstallRequirement], Optional[List[str]]) -> None
"""If any location-changing --install-option arguments were passed for
requirements or on the command-line, then show a deprecation warning.
"""
def format_options(option_names):
# type: (Iterable[str]) -> List[str]
return ["--{}".format(name.replace("_", "-")) for name in option_names]
offenders = []
for requirement in requirements:
install_options = requirement.install_options
location_options = parse_distutils_args(install_options)
if location_options:
offenders.append(
"{!r} from {}".format(
format_options(location_options.keys()), requirement
)
)
if options:
location_options = parse_distutils_args(options)
if location_options:
offenders.append(
"{!r} from command line".format(
format_options(location_options.keys())
)
)
if not offenders:
return
deprecated(
reason=(
"Location-changing options found in --install-option: {}. "
"This configuration may cause unexpected behavior and is "
"unsupported.".format(
"; ".join(offenders)
)
),
replacement=(
"using pip-level options like --user, --prefix, --root, and "
"--target"
),
gone_in="20.2",
issue=7309,
)
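# Illustrative example (not part of the original source): the nested format_options() helper
# turns distutils destination options back into their flag form, e.g.
#   format_options(['install_lib', 'prefix'])  ->  ['--install-lib', '--prefix']
# so an offender is reported roughly as "['--install-lib'] from <requirement>" or
# "['--prefix'] from command line" in the deprecation message above.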
def create_env_error_message(error, show_traceback, using_user_site):
"""Format an error message for an EnvironmentError
It may occur anytime during the execution of the install command.
"""
parts = []
# Mention the error if we are not going to show a traceback
parts.append("Could not install packages due to an EnvironmentError")
if not show_traceback:
parts.append(": ")
parts.append(str(error))
else:
parts.append(".")
    # Split the error indication from a helper message (if any)
parts[-1] += "\n"
# Suggest useful actions to the user:
# (1) using user site-packages or (2) verifying the permissions
if error.errno == errno.EACCES:
user_option_part = "Consider using the `--user` option"
permissions_part = "Check the permissions"
if not using_user_site:
parts.extend([
user_option_part, " or ",
permissions_part.lower(),
])
else:
parts.append(permissions_part)
parts.append(".\n")
return "".join(parts).strip() + "\n"
|
py | b40f046eac6ff345849b624fc2f5b02416b54369 | import frappe
def get_context(context):
context.no_cache = True
if frappe.session.user == 'Guest':
frappe.local.flags.redirect_location = frappe.utils.get_url('/login')
raise frappe.Redirect
return context |
py | b40f07acb0dc5a2899115b115d391c8d559ea2f7 | # Generated by Django 3.1.7 on 2021-03-23 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0006_notification"),
]
operations = [
migrations.AddField(
model_name="eventtype",
name="color",
field=models.CharField(default="#343a40", max_length=7, verbose_name="color"),
),
]
|
py | b40f07f17a897170f96657c36e93682f968ebd36 | """
This module creates the parse-tree walker for commonListener.
"""
from antlr4 import CommonTokenStream, ParseTreeWalker
from qualitymeter.gen.javaLabeled.JavaLexer import JavaLexer
from qualitymeter.gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from qualitymeter.listener.listener import Listener
class WalkerCreator:
def __init__(self, streams):
self.classes = []
self.interfaces = []
self.classOrInterface = []
self.hierarchies = []
for stream in streams:
# Step 1: Create an instance of lexer
lexer = JavaLexer(stream)
# Step 2: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 3: Create an instance of parser
parser = JavaParserLabeled(token_stream)
# Step 4: Create parse tree
parse_tree = parser.compilationUnit()
# Step 5: Create an instance of listener
listener = Listener()
# Step 6: Create an instance of walker
walker = ParseTreeWalker()
# Step 7: Traverse parse tree
walker.walk(listener, parse_tree)
            # Step 8: Save the tree's classes and interfaces
self.classes += listener.classes
self.interfaces += listener.interfaces
self.hierarchies += listener.hierarchies
self.classOrInterface = self.classes + self.interfaces
def find_parent(self, parent):
return [cls for cls in self.classes if cls.identifier.getText() == parent.identifier]
def find_implementation(self, implementation):
for clf in self.classOrInterface:
if implementation and clf:
if clf.identifier.getText() == implementation.identifier.getText():
return clf
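if __name__ == "__main__":
    # Hedged demo (illustrative only): WalkerCreator expects a sequence of
    # ANTLR character streams; the Java file path below is an assumption.
    from antlr4 import FileStream
    streams = [FileStream("Example.java", encoding="utf-8")]
    creator = WalkerCreator(streams)
    print("classes:", len(creator.classes),
          "interfaces:", len(creator.interfaces))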
|
py | b40f0980bb6ad5f23f48eedc23fb195d22f47c1d | import unittest
from flask import request
from app import app
from app.forms import SteuerlotseBaseForm
from app.forms.fields import SteuerlotseStringField
from app.forms.steps.step import FormStep
class MockForm(SteuerlotseBaseForm):
string_field = SteuerlotseStringField()
class TestStripWhitespaces(unittest.TestCase):
def test_if_string_field_then_strip_whitespaces_back(self):
step = FormStep(form=MockForm, title='step')
data = {'string_field': "Here is whitespace "}
expected_output = "Here is whitespace"
with app.test_request_context(method='POST', data=data):
form = step.create_form(request, prefilled_data={})
self.assertEqual(expected_output, form.data['string_field'])
def test_if_string_field_then_strip_whitespaces_front(self):
step = FormStep(form=MockForm, title='step')
data = {'string_field': " Here is whitespace"}
expected_output = "Here is whitespace"
with app.test_request_context(method='POST', data=data):
form = step.create_form(request, prefilled_data={})
self.assertEqual(expected_output, form.data['string_field'])
|
py | b40f09f9f92fbedc5ea257e08d81627385504a10 | # Generated by Django 3.2 on 2021-06-11 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0033_auto_20210611_0708'),
]
operations = [
migrations.AddField(
model_name='garden',
name='organizations',
field=models.ManyToManyField(blank=True, to='core.Organization'),
),
]
|
py | b40f0aeca588402ac758f3b4c1b641f38f85ac6b | from typing import Tuple
SLEEP_TIME: float = 1 #seconds
BROWSERS: Tuple[str, ...] = ("Mozilla Firefox", "Google Chrome", "Edge", "Opera", "Brave", "Chromium", "Internet Explorer")
BROWSER_PS_NAMES: Tuple[str, ...] = ("firefox", "chrome", "edge", "opera", "brave", "iexplore", "application") # 'application' for edge
SLEEP_TIME_KEYS: float = 0.05 #seconds
SLEEP_TIME_COPY: float = 0.1 #seconds
BROWSER_ENDS: Tuple[str, ...] = ('Mozilla Firefox', 'Google Chrome', 'Opera', 'Brave')
NEW_TABS: Tuple[str, ...] = ('New Tab - Google Chrome', 'Mozilla Firefox', 'Untitled - Brave', 'New Tab - Brave')
FIREFOX: Tuple[str, ...] = ('Mozilla Firefox',)
CHROME: Tuple[str, ...] = ('Google Chrome', 'Opera', 'Brave')
# BROWSER_ENDS: Tuple[bytes, ...] = (b'Mozilla Firefox', b'Google Chrome', b'Opera', b'Brave')
# NEW_TABS: Tuple[bytes, ...] = (b'New Tab - Google Chrome', b'Mozilla Firefox', b'Untitled - Brave', b'New Tab - Brave')
# FIREFOX: Tuple[bytes, ...] = (b'Mozilla Firefox',)
# CHROME: Tuple[bytes, ...] = (b'Google Chrome', b'Opera', b'Brave') |
py | b40f0b3e9f0de79765bb49753df96a7171c89cda | #
# PySNMP MIB module HUAWEI-RMON-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-RMON-EXT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:36:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint")
rmonExtend, hwInternetProtocol, hwLocal = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "rmonExtend", "hwInternetProtocol", "hwLocal")
OwnerString, = mibBuilder.importSymbols("IF-MIB", "OwnerString")
EntryStatus, = mibBuilder.importSymbols("RMON-MIB", "EntryStatus")
trapDestEntry, trapDestIndex = mibBuilder.importSymbols("RMON2-MIB", "trapDestEntry", "trapDestIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, NotificationType, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Counter32, ModuleIdentity, Integer32, Unsigned32, MibIdentifier, Counter64, TimeTicks, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "NotificationType", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Counter32", "ModuleIdentity", "Integer32", "Unsigned32", "MibIdentifier", "Counter64", "TimeTicks", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
performance = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4))
performance.setRevisions(('2003-03-15 00:00',))
if mibBuilder.loadTexts: performance.setLastUpdated('200303150000Z')
if mibBuilder.loadTexts: performance.setOrganization('Huawei Technologies co.,Ltd.')
prialarmTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1), )
if mibBuilder.loadTexts: prialarmTable.setStatus('current')
prialarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1), ).setIndexNames((0, "HUAWEI-RMON-EXT-MIB", "prialarmIndex"))
if mibBuilder.loadTexts: prialarmEntry.setStatus('current')
prialarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: prialarmIndex.setStatus('current')
prialarmInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmInterval.setStatus('current')
prialarmVariable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmVariable.setStatus('current')
prialarmSympol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmSympol.setStatus('current')
prialarmSampleType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("absoluteValue", 1), ("deltaValue", 2), ("speedValue", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmSampleType.setStatus('current')
prialarmValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: prialarmValue.setStatus('current')
prialarmStartupAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("risingAlarm", 1), ("fallingAlarm", 2), ("risingOrFallingAlarm", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmStartupAlarm.setStatus('current')
prialarmRisingThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmRisingThreshold.setStatus('current')
prialarmFallingThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmFallingThreshold.setStatus('current')
prialarmRisingEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmRisingEventIndex.setStatus('current')
prialarmFallingEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmFallingEventIndex.setStatus('current')
prialarmStatCycle = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmStatCycle.setStatus('current')
prialarmStatType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("forever", 1), ("during", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmStatType.setStatus('current')
prialarmOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 14), OwnerString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmOwner.setStatus('current')
prialarmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 4, 1, 1, 15), EntryStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: prialarmStatus.setStatus('current')
hwrmonEnableTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 5), )
if mibBuilder.loadTexts: hwrmonEnableTable.setStatus('current')
hwrmonEnableTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 5, 1), ).setIndexNames((0, "HUAWEI-RMON-EXT-MIB", "hwrmonEnableIfIndex"))
if mibBuilder.loadTexts: hwrmonEnableTableEntry.setStatus('current')
hwrmonEnableIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwrmonEnableIfIndex.setStatus('current')
hwrmonEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwrmonEnableStatus.setStatus('current')
hwTrapDestTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 6), )
if mibBuilder.loadTexts: hwTrapDestTable.setStatus('current')
hwTrapDestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 6, 1), )
trapDestEntry.registerAugmentions(("HUAWEI-RMON-EXT-MIB", "hwTrapDestEntry"))
hwTrapDestEntry.setIndexNames(*trapDestEntry.getIndexNames())
if mibBuilder.loadTexts: hwTrapDestEntry.setStatus('current')
hwTrapDestVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("snmpv1", 1), ("snmpv2", 2), ("snmpv3andauthen", 3), ("snmpv3andnoauthen", 4), ("snmpv3andpriv", 5))).clone('snmpv1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwTrapDestVersion.setStatus('current')
rmonExtendEventsV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 0))
if mibBuilder.loadTexts: rmonExtendEventsV2.setStatus('current')
pririsingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 0, 1)).setObjects(("HUAWEI-RMON-EXT-MIB", "prialarmIndex"), ("HUAWEI-RMON-EXT-MIB", "prialarmSympol"), ("HUAWEI-RMON-EXT-MIB", "prialarmSampleType"), ("HUAWEI-RMON-EXT-MIB", "prialarmValue"), ("HUAWEI-RMON-EXT-MIB", "prialarmRisingThreshold"))
if mibBuilder.loadTexts: pririsingAlarm.setStatus('current')
prifallingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 2011, 1, 3, 4, 0, 2)).setObjects(("HUAWEI-RMON-EXT-MIB", "prialarmIndex"), ("HUAWEI-RMON-EXT-MIB", "prialarmSympol"), ("HUAWEI-RMON-EXT-MIB", "prialarmSampleType"), ("HUAWEI-RMON-EXT-MIB", "prialarmValue"), ("HUAWEI-RMON-EXT-MIB", "prialarmFallingThreshold"))
if mibBuilder.loadTexts: prifallingAlarm.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-RMON-EXT-MIB", hwTrapDestVersion=hwTrapDestVersion, rmonExtendEventsV2=rmonExtendEventsV2, prialarmIndex=prialarmIndex, prialarmStatType=prialarmStatType, hwrmonEnableStatus=hwrmonEnableStatus, hwrmonEnableTableEntry=hwrmonEnableTableEntry, prialarmTable=prialarmTable, prialarmValue=prialarmValue, hwTrapDestEntry=hwTrapDestEntry, PYSNMP_MODULE_ID=performance, prialarmStartupAlarm=prialarmStartupAlarm, prialarmInterval=prialarmInterval, prialarmStatus=prialarmStatus, performance=performance, prialarmRisingThreshold=prialarmRisingThreshold, prifallingAlarm=prifallingAlarm, prialarmEntry=prialarmEntry, prialarmSympol=prialarmSympol, hwTrapDestTable=hwTrapDestTable, hwrmonEnableIfIndex=hwrmonEnableIfIndex, prialarmFallingEventIndex=prialarmFallingEventIndex, prialarmFallingThreshold=prialarmFallingThreshold, prialarmStatCycle=prialarmStatCycle, prialarmRisingEventIndex=prialarmRisingEventIndex, hwrmonEnableTable=hwrmonEnableTable, prialarmOwner=prialarmOwner, prialarmVariable=prialarmVariable, prialarmSampleType=prialarmSampleType, pririsingAlarm=pririsingAlarm)
|
py | b40f0b8cd99765d16387a14d69735b0830844606 | from warnings import catch_warnings
import pytest
import numpy as np
import pandas as pd
from pandas import (Panel, Series, MultiIndex, DataFrame,
Timestamp, Index, date_range)
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning, UnsortedIndexError
from pandas.tests.indexing.common import _mklbl
class TestMultiIndexBasic(tm.TestCase):
def test_iloc_getitem_multiindex2(self):
# TODO(wesm): fix this
pytest.skip('this test was being suppressed, '
'needs to be fixed')
arr = np.random.randn(3, 3)
df = DataFrame(arr, columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
rs = df.iloc[2]
xp = Series(arr[2], index=df.columns)
tm.assert_series_equal(rs, xp)
rs = df.iloc[:, 2]
xp = Series(arr[:, 2], index=df.index)
tm.assert_series_equal(rs, xp)
rs = df.iloc[2, 2]
xp = df.values[2, 2]
self.assertEqual(rs, xp)
# for multiple items
# GH 5528
rs = df.iloc[[0, 1]]
xp = df.xs(4, drop_level=False)
tm.assert_frame_equal(rs, xp)
tup = zip(*[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
index = MultiIndex.from_tuples(tup)
df = DataFrame(np.random.randn(4, 4), index=index)
rs = df.iloc[[2, 3]]
xp = df.xs('b', drop_level=False)
tm.assert_frame_equal(rs, xp)
def test_setitem_multiindex(self):
for index_fn in ('ix', 'loc'):
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = pd.MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=self.assertEqual)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=self.assertEqual)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=self.assertEqual)
            # GH 7218, assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=self.assertEqual,
expected=3, )
# GH5206
df = pd.DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = pd.MultiIndex.from_product([
['A', 'B', 'C'],
pd.date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = pd.MultiIndex.from_product([
['foo', 'bar'],
pd.date_range('2016-01-01', '2016-02-01', freq='MS')])
df = pd.DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = pd.MultiIndex.from_tuples(
[('A', pd.Timestamp('2015-01-01')),
('A', pd.Timestamp('2015-02-01'))])
subcols = pd.MultiIndex.from_tuples(
[('foo', pd.Timestamp('2016-01-01')),
('foo', pd.Timestamp('2016-02-01'))])
vals = pd.DataFrame(np.random.random((2, 2)),
index=subidx, columns=subcols)
check(target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal, )
# set all columns
vals = pd.DataFrame(
np.random.random((2, 4)), index=subidx, columns=cols)
check(target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal, )
# identity
copy = df.copy()
check(target=df, indexers=(df.index, df.columns), value=df,
compare_fn=tm.assert_frame_equal, expected=copy)
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j', 'k'],
['X', 'X', 'Y', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_int.iloc[0]
with catch_warnings(record=True):
xp = mi_int.ix[4].ix[8]
tm.assert_series_equal(rs, xp, check_names=False)
self.assertEqual(rs.name, (4, 8))
self.assertEqual(xp.name, 8)
        # 2nd (last) column
rs = mi_int.iloc[:, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2]
tm.assert_series_equal(rs, xp)
# corner column
rs = mi_int.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2].ix[2]
self.assertEqual(rs, xp)
# this is basically regular indexing
rs = mi_labels.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j'].ix[0, 0]
self.assertEqual(rs, xp)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
        # 2nd (last) column
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1, 1),
index=pd.MultiIndex.from_product([[1, 2, 3, 4],
[1, 2, 3]]))
expected = df.loc[([1, 2], ), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = pd.Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
        # multiindexer getitem with list of indexers skips wrong element
s = pd.Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, 'a':'c']
tm.assert_series_equal(result, expected)
def test_multiindex_perf_warn(self):
df = DataFrame({'jim': [0, 0, 1, 1],
'joe': ['x', 'x', 'z', 'y'],
'jolie': np.random.rand(4)}).set_index(['jim', 'joe'])
with tm.assert_produces_warning(PerformanceWarning,
clear=[pd.core.index]):
df.loc[(1, 'z')]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0, )]
def test_series_getitem_multiindex(self):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = s[:, 0]
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.loc[:, 1]
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# xs
result = s.xs(0, level=0)
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.xs(1, level=1)
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# GH6258
dt = list(date_range('20130903', periods=3))
idx = MultiIndex.from_product([list('AB'), dt])
s = Series([1, 3, 4, 1, 3, 4], index=idx)
result = s.xs('20130903', level=1)
expected = Series([1, 1], index=list('AB'))
tm.assert_series_equal(result, expected)
# GH5684
idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one'),
('b', 'two')])
s = Series([1, 2, 3, 4], index=idx)
s.index.set_names(['L1', 'L2'], inplace=True)
result = s.xs('one', level='L2')
expected = Series([1, 3], index=['a', 'b'])
expected.index.set_names(['L1'], inplace=True)
tm.assert_series_equal(result, expected)
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples(
[('a', 'foo'), ('a', 'bar'), ('b', 'hello'),
('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sort_index(axis=1, inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:, 0:2].loc[:, 'a']
tm.assert_frame_equal(result, expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
tm.assert_frame_equal(result, expected)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = pd.DataFrame(np.random.randn(6, 3),
index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']] * 2
df = df_orig.copy()
df.loc[['bar']] *= 2
tm.assert_frame_equal(df.loc[['bar']], expected)
# raise because these have differing levels
def f():
df.loc['bar'] *= 2
self.assertRaises(TypeError, f)
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0
}})
df_orig.index = MultiIndex.from_tuples(df_orig.index,
names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], 'price'] *= 2
tm.assert_frame_equal(df, expected)
def test_getitem_duplicates_multiindex(self):
        # GH 5725 the 'A' happens to be a valid Timestamp so it doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['D']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['A']
self.assertRaises(KeyError, f)
def f():
df.val['X']
self.assertRaises(KeyError, f)
# A is treated as a special Timestamp
index = MultiIndex(levels=[['A', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['A']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['X']
self.assertRaises(KeyError, f)
# GH 7866
# multi-index slicing with missing indexers
idx = pd.MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = pd.Series(np.arange(9, dtype='int64'), index=idx).sort_index()
exp_idx = pd.MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = pd.Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
result = s.loc[['A']]
tm.assert_series_equal(result, expected)
result = s.loc[['A', 'D']]
tm.assert_series_equal(result, expected)
# not any values found
self.assertRaises(KeyError, lambda: s.loc[['D']])
# empty ok
result = s.loc[[]]
expected = s.iloc[[]]
tm.assert_series_equal(result, expected)
idx = pd.IndexSlice
expected = pd.Series([0, 3, 6], index=pd.MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
result = s.loc[idx[:, ['foo']]]
tm.assert_series_equal(result, expected)
result = s.loc[idx[:, ['foo', 'bah']]]
tm.assert_series_equal(result, expected)
# GH 8737
# empty indexer
multi_index = pd.MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(
np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5),
columns=multi_index.reindex([])[0])
result1 = df.loc[:, ([], slice(None))]
result2 = df.loc[:, (['foo'], [])]
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
self.assertEqual(result, np.mean)
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df['d'] = np.nan
arr = np.array([0., 1.])
df.ix[4, 'd'] = arr
tm.assert_series_equal(df.ix[4, 'd'],
Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df.ix[4, 'c'] = arr
exp = Series(arr, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
df.ix[4, 'c'] = 10
exp = Series(10, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
def f():
df.ix[4, 'c'] = [0, 1, 2, 3]
self.assertRaises(ValueError, f)
def f():
df.ix[4, 'c'] = [0]
self.assertRaises(ValueError, f)
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A' + num for num in
map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]),
name=df2.index.values[0]).reindex(f_index)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
df.ix[name, 'new_col'] = new_vals
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(np.arange(20),
MultiIndex.from_product([list('abcde'), np.arange(4)]))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
assert_slices_equivalent(SLC['d'::-1], SLC[15::-1])
assert_slices_equivalent(SLC[('d', )::-1], SLC[15::-1])
assert_slices_equivalent(SLC[:'d':-1], SLC[:11:-1])
assert_slices_equivalent(SLC[:('d', ):-1], SLC[:11:-1])
assert_slices_equivalent(SLC['d':'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['d':('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['b':'d':-1], SLC[:0])
assert_slices_equivalent(SLC[('c', 2)::-1], SLC[10::-1])
assert_slices_equivalent(SLC[:('c', 2):-1], SLC[:9:-1])
assert_slices_equivalent(SLC[('e', 0):('c', 2):-1], SLC[16:9:-1])
def test_multiindex_slice_first_level(self):
# GH 12697
freq = ['a', 'b', 'c', 'd']
idx = pd.MultiIndex.from_product([freq, np.arange(500)])
df = pd.DataFrame(list(range(2000)), index=idx, columns=['Test'])
df_slice = df.loc[pd.IndexSlice[:, 30:70], :]
result = df_slice.loc['a']
expected = pd.DataFrame(list(range(30, 71)),
columns=['Test'],
index=range(30, 71))
tm.assert_frame_equal(result, expected)
result = df_slice.loc['d']
expected = pd.DataFrame(list(range(1530, 1571)),
columns=['Test'],
index=range(30, 71))
tm.assert_frame_equal(result, expected)
class TestMultiIndexSlicers(tm.TestCase):
def test_per_axis_per_level_getitem(self):
# GH6134
# example test case
ix = MultiIndex.from_product([_mklbl('A', 5), _mklbl('B', 7), _mklbl(
'C', 4), _mklbl('D', 2)])
df = DataFrame(np.arange(len(ix.get_values())), index=ix)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C2' or c == 'C3')]]
result = df.loc[(slice('A1', 'A3'), slice(None), slice('C1', 'C3')), :]
tm.assert_frame_equal(result, expected)
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df = df.sort_index(axis=0).sort_index(axis=1)
# identity
result = df.loc[(slice(None), slice(None)), :]
tm.assert_frame_equal(result, df)
result = df.loc[(slice(None), slice(None)), (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
result = df.loc[:, (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
# index
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), 1), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# columns
result = df.loc[:, (slice(None), ['foo'])]
expected = df.iloc[:, [1, 3]]
tm.assert_frame_equal(result, expected)
# both
result = df.loc[(slice(None), 1), (slice(None), ['foo'])]
expected = df.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc['A', 'a']
expected = DataFrame(dict(bar=[1, 5, 9], foo=[0, 4, 8]),
index=Index([1, 2, 3], name='two'),
columns=Index(['bar', 'foo'], name='lvl1'))
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), [1, 2]), :]
expected = df.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# multi-level series
s = Series(np.arange(len(ix.get_values())), index=ix)
result = s.loc['A1':'A3', :, ['C1', 'C3']]
expected = s.loc[[tuple([a, b, c, d])
for a, b, c, d in s.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_series_equal(result, expected)
# boolean indexers
result = df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
expected = df.iloc[[2, 3]]
tm.assert_frame_equal(result, expected)
def f():
df.loc[(slice(None), np.array([True, False])), :]
self.assertRaises(ValueError, f)
# ambiguous cases
# these can be multiply interpreted (e.g. in this case
# as df.loc[slice(None),[1]] as well
self.assertRaises(KeyError, lambda: df.loc[slice(None), [1]])
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# not lexsorted
self.assertEqual(df.index.lexsort_depth, 2)
df = df.sort_index(level=1, axis=0)
self.assertEqual(df.index.lexsort_depth, 0)
with tm.assertRaisesRegexp(
UnsortedIndexError,
'MultiIndex Slicing requires the index to be fully '
r'lexsorted tuple len \(2\), lexsort depth \(0\)'):
df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
def test_multiindex_slicers_non_unique(self):
# GH 7106
# non-unique mi index support
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 3],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
self.assertFalse(df.index.is_unique)
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
tm.assert_frame_equal(result, expected)
# this is equivalent of an xs expression
result = df.xs(1, level=2, drop_level=False)
tm.assert_frame_equal(result, expected)
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 2],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
self.assertFalse(df.index.is_unique)
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
self.assertFalse(result.index.is_unique)
tm.assert_frame_equal(result, expected)
# GH12896
# numpy-implementation dependent bug
ints = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 16,
17, 18, 19, 200000, 200000]
n = len(ints)
idx = MultiIndex.from_arrays([['a'] * n, ints])
result = Series([1] * n, index=idx)
result = result.sort_index()
result = result.loc[(slice(None), slice(100000))]
expected = Series([1] * (n - 2), index=idx[:-2]).sort_index()
tm.assert_series_equal(result, expected)
def test_multiindex_slicers_datetimelike(self):
# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime
dates = [datetime.datetime(2012, 1, 1, 12, 12, 12) +
datetime.timedelta(days=i) for i in range(6)]
freq = [1, 2]
index = MultiIndex.from_product(
[dates, freq], names=['date', 'frequency'])
df = DataFrame(
np.arange(6 * 2 * 4, dtype='int64').reshape(
-1, 4), index=index, columns=list('ABCD'))
# multi-axis slicing
idx = pd.IndexSlice
expected = df.iloc[[0, 2, 4], [0, 1]]
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx[Timestamp('2012-01-01 12:12:12'):Timestamp(
'2012-01-03 12:12:12')], idx[1:1]), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')), 1),
slice('A', 'B')]
tm.assert_frame_equal(result, expected)
# with strings
result = df.loc[(slice('2012-01-01 12:12:12', '2012-01-03 12:12:12'),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx['2012-01-01 12:12:12':'2012-01-03 12:12:12'], 1),
idx['A', 'B']]
tm.assert_frame_equal(result, expected)
def test_multiindex_slicers_edges(self):
# GH 8132
# various edge cases
df = DataFrame(
{'A': ['A0'] * 5 + ['A1'] * 5 + ['A2'] * 5,
'B': ['B0', 'B0', 'B1', 'B1', 'B2'] * 3,
'DATE': ["2013-06-11", "2013-07-02", "2013-07-09", "2013-07-30",
"2013-08-06", "2013-06-11", "2013-07-02", "2013-07-09",
"2013-07-30", "2013-08-06", "2013-09-03", "2013-10-01",
"2013-07-09", "2013-08-06", "2013-09-03"],
'VALUES': [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3, 4, 2]})
df['DATE'] = pd.to_datetime(df['DATE'])
df1 = df.set_index(['A', 'B', 'DATE'])
df1 = df1.sort_index()
# A1 - Get all values under "A0" and "A1"
result = df1.loc[(slice('A1')), :]
expected = df1.iloc[0:10]
tm.assert_frame_equal(result, expected)
# A2 - Get all values from the start to "A2"
result = df1.loc[(slice('A2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# A3 - Get all values under "B1" or "B2"
result = df1.loc[(slice(None), slice('B1', 'B2')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13, 14]]
tm.assert_frame_equal(result, expected)
# A4 - Get all values between 2013-07-02 and 2013-07-09
result = df1.loc[(slice(None), slice(None),
slice('20130702', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
# B1 - Get all values in B0 that are also under A0, A1 and A2
result = df1.loc[(slice('A2'), slice('B0')), :]
expected = df1.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for
# the As)
result = df1.loc[(slice(None), slice('B2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# B3 - Get all values from B1 to B2 and up to 2013-08-06
result = df1.loc[(slice(None), slice('B1', 'B2'),
slice('2013-08-06')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13]]
tm.assert_frame_equal(result, expected)
# B4 - Same as A4 but the start of the date slice is not a key.
# shows indexing on a partial selection slice
result = df1.loc[(slice(None), slice(None),
slice('20130701', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
def test_per_axis_per_level_doc_examples(self):
# test index maker
idx = pd.IndexSlice
# from indexing.rst / advanced
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index, columns=columns)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx['A1':'A3', :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx[:, :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
# not sorted
def f():
df.loc['A1', (slice(None), 'foo')]
self.assertRaises(UnsortedIndexError, f)
df = df.sort_index(axis=1)
# slicing
df.loc['A1', (slice(None), 'foo')]
df.loc[(slice(None), slice(None), ['C1', 'C3']), (slice(None), 'foo')]
# setitem
df.loc(axis=0)[:, :, ['C1', 'C3']] = -10
def test_loc_axis_arguments(self):
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index,
columns=columns).sort_index().sort_index(axis=1)
# axis 0
result = df.loc(axis=0)['A1':'A3', :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='index')[:, :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
# axis 1
result = df.loc(axis=1)[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='columns')[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
# invalid axis
def f():
df.loc(axis=-1)[:, :, ['C1', 'C3']]
self.assertRaises(ValueError, f)
def f():
df.loc(axis=2)[:, :, ['C1', 'C3']]
self.assertRaises(ValueError, f)
def f():
df.loc(axis='foo')[:, :, ['C1', 'C3']]
self.assertRaises(ValueError, f)
def test_per_axis_per_level_setitem(self):
# test index maker
idx = pd.IndexSlice
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df_orig = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df_orig = df_orig.sort_index(axis=0).sort_index(axis=1)
# identity
df = df_orig.copy()
df.loc[(slice(None), slice(None)), :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), slice(None)), (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
# index
df = df_orig.copy()
df.loc[(slice(None), [1]), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, 1] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
# columns
df = df_orig.copy()
df.loc[:, (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[:, [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# both
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, 1], idx[:, ['foo']]] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc['A', 'a'] = 100
expected = df_orig.copy()
expected.iloc[0:3, 0:2] = 100
tm.assert_frame_equal(df, expected)
# setting with a list-like
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100, 100], [100, 100]], dtype='int64')
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# not enough values
df = df_orig.copy()
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100], [100, 100]], dtype='int64')
self.assertRaises(ValueError, f)
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[100, 100, 100, 100], dtype='int64')
self.assertRaises(ValueError, f)
# with an alignable rhs
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = df.loc[(slice(
None), 1), (slice(None), ['foo'])] * 5
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = expected.iloc[[0, 3], [1, 3]] * 5
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= df.loc[(slice(
None), 1), (slice(None), ['foo'])]
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
rhs = df_orig.loc[(slice(None), 1), (slice(None), ['foo'])].copy()
rhs.loc[:, ('c', 'bah')] = 10
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= rhs
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
class TestMultiIndexPanel(tm.TestCase):
def test_iloc_getitem_panel_multiindex(self):
# GH 7199
# Panel with multi-index
multi_index = pd.MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'], major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'], major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
tm.assert_frame_equal(result1, expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
tm.assert_frame_equal(result2, expected2)
expected1 = DataFrame(index=['a'], columns=multi_index,
dtype='float64')
result1 = wd1.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result1, expected1)
expected2 = DataFrame(index=['a'], columns=simple_index,
dtype='float64')
result2 = wd2.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result2, expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
items=['a', 'b', 'c'], major_axis=mi,
minor_axis=['u', 'v', 'w'])
result = p.iloc[:, 1, 0]
expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
tm.assert_series_equal(result, expected)
result = p.loc[:, (1, 'y'), 'u']
tm.assert_series_equal(result, expected)
def test_panel_setitem_with_multiindex(self):
# 10360
# failing with a multi-index
arr = np.array([[[1, 2, 3], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
# reg index
axes = dict(items=['A', 'B'], major_axis=[0, 1],
minor_axis=['X', 'Y', 'Z'])
p1 = Panel(0., **axes)
p1.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p1, expected)
# multi-indexes
axes['items'] = pd.MultiIndex.from_tuples([('A', 'a'), ('B', 'b')])
p2 = Panel(0., **axes)
p2.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p2, expected)
axes['major_axis'] = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
p3 = Panel(0., **axes)
p3.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p3, expected)
axes['minor_axis'] = pd.MultiIndex.from_product([['X'], range(3)])
p4 = Panel(0., **axes)
p4.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p4, expected)
arr = np.array(
[[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]], dtype=np.float64)
p5 = Panel(0., **axes)
p5.iloc[0, :, 0] = [1, 2]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p5, expected)
|
py | b40f0cb07ec61e32720a7509ce14f69208150864 | # -*- coding: utf-8 -*-
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from oauth2client.file import Storage
import ConfigParser
import os.path
import requests
from datetime import datetime
date_fmt = "%Y-%m-%dT%H:%M:%SZ"
config = ConfigParser.SafeConfigParser()
config.read("credential.cfg")
channel = "yoshifumitest0000"
storage = Storage("youtube.dat")
credentials = storage.get()
def check_credential(credentials):
if credentials is None or credentials.invalid == True:
return False
elif datetime.now() >= credentials.token_expiry:
return False
else:
return True
if not check_credential(credentials):
flow = OAuth2WebServerFlow(
client_id=config.get(channel, "client_id"),
client_secret=config.get(channel, "client_secret"),
scope="https://gdata.youtube.com",
user_agent="youtoube-testuploader/1.0")
credentials = run(flow, storage)
location = (r"http://uploads.gdata.youtube.com/" +
r"feeds/api/users/%s/uploads") % config.get(channel, "username")
bounary_string = "YOUTUBE_PARTIAL_UPLOAD"
video_filename = "test.mp4"
filesize = os.path.getsize(video_filename)
api_xml_request = """\
Content-Type: application/atom+xml; charset=UTF-8
<?xml version="1.0"?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:media="http://search.yahoo.com/mrss/"
xmlns:yt="http://gdata.youtube.com/schemas/2007">
<media:group>
<media:title type="plain">Test partial update</media:title>
<media:description type="plain">
Ko-san looks funny
</media:description>
<media:category
scheme="http://gdata.youtube.com/schemas/2007/categories.cat">People
</media:category>
<media:keywords>test, life, funny</media:keywords>
</media:group>
</entry>
"""
headers = {"Authorization": "Bearer %s" % credentials.access_token,
"GData-Version": "2",
"X-GData-Client": config.get(channel, "client_id"),
"X-GData-Key": "key=%s" % config.get(channel, "key"),
"Slug": video_filename,
"Content-Type": "multipart/related; boundary=%s" % bounary_string,
"User-Agent": "youtoube-testuploader/1.0",
"Transfer-Encoding": "chunked",
"Connection": "close"}
read_length = 100 * 1024
body = "--%s\r\n" % bounary_string
body += api_xml_request
body += "\r\n"
with open(video_filename, "rb") as f:
length = 0
while True:
body += "--%s\r\n" % bounary_string
body += "Content-Type: video/mp4\r\n"
body += "Content-Transfer-Encoding: binary\r\n"
chunk = f.read(read_length)
size = len(chunk)
if size == 0:
break
length += size
print length
body += "Content-Range: bytes %d/*\r\n\r\n" % size
body += chunk
body += "\r\n"
body += "Content-Range: bytes */%d\n\n" % length
body += "--%s--\r\n" % bounary_string
fp = open("hoge.bin", "wb")
fp.write(body)
fp.close()
try:
r = requests.post(location, data=body, headers=headers)
print r.text
except requests.exceptions.ConnectionError as e:
print e.message
|
py | b40f0ee68fda586fc0b7aa5a09c6c76df0e683ab | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import reactive.designate_bind_handlers as handlers
_when_args = {}
_when_not_args = {}
def mock_hook_factory(d):
def mock_hook(*args, **kwargs):
def inner(f):
# remember what we were passed. Note that we can't actually
# determine the class we're attached to, as the decorator only gets
# the function.
try:
d[f.__name__].append(dict(args=args, kwargs=kwargs))
except KeyError:
d[f.__name__] = [dict(args=args, kwargs=kwargs)]
return f
return inner
return mock_hook
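# Hedged usage sketch (illustrative only): mock_hook_factory builds a stand-in
# decorator that records how it was applied instead of wiring handlers into
# charms.reactive; the registry dict and handler below are assumptions.
#
#   _registry = {}
#
#   @mock_hook_factory(_registry)('some.state')
#   def handler():
#       pass
#
#   # _registry == {'handler': [{'args': ('some.state',), 'kwargs': {}}]}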
class TestDesignateHandlers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._patched_when = mock.patch('charms.reactive.when',
mock_hook_factory(_when_args))
cls._patched_when_started = cls._patched_when.start()
cls._patched_when_not = mock.patch('charms.reactive.when_not',
mock_hook_factory(_when_not_args))
cls._patched_when_not_started = cls._patched_when_not.start()
        # force a reload so the mock_hook decorators are re-applied:
# try except is Python2/Python3 compatibility as Python3 has moved
# reload to importlib.
try:
reload(handlers)
except NameError:
import importlib
importlib.reload(handlers)
@classmethod
def tearDownClass(cls):
cls._patched_when.stop()
cls._patched_when_started = None
cls._patched_when = None
cls._patched_when_not.stop()
cls._patched_when_not_started = None
cls._patched_when_not = None
# and fix any breakage we did to the module
try:
reload(handlers)
except NameError:
import importlib
importlib.reload(handlers)
def setUp(self):
self._patches = {}
self._patches_start = {}
def tearDown(self):
for k, v in self._patches.items():
v.stop()
setattr(self, k, None)
self._patches = None
self._patches_start = None
def patch(self, obj, attr, return_value=None):
mocked = mock.patch.object(obj, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def test_registered_hooks(self):
# test that the hooks actually registered the relation expressions that
# are meaningful for this interface: this is to handle regressions.
# The keys are the function names that the hook attaches to.
when_patterns = {
'setup_sync_target_alone': [('installed', )],
'send_info': [
('dns-backend.related', ),
('rndckey.available', ),
],
'config_changed': [
('dns-backend.related', ),
('rndckey.available', ),
],
'update_zones_from_peer': [
('cluster.connected', ),
('sync.request.sent', ),
],
'check_zone_status': [
('cluster.connected', ),
('installed', ),
],
'process_sync_requests': [
('cluster.connected', ),
('zones.initialised', ),
],
'assess_status': [('zones.initialised', )],
}
when_not_patterns = {
'install_packages': [('installed', )],
'setup_secret': [('rndckey.available', )],
'update_zones_from_peer': [('zones.initialised', )],
'setup_sync_target_alone': [
('cluster.connected', ),
('zones.initialised', ),
('sync.request.sent', ),
],
'check_zone_status': [
('zones.initialised', ),
('sync.request.sent', ),
],
}
# check the when hooks are attached to the expected functions
for t, p in [(_when_args, when_patterns),
(_when_not_args, when_not_patterns)]:
for f, args in t.items():
# check that function is in patterns
print(f)
self.assertTrue(f in p.keys())
# check that the lists are equal
newlist = [a['args'] for a in args]
self.assertEqual(newlist, p[f])
def test_install_packages(self):
self.patch(handlers.designate_bind, 'install')
self.patch(handlers.designate_bind, 'set_apparmor')
self.patch(handlers.reactive, 'set_state')
handlers.install_packages()
self.install.assert_called_once_with()
self.set_apparmor.assert_called_once_with()
self.set_state.assert_called_once_with('installed')
def test_setup_secret(self):
self.patch(handlers.designate_bind, 'init_rndckey')
self.patch(handlers.reactive, 'set_state')
self.init_rndckey.return_value = None
handlers.setup_secret()
self.assertFalse(self.set_state.called)
self.init_rndckey.return_value = 'secret'
handlers.setup_secret()
self.set_state.assert_called_with('rndckey.available')
def test_setup_info(self):
dnsclient = mock.MagicMock()
self.patch(handlers.designate_bind, 'get_rndc_secret')
self.patch(handlers.designate_bind, 'get_rndc_algorithm')
self.get_rndc_secret.return_value = 'secret'
self.get_rndc_algorithm.return_value = 'hmac-md5'
handlers.send_info(dnsclient)
dnsclient.send_rndckey_info.assert_called_once_with(
'secret',
'hmac-md5')
def test_config_changed(self):
self.patch(handlers.designate_bind, 'set_apparmor')
self.patch(handlers.designate_bind, 'render_all_configs')
handlers.config_changed('arg1', 'arg2')
self.set_apparmor.assert_called_once_with()
self.render_all_configs.assert_called_once_with(('arg1', 'arg2', ))
def test_setup_sync_target_alone(self):
self.patch(handlers.hookenv, 'is_leader')
self.patch(handlers.designate_bind, 'setup_sync')
self.patch(handlers.reactive, 'set_state')
self.is_leader.return_value = False
handlers.setup_sync_target_alone()
self.assertFalse(self.setup_sync.called)
self.assertFalse(self.set_state.called)
self.is_leader.return_value = True
handlers.setup_sync_target_alone()
self.setup_sync.assert_called_once_with()
self.set_state.assert_called_once_with('zones.initialised')
def test_update_zones_from_peer(self):
self.patch(handlers.designate_bind, 'retrieve_zones')
handlers.update_zones_from_peer('hacluster')
self.retrieve_zones.assert_called_once_with('hacluster')
def test_check_zone_status(self):
self.patch(handlers.hookenv, 'is_leader')
self.patch(handlers.reactive, 'set_state')
self.patch(handlers.designate_bind, 'get_sync_time')
self.patch(handlers.designate_bind, 'retrieve_zones')
self.patch(handlers.designate_bind, 'setup_sync')
self.patch(handlers.designate_bind, 'request_sync')
# Leader test: Retrieve sync
self.is_leader.return_value = True
self.get_sync_time.return_value = 100
handlers.check_zone_status('hacluster')
self.retrieve_zones.assert_called_once_with()
self.retrieve_zones.reset_mock()
# Leader test: Setup sync
self.is_leader.return_value = True
self.get_sync_time.return_value = None
handlers.check_zone_status('hacluster')
self.assertFalse(self.retrieve_zones.called)
self.setup_sync.assert_called_once_with()
self.set_state.assert_called_once_with('zones.initialised')
# Non-Leader test
self.is_leader.return_value = False
handlers.check_zone_status('hacluster')
self.request_sync.assert_called_once_with('hacluster')
def test_process_sync_requests(self):
self.patch(handlers.hookenv, 'is_leader')
self.patch(handlers.designate_bind, 'process_requests')
self.is_leader.return_value = False
handlers.process_sync_requests('hacluster')
self.assertFalse(self.process_requests.called)
self.process_requests.reset_mock()
self.is_leader.return_value = True
handlers.process_sync_requests('hacluster')
self.process_requests.assert_called_once_with('hacluster')
|
py | b40f0ffebb76413d3dde87ae8fd1080af145c182 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest.mock
import numpy as np
try:
import scipy.integrate as SCIPY_INT
except ImportError: # pragma: NO COVER
SCIPY_INT = None
from tests import utils as base_utils
from tests.unit import utils
FLOAT64 = np.float64 # pylint: disable=no-member
SPACING = np.spacing # pylint: disable=no-member
class Test_make_subdivision_matrices(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(degree):
from bezier.hazmat import curve_helpers
return curve_helpers.make_subdivision_matrices(degree)
def _helper(self, degree, expected_l, expected_r):
left, right = self._call_function_under_test(degree)
self.assertEqual(left, expected_l)
self.assertEqual(right, expected_r)
def test_linear(self):
from bezier.hazmat import curve_helpers
self._helper(
1,
curve_helpers._LINEAR_SUBDIVIDE_LEFT,
curve_helpers._LINEAR_SUBDIVIDE_RIGHT,
)
def test_quadratic(self):
from bezier.hazmat import curve_helpers
self._helper(
2,
curve_helpers._QUADRATIC_SUBDIVIDE_LEFT,
curve_helpers._QUADRATIC_SUBDIVIDE_RIGHT,
)
def test_cubic(self):
from bezier.hazmat import curve_helpers
self._helper(
3,
curve_helpers._CUBIC_SUBDIVIDE_LEFT,
curve_helpers._CUBIC_SUBDIVIDE_RIGHT,
)
def test_quartic(self):
expected_l = np.asfortranarray(
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 0.0, 1.0, 3.0, 6.0],
[0.0, 0.0, 0.0, 1.0, 4.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
]
)
expected_r = np.asfortranarray(
[
[1.0, 0.0, 0.0, 0.0, 0.0],
[4.0, 1.0, 0.0, 0.0, 0.0],
[6.0, 3.0, 1.0, 0.0, 0.0],
[4.0, 3.0, 2.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
]
)
col_scaling = np.asfortranarray([[1.0, 2.0, 4.0, 8.0, 16.0]])
expected_l /= col_scaling
expected_r /= col_scaling[:, ::-1]
self._helper(4, expected_l, expected_r)
class Test_subdivide_nodes(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.subdivide_nodes(nodes)
def _helper(self, nodes, expected_l, expected_r):
left, right = self._call_function_under_test(nodes)
self.assertEqual(left, expected_l)
self.assertEqual(right, expected_r)
def _points_check(self, nodes, pts_exponent=5):
from bezier.hazmat import curve_helpers
left, right = self._call_function_under_test(nodes)
# Using the exponent means that ds = 1/2**exp, which
# can be computed without roundoff.
num_pts = 2 ** pts_exponent + 1
left_half = np.linspace(0.0, 0.5, num_pts)
right_half = np.linspace(0.5, 1.0, num_pts)
unit_interval = np.linspace(0.0, 1.0, num_pts)
pairs = [(left, left_half), (right, right_half)]
for sub_curve, half in pairs:
# Make sure sub_curve([0, 1]) == curve(half)
self.assertEqual(
curve_helpers.evaluate_multi(nodes, half),
curve_helpers.evaluate_multi(sub_curve, unit_interval),
)
def test_line(self):
nodes = np.asfortranarray([[0.0, 4.0], [1.0, 6.0]])
expected_l = np.asfortranarray([[0.0, 2.0], [1.0, 3.5]])
expected_r = np.asfortranarray([[2.0, 4.0], [3.5, 6.0]])
self._helper(nodes, expected_l, expected_r)
def test_line_check_evaluate(self):
# Use a fixed seed so the test is deterministic and round
# the nodes to 8 bits of precision to avoid round-off.
nodes = utils.get_random_nodes(shape=(2, 2), seed=88991, num_bits=8)
self._points_check(nodes)
def test_quadratic(self):
nodes = np.asfortranarray([[0.0, 4.0, 7.0], [1.0, 6.0, 3.0]])
expected_l = np.asfortranarray([[0.0, 2.0, 3.75], [1.0, 3.5, 4.0]])
expected_r = np.asfortranarray([[3.75, 5.5, 7.0], [4.0, 4.5, 3.0]])
self._helper(nodes, expected_l, expected_r)
def test_quadratic_check_evaluate(self):
# Use a fixed seed so the test is deterministic and round
# the nodes to 8 bits of precision to avoid round-off.
nodes = utils.get_random_nodes(shape=(2, 3), seed=10764, num_bits=8)
self._points_check(nodes)
def test_cubic(self):
nodes = np.asfortranarray([[0.0, 4.0, 7.0, 6.0], [1.0, 6.0, 3.0, 5.0]])
expected_l = np.asfortranarray(
[[0.0, 2.0, 3.75, 4.875], [1.0, 3.5, 4.0, 4.125]]
)
expected_r = np.asfortranarray(
[[4.875, 6.0, 6.5, 6.0], [4.125, 4.25, 4.0, 5.0]]
)
self._helper(nodes, expected_l, expected_r)
def test_cubic_check_evaluate(self):
# Use a fixed seed so the test is deterministic and round
# the nodes to 8 bits of precision to avoid round-off.
nodes = utils.get_random_nodes(shape=(2, 4), seed=990077, num_bits=8)
self._points_check(nodes)
def test_dynamic_subdivision_matrix(self):
# Use a fixed seed so the test is deterministic and round
# the nodes to 8 bits of precision to avoid round-off.
nodes = utils.get_random_nodes(shape=(2, 5), seed=103, num_bits=8)
self._points_check(nodes)
def _evaluate_multi_non_unity(test_case):
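    # Shared helper: evaluate a 3D cubic at (lambda1, lambda2) pairs that do
    # not sum to one and compare the result against precomputed values.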
nodes = np.asfortranarray(
[[0.0, 0.5, 1.5, 2.0], [0.0, 3.0, 4.0, 8.0], [0.0, 1.0, 1.0, 1.0]]
)
lambda1 = np.asfortranarray([0.25, 0.5, 0.75])
lambda2 = np.asfortranarray([0.25, 0.125, -0.75])
result = test_case._call_function_under_test(nodes, lambda1, lambda2)
expected = np.asfortranarray(
[
[0.125, 0.0859375, 0.421875],
[0.453125, 0.390625, -2.109375],
[0.109375, 0.119140625, -0.421875],
]
)
test_case.assertEqual(result, expected)
def _evaluate_multi_constant(test_case):
num_vals = 257
lambda1 = np.linspace(0.0, 1.0, num_vals)
lambda2 = 1.0 - lambda1
# B(s) = [1]
nodes = np.ones((1, 8), order="F")
result = test_case._call_function_under_test(nodes, lambda1, lambda2)
expected = np.ones((1, num_vals), order="F")
test_case.assertEqual(result, expected)
class Test_evaluate_multi_vs(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, lambda1, lambda2):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_multi_vs(nodes, lambda1, lambda2)
def test_non_unity(self):
_evaluate_multi_non_unity(self)
def test_constant(self):
_evaluate_multi_constant(self)
def test_binomial_overflow_int32(self):
lambda1 = np.asfortranarray([0.5])
lambda2 = np.asfortranarray([0.5])
degree = 30
nodes = np.eye(degree + 1, order="F")
expected = np.asfortranarray(
[
1.0,
30.0,
435.0,
4060.0,
27405.0,
142506.0,
593775.0,
2035800.0,
5852925.0,
14307150.0,
30045015.0,
54627300.0,
86493225.0,
119759850.0,
145422675.0,
155117520.0,
145422675.0,
119759850.0,
86493225.0,
54627300.0,
30045015.0,
14307150.0,
5852925.0,
2035800.0,
593775.0,
142506.0,
27405.0,
4060.0,
435.0,
30.0,
1.0,
]
)
evaluated = self._call_function_under_test(nodes, lambda1, lambda2)
binomial_coefficients = evaluated.flatten() * 2.0 ** degree
self.assertEqual(expected, binomial_coefficients)
@unittest.skipIf(
base_utils.IS_LINUX and not base_utils.IS_64_BIT,
"32-bit is skipped on Linux",
)
def test_binomial_roundoff(self):
        lambda1 = np.asfortranarray([0.5])
        lambda2 = np.asfortranarray([0.5])
degree = 55
nodes = np.eye(degree + 1, order="F")
expected = np.asfortranarray(
[
1.0,
55.0,
1485.0,
26235.0,
341055.0,
3478761.0,
28989675.0,
202927725.0,
1217566350.0,
6358402050.0,
29248649430.0,
119653565850.0,
438729741450.0,
1451182990950.0,
4353548972850.0,
11899700525790.0,
29749251314475.0,
68248282427325.0,
144079707346575.0,
280576272201225.0,
505037289962205.0,
841728816603675.0,
1300853625660225.0,
1866442158555975.0,
2488589544741300.0,
3085851035479212.0,
3560597348629860.0 - 0.5,
3824345300380220.0 - 0.5,
3824345300380220.0 - 0.5,
3560597348629860.0 - 0.5,
3085851035479212.0 - 0.5,
2488589544741300.0 - 0.5,
1866442158555974.0 + 0.5,
1300853625660225.0 - 0.5 ** 2,
841728816603675.0 - 0.5 ** 3,
505037289962205.0 - 0.5 ** 4,
280576272201225.0 - 0.5 ** 4,
144079707346575.0 - 0.5 ** 5,
68248282427325.0 - 0.5 ** 6,
29749251314475.0 - 0.5 ** 7,
11899700525790.0 - 0.5 ** 8,
4353548972850.0 - 3 * 0.5 ** 11,
1451182990950.0 - 0.5 ** 11,
438729741450.0 - 3 * 0.5 ** 14,
119653565850.0 - 3 * 0.5 ** 16,
29248649430.0 - 3 * 0.5 ** 18,
6358402050.0 - 3 * 0.5 ** 20,
1217566350.0 - 0.5 ** 21,
202927725.0 - 3 * 0.5 ** 25,
28989675.0 - 0.5 ** 26,
3478761.0 - 0.5 ** 29,
341055.0 - 3 * 0.5 ** 34,
26235.0 - 0.5 ** 36,
1485.0 - 0.5 ** 40,
55.0 - 5 * 0.5 ** 47,
1.0,
],
)
        evaluated = self._call_function_under_test(nodes, lambda1, lambda2)
binomial_coefficients = evaluated.flatten() * 2.0 ** degree
self.assertEqual(expected, binomial_coefficients)
class Test_evaluate_multi_de_casteljau(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, lambda1, lambda2):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_multi_de_casteljau(
nodes, lambda1, lambda2
)
def test_non_unity(self):
_evaluate_multi_non_unity(self)
def test_constant(self):
_evaluate_multi_constant(self)
def test_binomial_no_roundoff(self):
        lambda1 = np.asfortranarray([0.5])
        lambda2 = np.asfortranarray([0.5])
degree = 55
nodes = np.eye(degree + 1, order="F")
expected = np.asfortranarray(
[
1.0,
55.0,
1485.0,
26235.0,
341055.0,
3478761.0,
28989675.0,
202927725.0,
1217566350.0,
6358402050.0,
29248649430.0,
119653565850.0,
438729741450.0,
1451182990950.0,
4353548972850.0,
11899700525790.0,
29749251314475.0,
68248282427325.0,
144079707346575.0,
280576272201225.0,
505037289962205.0,
841728816603675.0,
1300853625660225.0,
1866442158555975.0,
2488589544741300.0,
3085851035479212.0,
3560597348629860.0,
3824345300380220.0,
3824345300380220.0,
3560597348629860.0,
3085851035479212.0,
2488589544741300.0,
1866442158555975.0,
1300853625660225.0,
841728816603675.0,
505037289962205.0,
280576272201225.0,
144079707346575.0,
68248282427325.0,
29749251314475.0,
11899700525790.0,
4353548972850.0,
1451182990950.0,
438729741450.0,
119653565850.0,
29248649430.0,
6358402050.0,
1217566350.0,
202927725.0,
28989675.0,
3478761.0,
341055.0,
26235.0,
1485.0,
55.0,
1.0,
],
)
        evaluated = self._call_function_under_test(nodes, lambda1, lambda2)
binomial_coefficients = evaluated.flatten() * 2.0 ** degree
self.assertEqual(expected, binomial_coefficients)
class Test_evaluate_multi_barycentric(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, lambda1, lambda2):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_multi_barycentric(
nodes, lambda1, lambda2
)
def test_non_unity(self):
_evaluate_multi_non_unity(self)
def test_constant(self):
_evaluate_multi_constant(self)
def test_high_degree(self):
        lambda1 = np.asfortranarray([0.5])
        lambda2 = np.asfortranarray([0.5])
degree = 55
nodes = np.eye(degree + 1, order="F")
expected = np.asfortranarray(
[
1.0,
55.0,
1485.0,
26235.0,
341055.0,
3478761.0,
28989675.0,
202927725.0,
1217566350.0,
6358402050.0,
29248649430.0,
119653565850.0,
438729741450.0,
1451182990950.0,
4353548972850.0,
11899700525790.0,
29749251314475.0,
68248282427325.0,
144079707346575.0,
280576272201225.0,
505037289962205.0,
841728816603675.0,
1300853625660225.0,
1866442158555975.0,
2488589544741300.0,
3085851035479212.0,
3560597348629860.0,
3824345300380220.0,
3824345300380220.0,
3560597348629860.0,
3085851035479212.0,
2488589544741300.0,
1866442158555975.0,
1300853625660225.0,
841728816603675.0,
505037289962205.0,
280576272201225.0,
144079707346575.0,
68248282427325.0,
29749251314475.0,
11899700525790.0,
4353548972850.0,
1451182990950.0,
438729741450.0,
119653565850.0,
29248649430.0,
6358402050.0,
1217566350.0,
202927725.0,
28989675.0,
3478761.0,
341055.0,
26235.0,
1485.0,
55.0,
1.0,
],
)
        evaluated = self._call_function_under_test(nodes, lambda1, lambda2)
binomial_coefficients = evaluated.flatten() * 2.0 ** degree
self.assertEqual(expected, binomial_coefficients)
class Test_evaluate_multi(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, s_vals):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_multi(nodes, s_vals)
def test_linear(self):
num_vals = 129
s_vals = np.linspace(0.0, 1.0, num_vals)
# B(s) = [s + 1, 1 - 2 s, 3 s - 7]
nodes = np.asfortranarray([[1.0, 2.0], [1.0, -1.0], [-7.0, -4.0]])
result = self._call_function_under_test(nodes, s_vals)
expected = np.empty((3, num_vals), order="F")
expected[0, :] = 1.0 + s_vals
expected[1, :] = 1.0 - 2.0 * s_vals
expected[2, :] = -7.0 + 3.0 * s_vals
self.assertEqual(result, expected)
def test_quadratic(self):
num_vals = 65
s_vals = np.linspace(0.0, 1.0, num_vals)
# B(s) = [s(4 - s), 2s(2s - 1)]
nodes = np.asfortranarray([[0.0, 2.0, 3.0], [0.0, -1.0, 2.0]])
result = self._call_function_under_test(nodes, s_vals)
expected = np.empty((2, num_vals), order="F")
expected[0, :] = s_vals * (4.0 - s_vals)
expected[1, :] = 2.0 * s_vals * (2.0 * s_vals - 1.0)
self.assertEqual(result, expected)
class Test_vec_size(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, s_val):
from bezier.hazmat import curve_helpers
return curve_helpers.vec_size(nodes, s_val)
def test_linear(self):
nodes = np.asfortranarray([[0.0, 3.0], [0.0, -4.0]])
size = self._call_function_under_test(nodes, 0.25)
self.assertEqual(size, 0.25 * 5.0)
def test_quadratic(self):
nodes = np.asfortranarray([[0.0, 2.0, 1.0], [0.0, 3.0, 6.0]])
size = self._call_function_under_test(nodes, 0.5)
self.assertEqual(size, 3.25)
class Test_compute_length(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.compute_length(nodes)
def _scipy_skip(self):
if SCIPY_INT is None: # pragma: NO COVER
self.skipTest("SciPy not installed")
def test_invalid_size(self):
nodes = np.empty((2, 0), order="F")
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(nodes)
expected = ("Curve should have at least one node.",)
self.assertEqual(exc_info.exception.args, expected)
def test_degree_zero(self):
nodes = np.asfortranarray([[0.0], [0.0]])
length = self._call_function_under_test(nodes)
self.assertEqual(length, 0.0)
def test_linear(self):
nodes = np.asfortranarray([[0.0, 3.0], [0.0, 4.0]])
length = self._call_function_under_test(nodes)
self.assertEqual(length, 5.0)
def test_quadratic(self):
self._scipy_skip()
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 0.0]])
length = self._call_function_under_test(nodes)
# pylint: disable=no-member,assignment-from-no-return
# 2 INT_0^1 SQRT(16 s^2 - 16 s + 5) ds = SQRT(5) + sinh^{-1}(2)/2
arcs2 = np.arcsinh(2.0)
# pylint: enable=no-member,assignment-from-no-return
expected = np.sqrt(5.0) + 0.5 * arcs2
local_eps = abs(SPACING(expected))
self.assertAlmostEqual(length, expected, delta=local_eps)
def test_cubic(self):
self._scipy_skip()
nodes = np.asfortranarray([[0.0, 1.0, 2.0, 3.5], [0.0, 2.0, 0.0, 0.0]])
length = self._call_function_under_test(nodes)
# x(s) = s (s^2 + 6) / 2
# y(s) = 6 s (s - 1)^2
# x'(s)^2 + y'(s)^2 = (9/4)(145s^4 - 384s^3 + 356s^2 - 128s + 20)
expected = float.fromhex("0x1.05dd184047a7bp+2")
local_eps = abs(SPACING(expected))
self.assertAlmostEqual(length, expected, delta=local_eps)
class Test_elevate_nodes(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.elevate_nodes(nodes)
def test_linear(self):
nodes = np.asfortranarray([[0.0, 2.0], [0.0, 4.0]])
result = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]])
self.assertEqual(result, expected)
def test_quadratic(self):
nodes = np.asfortranarray(
[[0.0, 3.0, 6.0], [0.5, 0.5, 0.5], [0.75, 3.0, 2.25]]
)
result = self._call_function_under_test(nodes)
expected = np.asfortranarray(
[
[0.0, 2.0, 4.0, 6.0],
[0.5, 0.5, 0.5, 0.5],
[0.75, 2.25, 2.75, 2.25],
]
)
self.assertEqual(result, expected)
class Test_de_casteljau_one_round(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, lambda1, lambda2):
from bezier.hazmat import curve_helpers
return curve_helpers.de_casteljau_one_round(nodes, lambda1, lambda2)
def test_it(self):
nodes = np.asfortranarray([[0.0, 3.0], [1.0, 5.0]])
result = self._call_function_under_test(nodes, 0.25, 0.75)
self.assertEqual(result, np.asfortranarray([[2.25], [4.0]]))
class Test_specialize_curve(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes, start, end):
from bezier.hazmat import curve_helpers
return curve_helpers.specialize_curve(nodes, start, end)
def test_linear(self):
nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
result = self._call_function_under_test(nodes, 0.25, 0.75)
expected = np.asfortranarray([[0.25, 0.75], [0.25, 0.75]])
self.assertEqual(result, expected)
def test_against_subdivision(self):
import bezier
nodes = np.asfortranarray([[0.0, 1.0, 3.0], [1.0, 6.0, 5.0]])
curve = bezier.Curve(nodes, 2)
left, right = curve.subdivide()
left_nodes = self._call_function_under_test(nodes, 0.0, 0.5)
self.assertEqual(left.nodes, left_nodes)
right_nodes = self._call_function_under_test(nodes, 0.5, 1.0)
self.assertEqual(right.nodes, right_nodes)
def test_cubic(self):
nodes = np.asfortranarray(
[[0.0, 1.0, 1.0, 3.0], [0.0, -1.0, -2.0, 2.0]]
)
result = self._call_function_under_test(nodes, 0.125, 0.625)
expected = (
np.asfortranarray(
[[171, 375, 499, 735], [-187, -423, -579, -335]], dtype=FLOAT64
)
/ 512.0
)
self.assertEqual(result, expected)
def test_quartic(self):
nodes = np.asfortranarray(
[[0.0, 1.0, 1.0, 3.0, 3.0], [5.0, 6.0, 7.0, 6.0, 7.0]]
)
result = self._call_function_under_test(nodes, 0.5, 0.75)
expected = np.asfortranarray(
[
[1.5625, 1.78125, 2.015625, 2.2578125, 2.47265625],
[6.375, 6.4375, 6.46875, 6.484375, 6.5234375],
]
)
self.assertEqual(result, expected)
class Test_evaluate_hodograph(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(s, nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_hodograph(s, nodes)
def test_line(self):
nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
first_deriv1 = self._call_function_under_test(0.25, nodes)
expected = np.asfortranarray(nodes[:, [1]] - nodes[:, [0]])
self.assertEqual(first_deriv1, expected)
# Make sure it is the same elsewhere since
# the derivative curve is degree 0.
first_deriv2 = self._call_function_under_test(0.75, nodes)
self.assertEqual(first_deriv1, first_deriv2)
def test_quadratic(self):
nodes = np.asfortranarray([[0.0, 0.5, 1.25], [0.0, 1.0, 0.25]])
# This defines the curve
# B(s) = [s(s + 4)/4, s(8 - 7s)/4]
# B'(s) = [(2 + s)/2, (4 - 7s)/2]
for s_val in (0.0, 0.25, 0.5, 0.625, 0.875):
first_deriv = self._call_function_under_test(s_val, nodes)
self.assertEqual(first_deriv.shape, (2, 1))
self.assertEqual(first_deriv[0, 0], (2.0 + s_val) / 2.0)
self.assertEqual(first_deriv[1, 0], (4.0 - 7.0 * s_val) / 2.0)
def test_cubic(self):
nodes = np.asfortranarray(
[[0.0, 0.25, 0.75, 1.25], [0.0, 1.0, 0.5, 1.0]]
)
# This defines the curve
# B(s) = [s(3 + 3s - s^2)/4, s(5s^2 - 9s + 6)/2]
# B'(s) = [3(1 + 2s - s^2)/4, 3(5s^2 - 6s + 2)/2]
for s_val in (0.125, 0.5, 0.75, 1.0, 1.125):
first_deriv = self._call_function_under_test(s_val, nodes)
self.assertEqual(first_deriv.shape, (2, 1))
x_prime = 3.0 * (1.0 + 2.0 * s_val - s_val * s_val) / 4.0
self.assertEqual(first_deriv[0, 0], x_prime)
y_prime = 3.0 * (5.0 * s_val * s_val - 6.0 * s_val + 2.0) / 2.0
self.assertEqual(first_deriv[1, 0], y_prime)
class Test_get_curvature(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, tangent_vec, s):
from bezier.hazmat import curve_helpers
return curve_helpers.get_curvature(nodes, tangent_vec, s)
@staticmethod
def _get_tangent_vec(s, nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.evaluate_hodograph(s, nodes)
def test_line(self):
s = 0.5
nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
tangent_vec = self._get_tangent_vec(s, nodes)
result = self._call_function_under_test(nodes, tangent_vec, s)
self.assertEqual(result, 0.0)
def test_elevated_line(self):
s = 0.25
nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]])
tangent_vec = self._get_tangent_vec(s, nodes)
result = self._call_function_under_test(nodes, tangent_vec, s)
self.assertEqual(result, 0.0)
def test_quadratic(self):
s = 0.5
nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
tangent_vec = self._get_tangent_vec(s, nodes)
result = self._call_function_under_test(nodes, tangent_vec, s)
self.assertEqual(result, -4.0)
class Test_newton_refine(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, point, s):
from bezier.hazmat import curve_helpers
return curve_helpers.newton_refine(nodes, point, s)
def test_it(self):
nodes = np.asfortranarray(
[[0.0, 1.0, 3.0, 2.0], [0.0, -1.0, 2.0, 2.0], [0.0, 1.0, 2.0, 4.0]]
)
# curve(1/2) = p
point = np.asfortranarray([[1.75], [0.625], [1.625]])
new_s = self._call_function_under_test(nodes, point, 0.25)
self.assertEqual(110.0 * new_s, 57.0)
class Test_locate_point(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, point):
from bezier.hazmat import curve_helpers
return curve_helpers.locate_point(nodes, point)
def test_it(self):
nodes = np.asfortranarray(
[[0.0, 3.0, 1.0], [0.0, 0.0, 1.0], [0.0, -1.0, 3.0]]
)
# C(1/8) = p
point = np.asfortranarray([[43.0], [1.0], [-11.0]]) / 64
result = self._call_function_under_test(nodes, point)
self.assertEqual(result, 0.125)
def test_no_match(self):
nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
point = np.asfortranarray([[0.5], [2.0]])
self.assertIsNone(self._call_function_under_test(nodes, point))
def test_failure_on_invalid(self):
nodes = np.asfortranarray(
[[0.0, -1.0, 1.0, -0.75], [2.0, 0.0, 1.0, 1.625]]
)
point = np.asfortranarray([[-0.25], [1.375]])
with self.assertRaises(ValueError):
self._call_function_under_test(nodes, point)
def test_outside_left(self):
# Newton's method pushes the value slightly to the left of ``0.0``.
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 1.0, 0.0]])
point = np.asfortranarray([[0.0], [0.0]])
result = self._call_function_under_test(nodes, point)
self.assertEqual(result, 0.0)
def test_outside_right(self):
# Newton's method pushes the value slightly to the right of ``1.0``.
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 1.0, 0.0]])
point = np.asfortranarray([[2.0], [0.0]])
result = self._call_function_under_test(nodes, point)
self.assertEqual(result, 1.0)
class Test_reduce_pseudo_inverse(utils.NumPyTestCase):
EPS = 0.5 ** 52
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.reduce_pseudo_inverse(nodes)
def test_to_constant(self):
nodes = np.asfortranarray([[-2.0, -2.0], [1.0, 1.0]])
result = self._call_function_under_test(nodes)
expected = np.asfortranarray([[-2.0], [1.0]])
self.assertEqual(result, expected)
def test_to_linear(self):
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]])
result = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 2.0], [0.0, 4.0]])
self.assertEqual(result, expected)
def _actually_inverse_helper(self, degree):
from bezier.hazmat import curve_helpers
from bezier.hazmat import helpers
nodes = np.eye(degree + 2, order="F")
reduction_mat = self._call_function_under_test(nodes)
id_mat = np.eye(degree + 1, order="F")
elevation_mat = curve_helpers.elevate_nodes(id_mat)
result = helpers.matrix_product(elevation_mat, reduction_mat)
return result, id_mat
def test_to_linear_actually_inverse(self):
result, id_mat = self._actually_inverse_helper(1)
self.assertEqual(result, id_mat)
def test_from_quadratic_not_elevated(self):
from bezier.hazmat import curve_helpers
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 1.5, 0.0]])
result = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 2.0], [0.5, 0.5]])
self.assertEqual(result, expected)
re_elevated = curve_helpers.elevate_nodes(result)
self.assertTrue(np.any(nodes != re_elevated))
def test_to_quadratic(self):
nodes = np.asfortranarray(
[
[0.0, 2.0, 4.0, 6.0],
[0.5, 0.5, 0.5, 0.5],
[0.75, 2.25, 2.75, 2.25],
]
)
result = self._call_function_under_test(nodes)
expected = np.asfortranarray(
[[0.0, 3.0, 6.0], [0.5, 0.5, 0.5], [0.75, 3.0, 2.25]]
)
self.assertEqual(result, expected)
def test_to_quadratic_actually_inverse(self):
result, id_mat = self._actually_inverse_helper(2)
max_err = np.abs(result - id_mat).max()
self.assertLess(max_err, self.EPS)
def test_to_cubic(self):
nodes = np.asfortranarray([[0.0, 0.75, 2.0, 2.75, 2.0]])
result = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 1.0, 3.0, 2.0]])
self.assertEqual(result, expected)
def test_to_cubic_actually_inverse(self):
result, id_mat = self._actually_inverse_helper(3)
max_err = np.abs(result - id_mat).max()
self.assertLess(max_err, self.EPS)
def test_unsupported_degree(self):
from bezier.hazmat import helpers
degree = 5
nodes = utils.get_random_nodes(
shape=(2, degree + 1), seed=3820, num_bits=8
)
with self.assertRaises(helpers.UnsupportedDegree) as exc_info:
self._call_function_under_test(nodes)
self.assertEqual(exc_info.exception.degree, degree)
self.assertEqual(exc_info.exception.supported, (1, 2, 3, 4))
class Test_projection_error(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, projected):
from bezier.hazmat import curve_helpers
return curve_helpers.projection_error(nodes, projected)
def test_it(self):
nodes = np.asfortranarray([[0.0, 3.0], [4.0, 0.0]])
result = self._call_function_under_test(nodes, nodes)
self.assertEqual(result, 0.0)
projected = np.asfortranarray([[0.5, 2.5], [4.5, 0.5]])
result = self._call_function_under_test(nodes, projected)
self.assertEqual(5.0 * result, 1.0)
def test_nodes_zero(self):
nodes = np.asfortranarray([[0.0], [0.0]])
result = self._call_function_under_test(nodes, nodes)
self.assertEqual(result, 0.0)
class Test_maybe_reduce(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.maybe_reduce(nodes)
def _low_degree_helper(self, nodes):
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertFalse(was_reduced)
self.assertIs(new_nodes, nodes)
def test_low_degree(self):
nodes = np.asfortranarray([[1.0], [1.0]])
self._low_degree_helper(nodes)
nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
self._low_degree_helper(nodes)
# NOTE: This **should** be reduced, but we don't bother reducing
# to a point (since it isn't a curve).
nodes = np.asfortranarray([[2.0, 2.0], [2.0, 2.0]])
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertTrue(was_reduced)
expected = np.asfortranarray([[2.0], [2.0]])
self.assertEqual(new_nodes, expected)
def test_to_linear(self):
nodes = np.asfortranarray([[0.0, 1.0, 2.0], [3.0, 3.5, 4.0]])
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertTrue(was_reduced)
expected = np.asfortranarray([[0.0, 2.0], [3.0, 4.0]])
self.assertEqual(expected, new_nodes)
def test_to_quadratic(self):
nodes = np.asfortranarray([[3.0, 2.0, 1.0, 0.0], [0.0, 2.0, 2.0, 0.0]])
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertTrue(was_reduced)
expected = np.asfortranarray([[3.0, 1.5, 0.0], [0.0, 3.0, 0.0]])
self.assertEqual(expected, new_nodes)
def test_from_cubic_not_elevated(self):
nodes = np.asfortranarray(
[[0.0, -1.0, 1.0, -0.75], [2.0, 0.0, 1.0, 1.625]]
)
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertFalse(was_reduced)
self.assertIs(new_nodes, nodes)
def test_to_cubic(self):
nodes = np.asfortranarray(
[[0.0, 0.75, 2.0, 3.5, 5.0], [0.0, 1.5, 2.5, 3.0, 3.0]]
)
was_reduced, new_nodes = self._call_function_under_test(nodes)
self.assertTrue(was_reduced)
expected = np.asfortranarray(
[[0.0, 1.0, 3.0, 5.0], [0.0, 2.0, 3.0, 3.0]]
)
self.assertEqual(expected, new_nodes)
def test_unsupported_degree(self):
from bezier.hazmat import helpers
degree = 5
nodes = utils.get_random_nodes(
shape=(2, degree + 1), seed=77618, num_bits=8
)
with self.assertRaises(helpers.UnsupportedDegree) as exc_info:
self._call_function_under_test(nodes)
self.assertEqual(exc_info.exception.degree, degree)
self.assertEqual(exc_info.exception.supported, (0, 1, 2, 3, 4))
class Test_full_reduce(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.full_reduce(nodes)
def test_linear(self):
nodes = np.asfortranarray([[5.5, 5.5]])
new_nodes = self._call_function_under_test(nodes)
expected = np.asfortranarray([[5.5]])
self.assertEqual(expected, new_nodes)
def test_single(self):
nodes = np.asfortranarray([[0.0, 2.0, 4.0, 6.0], [0.0, 4.0, 6.0, 6.0]])
new_nodes = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 3.0, 6.0], [0.0, 6.0, 6.0]])
self.assertEqual(expected, new_nodes)
def test_multiple(self):
nodes = np.asfortranarray([[0.0, 1.0, 2.0, 3.0], [4.0, 4.5, 5.0, 5.5]])
new_nodes = self._call_function_under_test(nodes)
expected = np.asfortranarray([[0.0, 3.0], [4.0, 5.5]])
self.assertEqual(expected, new_nodes)
def test_no_reduce(self):
nodes = np.asfortranarray(
[[0.0, -1.0, 1.0, -0.75], [2.0, 0.0, 1.0, 1.625]]
)
new_nodes = self._call_function_under_test(nodes)
self.assertIs(new_nodes, nodes)
def test_unsupported_degree(self):
from bezier.hazmat import helpers
degree = 5
nodes = utils.get_random_nodes(
shape=(2, degree + 1), seed=360009, num_bits=8
)
with self.assertRaises(helpers.UnsupportedDegree) as exc_info:
self._call_function_under_test(nodes)
self.assertEqual(exc_info.exception.degree, degree)
self.assertEqual(exc_info.exception.supported, (0, 1, 2, 3, 4))
class Test_discrete_turning_angle(utils.NumPyTestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import curve_helpers
return curve_helpers.discrete_turning_angle(nodes)
def test_linear(self):
nodes = np.asfortranarray(
[
[0.0, 1.0],
[0.0, 4.0],
]
)
angle = self._call_function_under_test(nodes)
self.assertEqual(0.0, angle)
def test_overshoot_pi(self):
nodes = np.asfortranarray(
[
[1.125, 0.625, 0.125],
[0.5, -0.5, 0.5],
]
)
angle = self._call_function_under_test(nodes)
expected = float.fromhex("0x1.1b6e192ebbe44p+1")
local_eps = abs(SPACING(expected))
self.assertAlmostEqual(expected, angle, delta=local_eps)
def test_undershoot_minus_pi(self):
nodes = np.asfortranarray(
[
[11.0, 7.0, 3.0],
[8.0, 10.0, 4.0],
]
)
angle = self._call_function_under_test(nodes)
expected = float.fromhex("0x1.7249faa996a21p+0")
local_eps = abs(SPACING(expected))
self.assertAlmostEqual(expected, angle, delta=local_eps)
|
py | b40f1026b39bfd7c141595d6f743bb586746b58b | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_gallery # noqa
from sphinx_gallery.sorting import ExplicitOrder
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
project = "rlberry"
copyright = "2022, rlberry team"
author = "rlberry team"
ver_file = os.path.join("../rlberry", "_version.py")
with open(ver_file) as f:
exec(f.read())
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"sphinxcontrib.video",
"numpydoc",
"sphinx_gallery.gen_gallery",
"myst_parser",
]
autodoc_default_options = {"members": True, "inherited-members": True}
# generate autosummary even if no references
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "themes"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# Copied from scikit-learn:
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get("NO_MATHJAX"):
extensions.append("sphinx.ext.imgmath")
imgmath_image_format = "svg"
mathjax_path = ""
else:
extensions.append("sphinx.ext.mathjax")
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "scikit-learn-fork"
html_theme_options = {"mathjax_path": mathjax_path}
html_theme_path = ["themes"]
html_logo = "../assets/logo_wide.svg"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
html_extra_path = ["_video"]
sphinx_gallery_conf = {
"doc_module": "rlberry",
"backreferences_dir": os.path.join("generated"),
"reference_url": {"rlberry": None},
"matplotlib_animations": True,
"remove_config_comments": True,
"subsection_order": ExplicitOrder(
[
"../examples/demo_env",
"../examples/demo_agents",
"../examples/demo_bandits",
"../examples/demo_examples",
]
),
}
|
py | b40f10c1191d71b95767478c5a66545da3835a25 | """An experiment that enables automatically removing reactions added to menus.
"""
import discord
from discord.ext import menus
_old_update = menus.Menu.update
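# Wrapper around the original Menu.update: after the normal handling, remove
# the reaction the user just added, provided the bot has permission to do so.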
async def update(self: menus.Menu, payload: discord.RawReactionActionEvent):
await _old_update(self, payload)
if payload.event_type != "REACTION_ADD":
return
permissions = self.ctx.channel.permissions_for(self.ctx.me)
if not (permissions.manage_messages or permissions.administrator):
return
await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id))
update.__doc__ = _old_update.__doc__
menus.Menu.update = update
|
py | b40f110c3679383c845244d83952a065aa5bc9a5 | import requests
from canis import oauth
from canis.song import Song
from canis.memoize import memoize
class NotFound(Exception):
def __init__(self, song):
self.song = song
def id_for_song(song):
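    # Search Spotify's track search endpoint for the song's title and artist
    # (and album, when known) and return the URI of the first matching track.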
album_subq = '%20album:{}'.format(song.album) if song.album else ''
query = 'q=track:{}%20artist:{}{}&type=track'.format(song.title, song.artist, album_subq)
r = requests.get('https://api.spotify.com/v1/search?{}'.format(query), headers=headers())
json = r.json()
try:
track = json['tracks']['items'][0]
except Exception:
# Sometimes album is incorrect, try again without album
if song.album:
stripped = Song(song.title, song.artist, None)
return id_for_song(stripped)
raise NotFound(song)
return track['uri']
@memoize
def playlist_id_for_name(name):
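    # Return the id of the current user's playlist with this name, creating the
    # playlist if no match is found; results are memoized per name.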
url = 'https://api.spotify.com/v1/users/{}/playlists?limit=50'.format(oauth.user_id)
r = requests.get(url, headers=headers())
resp = r.json()
for playlist in resp['items']:
if playlist['name'] == name:
return playlist['id']
return create_playlist(name)
def create_playlist(name):
url = 'https://api.spotify.com/v1/users/{}/playlists'.format(oauth.user_id)
r = requests.post(url, json={'name': name, 'public': 'false'}, headers=headers())
resp = r.json()
return resp['id']
def add_song_to_playlist(song_id, playlist_id):
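    # Append the track URI to the playlist via the playlist tracks endpoint.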
url = 'https://api.spotify.com/v1/users/{}/playlists/{}/tracks'.format(oauth.user_id, playlist_id)
r = requests.post(url, json={'uris': [song_id]}, headers=headers())
def headers():
return {'Authorization': 'Bearer {}'.format(oauth.access_token)} |
py | b40f1201124d5bec57e0586aff8f41937d45f0cd | """Module providing a taskcontroller that runs tasks serially."""
from ..taskcontroller.base import _BaseController
class SerialController(_BaseController):
"""A simple taskcontroller that runs tasks in serial in one process.
This is just the default, null task controller.
"""
def run(self, tasks):
results = [t.run() for t in tasks]
# to make the results look like deferred results
for r in results:
r.taskid = 0
return results
|
py | b40f128c6ba09d4edd85e0702be456f0fa939414 | import tensorflow as tf
import tensorflow.keras.backend as K
from skatingAI.nets.hrnet.HPNetBase import HPNetBase
layers = tf.keras.layers
BN_MOMENTUM = 0.01
class HPNet(HPNetBase):
def _build_model(self) -> tf.keras.Model:
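        # HRNet-style multi-resolution graph: the input is strided down into
        # progressively coarser branches, each branch runs through conv blocks,
        # and the coarse branches are strided back up and concatenated before
        # a final softmax convolution produces the output channels.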
input = self.conv3x3_block(self.bgnet_input, filter_counts=[16, 16, 33], name="input")
input = layers.concatenate([input, self.inputs])
# --------------first-block-------------------#
# input = self.stride_down(self.inputs, name="input")
block_l_1 = self.conv3x3_block(input, filter_counts=[16, 16, 16, 16], name="1bl")
block_m_1 = self.stride_down(block_l_1, name="1bm")
block_s_1 = self.stride_down(block_m_1, 1, k=4, name="1bs")
block_xs_1 = self.stride_down(block_s_1, 1, k=4, name="1bxs")
block_l_1 = self.conv3x3_block(block_l_1, 2, filter_counts=[16, 16, 16, 16], name="2bl")
block_m_1 = self.conv3x3_block(block_m_1, 2, filter_counts=[16, 32, 32, 64], name="2bm")
block_s_1 = self.conv3x3_block(block_s_1, 2, filter_counts=[32, 64, 64, 128], name="2bs")
block_xs_1 = self.conv3x3_block(block_xs_1, 2, filter_counts=[32, 64, 64, 128], name="2bxs")
block_s_2 = self.stride_up(block_xs_1, 3, k=4, name="3bs")
block_s_2 = layers.concatenate([block_s_1, block_s_2])
block_m_2 = self.stride_up(block_s_2, 3, k=4, name="3bm")
block_m_2 = layers.concatenate([block_m_1, block_m_2])
block_l_2 = self.stride_up(block_m_2, 3, name="3bl")
concat = layers.concatenate([block_l_1, block_l_2])
self.outputs = layers.Conv2D(filters=self.output_channels, kernel_size=3,
activation='softmax',
padding="same",
name=f"output")(concat)
model = tf.keras.Model(inputs=self.inputs, outputs=self.outputs)
return model
|
py | b40f12b306629752373b7d8cdb489c198346365f | from matplotlib import pyplot
from PlotInfo import PlotInfo
from Marker import Marker
from LabelProperties import LabelProperties
class Label(PlotInfo):
"""
Labels a point on the plot with text and/or arrows
"""
def __init__(self, x, y, text=None, bbox=None):
PlotInfo.__init__(self, "label")
self.x = x
"""
The label's x coordinate
"""
self.y = y
"""
The label's y coordinate
"""
self.text = text
"""
The text that should be displayed with the label
"""
self.textX = x
self.textY = y
self.arrow = None
self._marker = Marker()
self._labelProperties = LabelProperties()
if bbox:
self.bbox = dict(bbox)
else:
self.bbox = None
@property
def marker(self):
"""
The marker type that should be used to mark the labeled point
"""
return self._marker.marker
@marker.setter
def marker(self, value):
self._marker.marker = value
@property
def textOffset(self):
return (self.textX - self.x, self.textY - self.y)
@textOffset.setter
def textOffset(self, offset):
if type(offset) not in [tuple, list] or len(offset) != 2:
            raise AttributeError("Expected a two-element tuple when "
                                 "setting textOffset")
self.setTextOffset(offset[0], offset[1])
def setTextOffset(self, x, y):
self.textX = self.x + x
self.textY = self.y + y
@property
def textPosition(self):
return (self.textX, self.textY)
@textPosition.setter
def textPosition(self, pos):
if type(pos) not in [tuple, list] or len(pos) != 2:
            raise AttributeError("Expected a two-element tuple when "
                                 "setting textPosition")
self.setTextPosition(pos[0], pos[1])
def setTextPosition(self, x, y):
self.textX = x
self.textY = y
@property
def labelProperties(self):
"""
A dictionary of properties that control the appearance of the label. See
:ref:`styling-labels` for more information on which properties can be
set.
"""
return self._labelProperties
@labelProperties.setter
def labelProperties(self, propsobj):
self.labelProperties.update(propsobj)
@property
def rotation(self):
return self._labelProperties["rotation"]
@rotation.setter
def rotation(self, value):
self._labelProperties["rotation"] = value
def hasArrow(self, style="->", color="black"):
"""
Defines an arrow between the label's text and its point. Valid arrow
styles are given in `Matplotlib's documentation <http://matplotlib.github.com/users/annotations_guide.html?highlight=arrowprops#annotating-with-arrow>`_.
"""
self.arrow = dict(facecolor=color, arrowstyle=style)
def draw(self, fig, axis, transform=None):
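        # Annotate the labeled point with the text (plus optional arrow and
        # bbox properties) and, if a marker is set, scatter it at the point.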
kwdict = {}
kwdict["xytext"] = (self.textX, self.textY)
kwdict["xycoords"] = "data"
kwdict["textcoords"] = "data"
kwdict["arrowprops"] = self.arrow
kwdict["horizontalalignment"] = "center"
kwdict.update(self.labelProperties)
# For props, see
# http://matplotlib.sourceforge.net/api/artist_api.html#matplotlib.patches.Rectangle
if self.bbox: kwdict["bbox"] = self.bbox
handles = []
labels = []
handles.append(axis.annotate(self.text, (self.x, self.y), **kwdict))
labels.append(None)
if self.marker is not None:
handles.append(axis.scatter([self.x],[self.y],marker=self.marker,
color="black"))
labels.append(None)
return [handles, labels]
|
py | b40f13684bac27dbe9661e5f0cd57828d06da5cc | # coding=utf-8
data_path = '../data'
cv_train_num = 100000  # used for cross-validation
train_num = 120000
test_num = 90000
w2v_dim = 300
seed = 2017
|
py | b40f13ec5064e8279fca965e4656f22893257b68 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 NII.
#
# invenio-iiif-manifest is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Sphinx configuration."""
import os
import sphinx.environment
# -- General configuration ------------------------------------------------
# Do not warn on external images.
suppress_warnings = ['image.nonlocal_uri']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'invenio-iiif-manifest'
copyright = u'2018, NII'
author = u'NII'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join(os.path.dirname(__file__), '..',
'invenio_iiif_manifest', 'version.py'),
'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'description': 'Invenio module that adds more fun to the platform.',
'github_user': 'inveniosoftware',
'github_repo': 'invenio-iiif-manifest',
'github_button': False,
'github_banner': True,
'show_powered_by': False,
'extra_nav_links': {
'invenio-iiif-manifest@GitHub': 'https://github.com/inveniosoftware/invenio-iiif-manifest',
'invenio-iiif-manifest@PyPI': 'https://pypi.python.org/pypi/invenio-iiif-manifest/',
}
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-iiif-manifest_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'invenio-iiif-manifest.tex', u'invenio-iiif-manifest Documentation',
u'NII', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invenio-iiif-manifest', u'invenio-iiif-manifest Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'invenio-iiif-manifest', u'invenio-iiif-manifest Documentation',
author, 'invenio-iiif-manifest', 'Invenio module that adds more fun to the platform.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {
# 'python': ('https://docs.python.org/', None),
# # TODO: Configure external documentation references, eg:
# # 'Flask-Admin': ('https://flask-admin.readthedocs.io/en/latest/', None),
# }
# Autodoc configuraton.
autoclass_content = 'both'
|
py | b40f15ffc544c104bb1abc1711fe802b22173c85 | __author__ = 'tkapriya'
|
py | b40f163c50c1832a019ed5513dfc81018ff9d921 | import pytest
from quantities import quantity
from quantities import length
from quantities import time
from quantities import velocity
from quantities import thermodynamic_temperature
def test_QuantityType_multiply_divide():
v_type = velocity.VelocityType
l_type = length.LengthType
t_type = time.TimeType
tem_type = thermodynamic_temperature.ThermodynamicTemperatureType
assert l_type == v_type.multiply(t_type)
assert l_type == t_type.multiply(v_type)
assert v_type == l_type.divide(t_type)
assert t_type == l_type.divide(v_type)
with pytest.raises(TypeError):
l_type.multiply(v_type)
with pytest.raises(TypeError):
l_type.multiply(None)
with pytest.raises(TypeError):
l_type.multiply(int)
with pytest.raises(TypeError):
l_type.divide(None)
with pytest.raises(TypeError):
l_type.divide(str)
with pytest.raises(TypeError):
v_type.multiply(l_type)
with pytest.raises(TypeError):
v_type.multiply(None)
with pytest.raises(TypeError):
v_type.multiply(float)
with pytest.raises(TypeError):
v_type.divide(tem_type)
with pytest.raises(TypeError):
v_type.divide(tem_type)
with pytest.raises(TypeError):
v_type.divide(float)
with pytest.raises(TypeError):
v_type.divide(None)
with pytest.raises(TypeError):
t_type.multiply(l_type)
with pytest.raises(TypeError):
t_type.multiply(None)
with pytest.raises(TypeError):
t_type.multiply(float)
with pytest.raises(TypeError):
t_type.divide(l_type)
with pytest.raises(TypeError):
t_type.divide(dict)
with pytest.raises(TypeError):
t_type.divide(None)
def test_QuantityType_add_substract():
l_type = length.LengthType
v_type = velocity.VelocityType
with pytest.raises(TypeError):
l_type.add(v_type)
with pytest.raises(TypeError):
l_type.substract(v_type)
with pytest.raises(TypeError):
l_type.add(float)
with pytest.raises(TypeError):
l_type.substract(None)
assert l_type == l_type.add(l_type)
assert l_type == l_type.substract(l_type) |
py | b40f163f7bb49c22bf7c0965c72ef59c51aea23c | import unittest
import operator as op
from redex import combinator as cb
from redex.function import Signature
class CombinatorTest(unittest.TestCase):
def test_combinator_is_dataclass(self):
class A(cb.Combinator):
a: int
pass
try:
A(signature=Signature(n_in=0, n_out=0), a=0)
except:
self.fail("__init__ is not implemented for Add class.")
class SerialTest(unittest.TestCase):
def test_signature(self):
serial = cb.serial(op.add, op.add)
self.assertEqual(serial.signature.n_in, 3)
self.assertEqual(serial.signature.n_out, 1)
self.assertEqual(serial.signature.in_shape, ((), (), ()))
def test_empty(self):
serial = cb.serial()
self.assertEqual(serial(1, 2, 3, 4), (1, 2, 3, 4))
def test_single_child(self):
serial = cb.serial(op.add)
self.assertEqual(serial(1, 2), 1 + 2)
def test_many_children(self):
serial = cb.serial(op.add, op.sub, op.add)
self.assertEqual(serial(1, 2, 3, 4), ((1 + 2) - 3) + 4)
def test_nested(self):
serial = cb.serial(cb.serial(op.add, op.sub), op.add)
self.assertEqual(serial(1, 2, 3, 4), ((1 + 2) - 3) + 4)
def test_nested_aslist(self):
serial = cb.serial([op.add, [op.sub, op.add]])
self.assertEqual(serial(1, 2, 3, 4), ((1 + 2) - 3) + 4)
def test_extra_input(self):
serial = cb.serial(op.add)
self.assertEqual(serial(1, 2, 3, 4), (1 + 2, 3, 4))
def test_less_input(self):
serial = cb.serial(op.add)
with self.assertRaises(ValueError):
serial(1)
class BranchTest(unittest.TestCase):
def test_signature(self):
branch = cb.branch(op.add, op.add)
self.assertEqual(branch.signature.n_in, 2)
self.assertEqual(branch.signature.n_out, 2)
self.assertEqual(branch.signature.in_shape, ((), ()))
def test_empty(self):
branch = cb.branch()
self.assertEqual(branch(1, 2, 3, 4), (1, 2, 3, 4))
def test_single_child(self):
branch = cb.branch(op.add)
self.assertEqual(branch(1, 2), 1 + 2)
def test_many_children(self):
branch = cb.branch(op.add, op.add)
self.assertEqual(branch(1, 2), (1 + 2, 1 + 2))
def test_nested(self):
branch = cb.branch(cb.branch(op.add, op.add), op.add)
self.assertEqual(branch(1, 2), (1 + 2, 1 + 2, 1 + 2))
def test_nested_aslist(self):
branch = cb.branch([op.add, [op.sub, op.add]])
self.assertEqual(branch(1, 2), (1 + 2, 1 - 2, 1 + 2))
def test_extra_input(self):
branch = cb.branch(op.add)
self.assertEqual(branch(1, 2, 3, 4), (1 + 2, 3, 4))
def test_less_input(self):
branch = cb.branch(op.add)
with self.assertRaises(ValueError):
branch(1)
class ParallelTest(unittest.TestCase):
def test_signature(self):
parallel = cb.parallel(op.add, op.add)
self.assertEqual(parallel.signature.n_in, 4)
self.assertEqual(parallel.signature.n_out, 2)
self.assertEqual(parallel.signature.in_shape, ((), (), (), ()))
def test_empty(self):
parallel = cb.parallel()
self.assertEqual(parallel(1, 2, 3, 4), (1, 2, 3, 4))
def test_single_child(self):
parallel = cb.parallel(op.add)
self.assertEqual(parallel(1, 2), 1 + 2)
def test_many_children(self):
parallel = cb.parallel(op.add, op.sub)
self.assertEqual(parallel(1, 2, 3, 4), (1 + 2, 3 - 4))
def test_nested(self):
parallel = cb.parallel(cb.parallel(op.add, op.sub), op.add)
self.assertEqual(parallel(1, 2, 3, 4, 5, 6), (1 + 2, 3 - 4, 5 + 6))
def test_nested_aslist(self):
parallel = cb.parallel([[op.add, op.sub], op.add])
self.assertEqual(parallel(1, 2, 3, 4, 5, 6), (1 + 2, 3 - 4, 5 + 6))
def test_extra_input(self):
parallel = cb.parallel(op.add)
self.assertEqual(parallel(1, 2, 3, 4), (1 + 2, 3, 4))
def test_less_input(self):
parallel = cb.parallel(op.add)
with self.assertRaises(ValueError):
parallel(1)
class ResidualTest(unittest.TestCase):
def test_signature(self):
residual = cb.residual(op.add, op.add)
self.assertEqual(residual.signature.n_in, 3)
self.assertEqual(residual.signature.n_out, 1)
self.assertEqual(residual.signature.in_shape, ((), (), ()))
def test_empty(self):
with self.assertRaises(ValueError):
cb.residual()
def test_single_child(self):
residual = cb.residual(op.add)
self.assertEqual(residual(1, 2), (1 + 2) + 1)
def test_many_children(self):
residual = cb.residual(op.add, op.sub)
self.assertEqual(residual(1, 2, 3), ((1 + 2) - 3) + 1)
def test_single_shortcut(self):
residual = cb.residual(op.add, shortcut=op.sub)
self.assertEqual(residual(1, 2), (1 + 2) + (1 - 2))
def test_many_shortcuts(self):
residual = cb.residual(op.add, shortcut=cb.serial(op.add, op.sub))
self.assertEqual(residual(1, 2, 3), (1 + 2) + (1 + 2 - 3))
def test_nested(self):
residual = cb.residual(cb.residual(op.add, op.sub), op.add)
self.assertEqual(residual(1, 2, 3, 4), ((((1 + 2) - 3) + 1) + 4) + 1)
def test_nested_aslist(self):
residual = cb.residual([op.add, [op.sub, op.add]])
self.assertEqual(residual(1, 2, 3, 4), ((1 + 2) - 3) + 4 + 1)
def test_extra_input(self):
residual = cb.residual(op.add)
self.assertEqual(residual(1, 2, 3, 4), ((1 + 2) + 1, 3, 4))
def test_less_input(self):
residual = cb.residual(op.add)
with self.assertRaises(ValueError):
residual(1)
def test_extra_output(self):
with self.assertRaises(ValueError):
cb.residual(cb.dup())
def test_less_output(self):
with self.assertRaises(ValueError):
cb.residual(cb.drop())
def test_extra_shortcut_output(self):
with self.assertRaises(ValueError):
cb.residual(op.add, shortcut=cb.dup())
def test_less_shortcut_output(self):
with self.assertRaises(ValueError):
cb.residual(op.add, shortcut=cb.drop())
class SelectTest(unittest.TestCase):
def test_signature(self):
select = cb.select(indices=[0])
self.assertEqual(select.signature.n_in, 1)
self.assertEqual(select.signature.n_out, 1)
self.assertEqual(select.signature.in_shape, ((),))
def test_empty(self):
select = cb.select(indices=[])
self.assertEqual(select(1, 2, 3, 4), (1, 2, 3, 4))
def test_single_1st_item(self):
select = cb.select(indices=[0])
self.assertEqual(select(1, 2, 3, 4), (1, 2, 3, 4))
def test_many_1st_items(self):
select = cb.select(indices=[0, 0])
self.assertEqual(select(1, 2, 3, 4), (1, 1, 2, 3, 4))
def test_many_3rd_items(self):
select = cb.select(indices=[2, 2])
self.assertEqual(select(1, 2, 3, 4), (3, 3, 4))
def test_consume_more(self):
select = cb.select(indices=[0], n_in=2)
self.assertEqual(select(1, 2, 3, 4), (1, 3, 4))
def test_consume_less(self):
select = cb.select(indices=[2], n_in=1)
self.assertEqual(select(1, 2, 3, 4), (3, 2, 3, 4))
def test_donot_consume(self):
select = cb.select(indices=[2], n_in=0)
self.assertEqual(select(1, 2, 3, 4), (3, 1, 2, 3, 4))
def test_extra_input(self):
select = cb.select(indices=[2])
self.assertEqual(select(1, 2, 3, 4), (3, 4))
def test_less_input(self):
select = cb.select(indices=[2])
with self.assertRaises(IndexError):
select(1)
class DupTest(unittest.TestCase):
def test_signature(self):
dup = cb.dup()
self.assertEqual(dup.signature.n_in, 1)
self.assertEqual(dup.signature.n_out, 2)
self.assertEqual(dup.signature.in_shape, ((),))
def test_default(self):
dup = cb.dup()
self.assertEqual(dup(1), (1, 1))
def test_single_item(self):
dup = cb.dup(n_in=1)
self.assertEqual(dup(1), (1, 1))
def test_many_items(self):
dup = cb.dup(n_in=2)
self.assertEqual(dup(1, 2), (1, 2, 1, 2))
def test_without_any_item(self):
dup = cb.dup(n_in=0)
self.assertEqual(dup(), ())
def test_extra_input(self):
dup = cb.dup()
self.assertEqual(dup(1, 2, 3, 4), (1, 1, 2, 3, 4))
def test_less_input(self):
dup = cb.dup(n_in=2)
with self.assertRaises(ValueError):
dup(1)
class DropTest(unittest.TestCase):
def test_signature(self):
drop = cb.drop()
self.assertEqual(drop.signature.n_in, 1)
self.assertEqual(drop.signature.n_out, 0)
self.assertEqual(drop.signature.in_shape, ((),))
def test_default(self):
drop = cb.drop()
self.assertEqual(drop(1, 2), 2)
def test_single_item(self):
drop = cb.drop(n_in=1)
self.assertEqual(drop(1), ())
def test_many_items(self):
drop = cb.drop(n_in=2)
self.assertEqual(drop(1, 2), ())
def test_without_any_item(self):
drop = cb.drop(n_in=0)
self.assertEqual(drop(), ())
def test_extra_input(self):
drop = cb.drop()
self.assertEqual(drop(1, 2, 3, 4), (2, 3, 4))
def test_less_input(self):
drop = cb.drop(n_in=2)
with self.assertRaises(ValueError):
drop(1)
class IdentityTest(unittest.TestCase):
def test_signature(self):
identity = cb.identity()
self.assertEqual(identity.signature.n_in, 1)
self.assertEqual(identity.signature.n_out, 1)
self.assertEqual(identity.signature.in_shape, ((),))
def test_default(self):
identity = cb.identity()
self.assertEqual(identity(1, 2), (1, 2))
def test_single_item(self):
identity = cb.identity(n_in=1)
self.assertEqual(identity(1), 1)
def test_many_items(self):
identity = cb.identity(n_in=2)
self.assertEqual(identity(1, 2), (1, 2))
def test_without_any_item(self):
identity = cb.identity(n_in=0)
self.assertEqual(identity(), ())
def test_extra_input(self):
identity = cb.identity()
self.assertEqual(identity(1, 2, 3, 4), (1, 2, 3, 4))
def test_less_input(self):
identity = cb.identity(n_in=2)
with self.assertRaises(ValueError):
identity(1)
class AddTest(unittest.TestCase):
def test_signature(self):
add = cb.add()
self.assertEqual(add.signature.n_in, 2)
self.assertEqual(add.signature.n_out, 1)
self.assertEqual(
add.signature.in_shape,
((), ()),
)
def test_default(self):
add = cb.add()
self.assertEqual(add(4, 2), 6)
def test_single_item(self):
add = cb.add(n_in=1)
self.assertEqual(add(1), 1)
def test_many_items(self):
add = cb.add(n_in=4)
self.assertEqual(add(1, 2, 3, 4), 10)
def test_extra_input(self):
add = cb.add(n_in=2)
self.assertEqual(add(1, 2, 3, 4), (3, 3, 4))
def test_less_input(self):
add = cb.add(n_in=2)
with self.assertRaises(ValueError):
add(1)
class SubTest(unittest.TestCase):
def test_signature(self):
sub = cb.sub()
self.assertEqual(sub.signature.n_in, 2)
self.assertEqual(sub.signature.n_out, 1)
self.assertEqual(
sub.signature.in_shape,
((), ()),
)
def test_default(self):
sub = cb.sub()
self.assertEqual(sub(4, 2), 2)
def test_single_item(self):
sub = cb.sub(n_in=1)
self.assertEqual(sub(1), 1)
def test_many_items(self):
sub = cb.sub(n_in=4)
self.assertEqual(sub(1, 2, 3, 4), -8)
def test_extra_input(self):
sub = cb.sub(n_in=2)
self.assertEqual(sub(1, 2, 3, 4), (-1, 3, 4))
def test_less_input(self):
sub = cb.sub(n_in=2)
with self.assertRaises(ValueError):
sub(1)
class MulTest(unittest.TestCase):
def test_signature(self):
mul = cb.mul()
self.assertEqual(mul.signature.n_in, 2)
self.assertEqual(mul.signature.n_out, 1)
self.assertEqual(
mul.signature.in_shape,
((), ()),
)
def test_default(self):
mul = cb.mul()
self.assertEqual(mul(4, 2), 8)
def test_single_item(self):
mul = cb.mul(n_in=1)
self.assertEqual(mul(1), 1)
def test_many_items(self):
mul = cb.mul(n_in=4)
self.assertEqual(mul(1, 2, 3, 4), 24)
def test_extra_input(self):
mul = cb.mul(n_in=2)
self.assertEqual(mul(1, 2, 3, 4), (2, 3, 4))
def test_less_input(self):
mul = cb.mul(n_in=2)
with self.assertRaises(ValueError):
mul(1)
class DivTest(unittest.TestCase):
def test_signature(self):
div = cb.div()
self.assertEqual(div.signature.n_in, 2)
self.assertEqual(div.signature.n_out, 1)
self.assertEqual(
div.signature.in_shape,
((), ()),
)
def test_default(self):
div = cb.div()
self.assertEqual(div(4, 2), 2)
def test_single_item(self):
div = cb.div(n_in=1)
self.assertEqual(div(1), 1)
def test_many_items(self):
div = cb.div(n_in=4)
self.assertAlmostEqual(div(1, 2, 3, 4), 0.04166, places=4)
def test_extra_input(self):
div = cb.div(n_in=2)
        self.assertEqual(div(1, 2, 3, 4), (1 / 2, 3, 4))
def test_less_input(self):
div = cb.div(n_in=2)
with self.assertRaises(ValueError):
div(1)
|
py | b40f167c9a647859d82acb842f5277b486cda723 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: force_control.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='force_control.proto',
package='gemini.forcecontrol',
syntax='proto3',
serialized_options=b'\n\026io.gemini.forcecontrolB\014ForceControlP\001\242\002\003HLW',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x66orce_control.proto\x12\x13gemini.forcecontrol\"T\n\x10GeneralizedForce\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x12\t\n\x01k\x18\x04 \x01(\x02\x12\t\n\x01m\x18\x05 \x01(\x02\x12\t\n\x01n\x18\x06 \x01(\x02\"a\n\x0c\x46orceRequest\x12\x10\n\x08vesselId\x18\x01 \x01(\t\x12?\n\x10generalizedForce\x18\x02 \x01(\x0b\x32%.gemini.forcecontrol.GeneralizedForce\" \n\rForceResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x32\x65\n\x0c\x46orceControl\x12U\n\nApplyForce\x12!.gemini.forcecontrol.ForceRequest\x1a\".gemini.forcecontrol.ForceResponse\"\x00\x42.\n\x16io.gemini.forcecontrolB\x0c\x46orceControlP\x01\xa2\x02\x03HLWb\x06proto3'
)
_GENERALIZEDFORCE = _descriptor.Descriptor(
name='GeneralizedForce',
full_name='gemini.forcecontrol.GeneralizedForce',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='gemini.forcecontrol.GeneralizedForce.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='y', full_name='gemini.forcecontrol.GeneralizedForce.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='z', full_name='gemini.forcecontrol.GeneralizedForce.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='k', full_name='gemini.forcecontrol.GeneralizedForce.k', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='m', full_name='gemini.forcecontrol.GeneralizedForce.m', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='n', full_name='gemini.forcecontrol.GeneralizedForce.n', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=128,
)
_FORCEREQUEST = _descriptor.Descriptor(
name='ForceRequest',
full_name='gemini.forcecontrol.ForceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='vesselId', full_name='gemini.forcecontrol.ForceRequest.vesselId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='generalizedForce', full_name='gemini.forcecontrol.ForceRequest.generalizedForce', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=227,
)
_FORCERESPONSE = _descriptor.Descriptor(
name='ForceResponse',
full_name='gemini.forcecontrol.ForceResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='gemini.forcecontrol.ForceResponse.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=229,
serialized_end=261,
)
_FORCEREQUEST.fields_by_name['generalizedForce'].message_type = _GENERALIZEDFORCE
DESCRIPTOR.message_types_by_name['GeneralizedForce'] = _GENERALIZEDFORCE
DESCRIPTOR.message_types_by_name['ForceRequest'] = _FORCEREQUEST
DESCRIPTOR.message_types_by_name['ForceResponse'] = _FORCERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GeneralizedForce = _reflection.GeneratedProtocolMessageType('GeneralizedForce', (_message.Message,), {
'DESCRIPTOR' : _GENERALIZEDFORCE,
'__module__' : 'force_control_pb2'
# @@protoc_insertion_point(class_scope:gemini.forcecontrol.GeneralizedForce)
})
_sym_db.RegisterMessage(GeneralizedForce)
ForceRequest = _reflection.GeneratedProtocolMessageType('ForceRequest', (_message.Message,), {
'DESCRIPTOR' : _FORCEREQUEST,
'__module__' : 'force_control_pb2'
# @@protoc_insertion_point(class_scope:gemini.forcecontrol.ForceRequest)
})
_sym_db.RegisterMessage(ForceRequest)
ForceResponse = _reflection.GeneratedProtocolMessageType('ForceResponse', (_message.Message,), {
'DESCRIPTOR' : _FORCERESPONSE,
'__module__' : 'force_control_pb2'
# @@protoc_insertion_point(class_scope:gemini.forcecontrol.ForceResponse)
})
_sym_db.RegisterMessage(ForceResponse)
DESCRIPTOR._options = None
_FORCECONTROL = _descriptor.ServiceDescriptor(
name='ForceControl',
full_name='gemini.forcecontrol.ForceControl',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=263,
serialized_end=364,
methods=[
_descriptor.MethodDescriptor(
name='ApplyForce',
full_name='gemini.forcecontrol.ForceControl.ApplyForce',
index=0,
containing_service=None,
input_type=_FORCEREQUEST,
output_type=_FORCERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_FORCECONTROL)
DESCRIPTOR.services_by_name['ForceControl'] = _FORCECONTROL
# @@protoc_insertion_point(module_scope)
|
py | b40f173839a9a849ca1fa47d8ea61caa148d1535 | #! /usr/bin/python
# -*- coding=utf-8 -*-
from ckeditor_uploader.fields import RichTextUploadingField
from django import forms
from django.db.models import Q
from django.forms import widgets, ModelForm,HiddenInput
from django.urls import reverse
from froala_editor.widgets import FroalaEditor
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
import haystack.inputs
from account.models import BlogUser
from blogs.models import Artical
from comment.models import Comment
class TitleSearchForm(SearchForm):
title = forms.CharField(required=True,widget=forms.TextInput(attrs={"placeholder": "title", "class": "form-control"
}))
class ArticleEditForm(ModelForm):
class Meta:
model = Artical
fields = ["title","body","status"]
options = {
"width": "900",
"height": "500",
"toolbarButtons":
[
'bold', 'italic', 'underline', "quote",'insertImage',
'insertLink', 'undo', 'redo',"getPDF","fontAwesome",
"emoticons","spellChecker","selectAll","fullscreen",
"codeBeautifierOptions","indent","paragraphFormat","charCounterCount",
"textColor","backgroundColor"
],
"lineHeights": {
'1.15': '1.15',
'1.5': '1.5',
"Double": '2'
},
"quickInsertButtons": [
"image","emoticons",
],
"imageDefaultDisplay":"block",
"placeholderText":"写下你想要分享的内容",
"pastePlain":True
}
widgets = {
"body":FroalaEditor(
theme="dark",
options=options,
),
"title":forms.TextInput(
attrs={
"style":"width:900px;height:6em;outline:none;border:none;font-size:20px",
"placeholder":"请输入标题",
"class":"form-control"
},
),
"status":forms.Select(
attrs={
"style":"height:50px",
"class":"form-control"
}
)
}
class ArticleUpdateForm(ModelForm):
class Meta:
model = Artical
fields = ["title","body","status"]
class MySearchForm(SearchForm):
q = forms.CharField(required=False,
widget=forms.TextInput(attrs={
'type': 'search',
"style":"text-shadow: 0 0 0 black;color: grey;width: 150px;background-color:white",
"class":"form-control" ,
"placeholder":"Search"
}
))
def search(self):
        # Validate first: cleaned_data is only populated after is_valid() runs full_clean().
        if not self.is_valid():
            return self.no_query_found()
        # Then build the SearchQuerySet from the cleaned query.
        sqs = SearchQuerySet().auto_query(self.cleaned_data["q"]).load_all()
        return sqs
|
py | b40f17d70850cd5d0854114cf74534c2ffaf0ece | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jfsrgko^pk^rm^pyf=-bhhq)0c-2tqzao^8xw)%abc*so4&_*q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL='core.User'
|
py | b40f18eee3cdc7f199a67e91ca4718e3bec93579 | # -*- coding: utf-8 -*-
#
# QAPI code generation
#
# Copyright (c) 2015-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
# Marc-André Lureau <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from contextlib import contextmanager
import os
import re
from typing import (
Dict,
Iterator,
List,
Optional,
Tuple,
)
from .common import (
c_fname,
c_name,
gen_endif,
gen_if,
guardend,
guardstart,
mcgen,
)
from .schema import QAPISchemaObjectType, QAPISchemaVisitor
from .source import QAPISourceInfo
class QAPIGen:
def __init__(self, fname: Optional[str]):
self.fname = fname
self._preamble = ''
self._body = ''
def preamble_add(self, text: str) -> None:
self._preamble += text
def add(self, text: str) -> None:
self._body += text
def get_content(self) -> str:
return self._top() + self._preamble + self._body + self._bottom()
def _top(self) -> str:
# pylint: disable=no-self-use
return ''
def _bottom(self) -> str:
# pylint: disable=no-self-use
return ''
def write(self, output_dir: str) -> None:
# Include paths starting with ../ are used to reuse modules of the main
# schema in specialised schemas. Don't overwrite the files that are
# already generated for the main schema.
if self.fname.startswith('../'):
return
pathname = os.path.join(output_dir, self.fname)
odir = os.path.dirname(pathname)
if odir:
os.makedirs(odir, exist_ok=True)
        # use os.open for O_CREAT to create and read a non-existent file
fd = os.open(pathname, os.O_RDWR | os.O_CREAT, 0o666)
with os.fdopen(fd, 'r+', encoding='utf-8') as fp:
text = self.get_content()
oldtext = fp.read(len(text) + 1)
if text != oldtext:
fp.seek(0)
fp.truncate(0)
fp.write(text)
def _wrap_ifcond(ifcond: List[str], before: str, after: str) -> str:
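    # Wrap only the text appended since 'before' (i.e. after[len(before):])
    # in #if/#endif guards, leaving the previously generated text untouched.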
if before == after:
return after # suppress empty #if ... #endif
assert after.startswith(before)
out = before
added = after[len(before):]
if added[0] == '\n':
out += '\n'
added = added[1:]
out += gen_if(ifcond)
out += added
out += gen_endif(ifcond)
return out
def build_params(arg_type: Optional[QAPISchemaObjectType],
boxed: bool,
extra: Optional[str] = None) -> str:
ret = ''
sep = ''
if boxed:
assert arg_type
ret += '%s arg' % arg_type.c_param_type()
sep = ', '
elif arg_type:
assert not arg_type.variants
for memb in arg_type.members:
ret += sep
sep = ', '
if memb.optional:
ret += 'bool has_%s, ' % c_name(memb.name)
ret += '%s %s' % (memb.type.c_param_type(),
c_name(memb.name))
if extra:
ret += sep + extra
return ret if ret else 'void'
class QAPIGenCCode(QAPIGen):
def __init__(self, fname: Optional[str]):
super().__init__(fname)
self._start_if: Optional[Tuple[List[str], str, str]] = None
def start_if(self, ifcond: List[str]) -> None:
assert self._start_if is None
self._start_if = (ifcond, self._body, self._preamble)
def end_if(self) -> None:
assert self._start_if
self._wrap_ifcond()
self._start_if = None
def _wrap_ifcond(self) -> None:
self._body = _wrap_ifcond(self._start_if[0],
self._start_if[1], self._body)
self._preamble = _wrap_ifcond(self._start_if[0],
self._start_if[2], self._preamble)
def get_content(self) -> str:
assert self._start_if is None
return super().get_content()
class QAPIGenC(QAPIGenCCode):
def __init__(self, fname: str, blurb: str, pydoc: str):
super().__init__(fname)
self._blurb = blurb
self._copyright = '\n * '.join(re.findall(r'^Copyright .*', pydoc,
re.MULTILINE))
def _top(self) -> str:
return mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
%(blurb)s
*
* %(copyright)s
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*/
''',
blurb=self._blurb, copyright=self._copyright)
def _bottom(self) -> str:
return mcgen('''
/* Dummy declaration to prevent empty .o file */
char qapi_dummy_%(name)s;
''',
name=c_fname(self.fname))
class QAPIGenH(QAPIGenC):
def _top(self) -> str:
return super()._top() + guardstart(self.fname)
def _bottom(self) -> str:
return guardend(self.fname)
@contextmanager
def ifcontext(ifcond: List[str], *args: QAPIGenCCode) -> Iterator[None]:
"""
A with-statement context manager that wraps with `start_if()` / `end_if()`.
:param ifcond: A list of conditionals, passed to `start_if()`.
:param args: any number of `QAPIGenCCode`.
Example::
with ifcontext(ifcond, self._genh, self._genc):
modify self._genh and self._genc ...
Is equivalent to calling::
self._genh.start_if(ifcond)
self._genc.start_if(ifcond)
modify self._genh and self._genc ...
self._genh.end_if()
self._genc.end_if()
"""
for arg in args:
arg.start_if(ifcond)
yield
for arg in args:
arg.end_if()
class QAPISchemaMonolithicCVisitor(QAPISchemaVisitor):
def __init__(self,
prefix: str,
what: str,
blurb: str,
pydoc: str):
self._prefix = prefix
self._what = what
self._genc = QAPIGenC(self._prefix + self._what + '.c',
blurb, pydoc)
self._genh = QAPIGenH(self._prefix + self._what + '.h',
blurb, pydoc)
def write(self, output_dir: str) -> None:
self._genc.write(output_dir)
self._genh.write(output_dir)
class QAPISchemaModularCVisitor(QAPISchemaVisitor):
def __init__(self,
prefix: str,
what: str,
user_blurb: str,
builtin_blurb: Optional[str],
pydoc: str):
self._prefix = prefix
self._what = what
self._user_blurb = user_blurb
self._builtin_blurb = builtin_blurb
self._pydoc = pydoc
self._genc: Optional[QAPIGenC] = None
self._genh: Optional[QAPIGenH] = None
self._module: Dict[Optional[str], Tuple[QAPIGenC, QAPIGenH]] = {}
self._main_module: Optional[str] = None
@staticmethod
def _is_user_module(name: Optional[str]) -> bool:
return bool(name and not name.startswith('./'))
@staticmethod
def _is_builtin_module(name: Optional[str]) -> bool:
return not name
def _module_dirname(self, name: Optional[str]) -> str:
if self._is_user_module(name):
return os.path.dirname(name)
return ''
def _module_basename(self, what: str, name: Optional[str]) -> str:
ret = '' if self._is_builtin_module(name) else self._prefix
if self._is_user_module(name):
basename = os.path.basename(name)
ret += what
if name != self._main_module:
ret += '-' + os.path.splitext(basename)[0]
else:
name = name[2:] if name else 'builtin'
ret += re.sub(r'-', '-' + name + '-', what)
return ret
def _module_filename(self, what: str, name: Optional[str]) -> str:
return os.path.join(self._module_dirname(name),
self._module_basename(what, name))
def _add_module(self, name: Optional[str], blurb: str) -> None:
basename = self._module_filename(self._what, name)
genc = QAPIGenC(basename + '.c', blurb, self._pydoc)
genh = QAPIGenH(basename + '.h', blurb, self._pydoc)
self._module[name] = (genc, genh)
self._genc, self._genh = self._module[name]
def _add_user_module(self, name: str, blurb: str) -> None:
assert self._is_user_module(name)
if self._main_module is None:
self._main_module = name
self._add_module(name, blurb)
def _add_system_module(self, name: Optional[str], blurb: str) -> None:
self._add_module(name and './' + name, blurb)
def write(self, output_dir: str, opt_builtins: bool = False) -> None:
for name in self._module:
if self._is_builtin_module(name) and not opt_builtins:
continue
(genc, genh) = self._module[name]
genc.write(output_dir)
genh.write(output_dir)
def _begin_system_module(self, name: None) -> None:
pass
def _begin_user_module(self, name: str) -> None:
pass
def visit_module(self, name: Optional[str]) -> None:
if name is None:
if self._builtin_blurb:
self._add_system_module(None, self._builtin_blurb)
self._begin_system_module(name)
else:
# The built-in module has not been created. No code may
# be generated.
self._genc = None
self._genh = None
else:
self._add_user_module(name, self._user_blurb)
self._begin_user_module(name)
def visit_include(self, name: str, info: QAPISourceInfo) -> None:
relname = os.path.relpath(self._module_filename(self._what, name),
os.path.dirname(self._genh.fname))
self._genh.preamble_add(mcgen('''
#include "%(relname)s.h"
''',
relname=relname))
|
py | b40f191791c70eb2d226026e994962a71c6e7c45 | from pagetools.src.utils.page_processing import string_to_coords
from typing import Dict, List, Set, Union
from pathlib import Path
from lxml import etree
# TODO: Replace by using PAGEPy library
class Page:
def __init__(self, xml: Path):
self.filename = xml
self.tree = self.get_tree()
self.ns = self.autoextract_namespace(self.tree)
def get_filename(self) -> Path:
return self.filename
def get_ns(self) -> Dict[str, str]:
return self.ns
@staticmethod
def autoextract_namespace(tree: etree.Element) -> Union[None, Dict[str, str]]:
"""
:param tree:
:return:
"""
if tree is None:
return
extracted_ns = tree.xpath('namespace-uri(.)')
if extracted_ns.startswith("http://schema.primaresearch.org/PAGE/gts/pagecontent/"):
return {"page": extracted_ns}
return {}
def get_tree(self, root: bool = False) -> etree.Element:
try:
if self.tree:
return self.tree.getroot() if root else self.tree
except AttributeError:
pass
try:
tree = etree.parse(str(self.filename))
except (etree.XMLSyntaxError, etree.ParseError) as e:
return None
return tree
def get_element_data(self, element_types: Set[str]) -> List[Dict]:
element_data = []
for element_type in element_types:
element_regions = self.tree.getroot().findall(f".//page:{element_type}", namespaces=self.ns)
for region in element_regions:
if element_type == "TextLine":
orientation = float(region.getparent().attrib.get("orientation", 0))
else:
orientation = float(region.attrib.get("orientation", 0))
coords = region.find("./page:Coords", namespaces=self.ns).attrib["points"]
text_line_data = {"id": region.attrib.get("id"),
"orientation": orientation,
"coords": string_to_coords(coords),
"text_equivs": []
}
text_equivs = region.findall("./page:TextEquiv", namespaces=self.ns)
if len(text_equivs) > 0:
for text_equiv in text_equivs:
idx = text_equiv.attrib.get("index")
content = "".join(text_equiv.find("./page:Unicode", namespaces=self.ns).itertext())
text_line_data["text_equivs"].append({"index": idx, "content": content})
element_data.append(text_line_data)
return element_data
def get_text_equivs(self) -> List[etree.Element]:
return self.tree.getroot().xpath(".//page:TextEquiv", namespaces=self.ns)
def get_texts(self) -> List[etree.Element]:
return [elem for elem in self.tree.xpath(".//page:Unicode", namespaces=self.ns)]
def get_text_regions(self) -> List[etree.Element]:
return [elem for elem in self.tree.findall(".//page:TextRegion", namespaces=self.ns)]
def export(self, out: Path, pretty=True, encoding="unicode"):
with out.open("w") as outfile:
outfile.write(etree.tostring(self.tree, pretty_print=pretty, encoding=encoding))
|
py | b40f1a363a0c1f7551a5d3e9b847a4b143ad8fcb | from setuptools import setup, find_packages
from pyreuters import __version__, __package__
def readme():
with open('README.rst') as f:
return f.read()
setup(
name=__package__,
version=__version__,
description='Python API To Read Reuters Market Data file',
author='Kapil Sharma',
author_email='[email protected]',
packages=find_packages(),
install_requires=[
'pandas>=0.18',
'numpy>=1.10',
'tables',
'pysftp',
'statsmodels'
],
entry_points={
'console_scripts':
["reuters_download=pyreuters.bin.download:main",
"reuters_convert=pyreuters.bin.convert:main",
"reuters_search=pyreuters.bin.search:main"]
},
package_data={
'': ['*.json']
},
zip_safe=False
) |
py | b40f1a3dead6eccbd8ef976c0458b3cde1273864 | """
* Assignment: CSV Writer Iris
* Complexity: easy
* Lines of code: 3 lines
* Time: 5 min
English:
1. Using `csv.writer()` save `DATA` to file
2. Use Unix `\n` line terminator
3. Run doctests - all must succeed
Polish:
1. Za pomocą `csv.writer()` zapisz `DATA` do pliku
2. Użyj zakończenia linii Unix `\n`
3. Uruchom doctesty - wszystkie muszą się powieść
Hint:
* For Python before 3.8: `dict(OrderedDict)`
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> from os import remove
>>> result = open(FILE).read()
>>> remove(FILE)
>>> assert result is not Ellipsis, \
'Assign result to variable: `result`'
>>> assert type(result) is str, \
'Variable `result` has invalid type, should be str'
>>> print(result)
Sepal length,Sepal width,Petal length,Petal width,Species
5.8,2.7,5.1,1.9,virginica
5.1,3.5,1.4,0.2,setosa
5.7,2.8,4.1,1.3,versicolor
6.3,2.9,5.6,1.8,virginica
6.4,3.2,4.5,1.5,versicolor
4.7,3.2,1.3,0.2,setosa
7.0,3.2,4.7,1.4,versicolor
7.6,3.0,6.6,2.1,virginica
4.9,3.0,1.4,0.2,setosa
<BLANKLINE>
"""
import csv
DATA = [
('Sepal length', 'Sepal width', 'Petal length', 'Petal width', 'Species'),
(5.8, 2.7, 5.1, 1.9, 'virginica'),
(5.1, 3.5, 1.4, 0.2, 'setosa'),
(5.7, 2.8, 4.1, 1.3, 'versicolor'),
(6.3, 2.9, 5.6, 1.8, 'virginica'),
(6.4, 3.2, 4.5, 1.5, 'versicolor'),
(4.7, 3.2, 1.3, 0.2, 'setosa'),
(7.0, 3.2, 4.7, 1.4, 'versicolor'),
(7.6, 3.0, 6.6, 2.1, 'virginica'),
(4.9, 3.0, 1.4, 0.2, 'setosa')]
FILE = r'_temporary.csv'
# ContextManager: Write DATA to FILE, generate header from DATA
with open(FILE, mode='w') as file:
...
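    # A minimal sketch of one possible solution (the assignment leaves this
    # to the reader): csv.writer with a Unix '\n' line terminator, writing
    # every row of DATA; the first tuple in DATA already provides the header
    # line expected by the doctests.
    writer = csv.writer(file, lineterminator='\n')
    writer.writerows(DATA)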
|
py | b40f1b2b4aae03803064ecba3686d7c536e55cd1 | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import public_get_user_order as public_get_user_order_internal
from accelbyte_py_sdk.api.platform.models import ErrorEntity
from accelbyte_py_sdk.api.platform.models import OrderInfo
@click.command()
@click.argument("order_no", type=str)
@click.argument("user_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def public_get_user_order(
order_no: str,
user_id: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(public_get_user_order_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
result, error = public_get_user_order_internal(
order_no=order_no,
user_id=user_id,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"publicGetUserOrder failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
public_get_user_order.operation_id = "publicGetUserOrder"
public_get_user_order.is_deprecated = False
|
py | b40f1beef892703d4b1a5923c1c7cbdf987483db | import sys
import os
import Fs.Nsp as Nsp
import Fs.Xci as Xci
import sq_tools
import Hex
from binascii import hexlify as hx, unhexlify as uhx
import listmanager
def get_key_fromdict(fp):
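    # Walk the NSP's file table to its .cnmt.nca entry, read the rights ID and
    # key generation from the metadata, and return the titlekey looked up in
    # the local key database.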
if fp.endswith('.nsp') or fp.endswith('.nsx'):
files_list=sq_tools.ret_nsp_offsets(fp)
files=list();filesizes=list()
fplist=list()
for k in range(len(files_list)):
entry=files_list[k]
fplist.append(entry[0])
for i in range(len(files_list)):
entry=files_list[i]
filepath=entry[0]
if filepath.endswith('.cnmt.nca'):
f=Nsp(fp,'rb')
titleid,titleversion,base_ID,keygeneration,rightsId,RSV,RGV,ctype,metasdkversion,exesdkversion,hasHtmlManual,Installedsize,DeltaSize,ncadata=f.get_data_from_cnmt(filepath)
titlekey,dectkey=f.db_get_titlekey(rightsId,keygeneration)
f.flush()
f.close()
# print(titlekey);print(rightsId)
return titlekey
def rename_nsx(fp):
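    # Accepts either a single .nsp/.nsx path or a .txt file listing paths;
    # each entry is checked and renamed by test_ifnsx(), and processed lines
    # are stripped from the list file as it goes.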
if fp.endswith('.txt'):
filelist=listmanager.read_lines_to_list(fp,all=True)
for file in filelist:
if file[0]=='"':
file=file[1:]
if file[-1]=='"':
file=file[:-1]
file=os.path.abspath(file)
test_ifnsx(file)
listmanager.striplines(fp,number=1,counter=True)
else:
test_ifnsx(fp)
try:
os.remove(fp)
except:
pass
def test_ifnsx(fp):
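    # An .nsx is an .nsp whose titlekey has been zeroed out; inspect the key
    # recovered from the file and rename the extension to match its contents.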
if fp.endswith('.nsp') or fp.endswith('.nsx'):
print('Checking file {}'.format(fp))
titlekey=get_key_fromdict(fp)
if titlekey != False:
check=bytes.fromhex(titlekey)
if sum(check)==0:
print(' - File is nsx')
newpath =fp[:-1]+'x'
if newpath==fp:
print(' > Current name is correct')
else:
os.rename(fp, newpath)
print(' > Renamed to {}'.format(os.path.basename(newpath)))
else:
print(' - File is nsp')
newpath =fp[:-1]+'p'
if newpath==fp:
print(' > Current name is correct')
else:
os.rename(fp, newpath)
print(' > Renamed to {}'.format(os.path.basename(newpath)))
else:
print(' - File is standard crypto. Skipping...')
else:
print(" - File isn't nsp or nsx. Skipping...")
def verify_ticket(fp):
if fp.endswith('.nsp') or fp.endswith('.nsz'):
files_list=sq_tools.ret_nsp_offsets(fp)
for i in range(len(files_list)):
entry=files_list[i]
filepath=entry[0]
if filepath.endswith('.tick'):
pass
# f=Nsp(fp,'rb')
# f.flush()
# f.close()
elif fp.endswith('.xci') or fp.endswith('.xcz'):
files_list=sq_tools.ret_xci_offsets(fp)
pass
# f=Xci(fp)
# f.flush()
# f.close() |
py | b40f1cf5ff4d6323a6df05896bd409265aaa1854 | """All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
DOMAIN = 'zha'
BAUD_RATES = [
2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000
]
DATA_ZHA = 'zha'
DATA_ZHA_CONFIG = 'config'
DATA_ZHA_BRIDGE_ID = 'zha_bridge_id'
DATA_ZHA_DISPATCHERS = 'zha_dispatchers'
DATA_ZHA_CORE_EVENTS = 'zha_core_events'
DATA_ZHA_GATEWAY = 'zha_gateway'
ZHA_DISCOVERY_NEW = 'zha_discovery_new_{}'
COMPONENTS = (
BINARY_SENSOR,
FAN,
LIGHT,
LOCK,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = 'baudrate'
CONF_DATABASE = 'database_path'
CONF_DEVICE_CONFIG = 'device_config'
CONF_RADIO_TYPE = 'radio_type'
CONF_USB_PATH = 'usb_path'
DATA_DEVICE_CONFIG = 'zha_device_config'
ENABLE_QUIRKS = 'enable_quirks'
RADIO = 'radio'
RADIO_DESCRIPTION = 'radio_description'
CONTROLLER = 'controller'
DEFAULT_RADIO_TYPE = 'ezsp'
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = 'zigbee.db'
ATTR_CLUSTER_ID = 'cluster_id'
ATTR_CLUSTER_TYPE = 'cluster_type'
ATTR_ATTRIBUTE = 'attribute'
ATTR_VALUE = 'value'
ATTR_MANUFACTURER = 'manufacturer'
ATTR_COMMAND = 'command'
ATTR_COMMAND_TYPE = 'command_type'
ATTR_ARGS = 'args'
ATTR_ENDPOINT_ID = 'endpoint_id'
IN = 'in'
OUT = 'out'
CLIENT_COMMANDS = 'client_commands'
SERVER_COMMANDS = 'server_commands'
SERVER = 'server'
IEEE = 'ieee'
MODEL = 'model'
NAME = 'name'
LQI = 'lqi'
RSSI = 'rssi'
LAST_SEEN = 'last_seen'
SENSOR_TYPE = 'sensor_type'
HUMIDITY = 'humidity'
TEMPERATURE = 'temperature'
ILLUMINANCE = 'illuminance'
PRESSURE = 'pressure'
METERING = 'metering'
ELECTRICAL_MEASUREMENT = 'electrical_measurement'
GENERIC = 'generic'
BATTERY = 'battery'
UNKNOWN = 'unknown'
UNKNOWN_MANUFACTURER = 'unk_manufacturer'
UNKNOWN_MODEL = 'unk_model'
OPENING = 'opening'
OCCUPANCY = 'occupancy'
ACCELERATION = 'acceleration'
ATTR_LEVEL = 'level'
ZDO_CHANNEL = 'zdo'
ON_OFF_CHANNEL = 'on_off'
ATTRIBUTE_CHANNEL = 'attribute'
BASIC_CHANNEL = 'basic'
COLOR_CHANNEL = 'light_color'
FAN_CHANNEL = 'fan'
LEVEL_CHANNEL = ATTR_LEVEL
ZONE_CHANNEL = ZONE = 'ias_zone'
ELECTRICAL_MEASUREMENT_CHANNEL = 'electrical_measurement'
POWER_CONFIGURATION_CHANNEL = 'power'
EVENT_RELAY_CHANNEL = 'event_relay'
DOORLOCK_CHANNEL = 'door_lock'
SIGNAL_ATTR_UPDATED = 'attribute_updated'
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_AVAILABLE = 'available'
SIGNAL_REMOVE = 'remove'
QUIRK_APPLIED = 'quirk_applied'
QUIRK_CLASS = 'quirk_class'
MANUFACTURER_CODE = 'manufacturer_code'
POWER_SOURCE = 'power_source'
MAINS_POWERED = 'Mains'
BATTERY_OR_UNKNOWN = 'Battery or Unknown'
BELLOWS = 'bellows'
ZHA = 'homeassistant.components.zha'
ZIGPY = 'zigpy'
ZIGPY_XBEE = 'zigpy_xbee'
ZIGPY_DECONZ = 'zigpy_deconz'
ORIGINAL = 'original'
CURRENT = 'current'
DEBUG_LEVELS = {
BELLOWS: logging.DEBUG,
ZHA: logging.DEBUG,
ZIGPY: logging.DEBUG,
ZIGPY_XBEE: logging.DEBUG,
ZIGPY_DECONZ: logging.DEBUG,
}
ADD_DEVICE_RELAY_LOGGERS = [ZHA, ZIGPY]
TYPE = 'type'
NWK = 'nwk'
SIGNATURE = 'signature'
RAW_INIT = 'raw_device_initialized'
ZHA_GW_MSG = 'zha_gateway_message'
DEVICE_REMOVED = 'device_removed'
DEVICE_INFO = 'device_info'
DEVICE_FULL_INIT = 'device_fully_initialized'
DEVICE_JOINED = 'device_joined'
LOG_OUTPUT = 'log_output'
LOG_ENTRY = 'log_entry'
MFG_CLUSTER_ID_START = 0xfc00
class RadioType(enum.Enum):
"""Possible options for radio type."""
ezsp = 'ezsp'
xbee = 'xbee'
deconz = 'deconz'
@classmethod
def list(cls):
"""Return list of enum's values."""
return [e.value for e in RadioType]
DISCOVERY_KEY = 'zha_discovery_info'
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_ASAP = (REPORT_CONFIG_MIN_INT_ASAP, REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_BATTERY_SAVE = (REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_IMMEDIATE = (REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE)
REPORT_CONFIG_OP = (REPORT_CONFIG_MIN_INT_OP, REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE)
|
py | b40f1d5bcb2de9ff2a1819e28ab8a640b060d981 | from typing import overload
class Foo(object):
@overload
def fun(self, s:str) -> str: pass
@overload
def fun(self, i:int) -> int: pass
def fun(self, x):
pass |
py | b40f1e4bfdabf6ff4bd3b9c4a3e88a1a5a50057a | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='nwb')
crawl_bibleis(crawler, out, bible='NWBWBT')
|
py | b40f204aa28187b49de61bb429bb71bbabe08c0c | """
Django settings for serviceManProject project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v@+4r*qu_$_$9zixfyp0oi6go+hgs@qz_01p&y7vu+-4k*gmv4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'provider',
'client',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'serviceManProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'serviceManProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'MainDb',
# 'USER': 'root',
# 'PASSWORD': '',
# 'HOST': 'localhost', # Or an IP Address that your DB is hosted on
# 'PORT': '',
# }
# TODO test
# TODO this is todo test
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#TODO
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join('media/')
MEDIA_URL = '/media/'
|
py | b40f20c566f9ab9bbf733aba1f0e8e0ee4c94528 | import unittest
from sapai import *
from sapai.shop import *
class TestShop(unittest.TestCase):
def test_shop_slot_pet(self):
slot = ShopSlot("pet")
slot.item = Pet("ant")
slot.roll()
self.assertIsInstance(slot.item, Pet)
def test_shop_slot_food(self):
slot = ShopSlot("food")
slot.item = Food("apple")
slot.roll()
self.assertIsInstance(slot.item, Food)
def test_shop_level_up(self):
slot = ShopSlot("levelup")
tier = slot.item.tier
self.assertEqual(tier, 2)
def test_max_shop(self):
s = Shop(turn=11)
s.freeze(0)
for index in range(10):
s.roll()
def test_rabbit_buy_food(self):
test_player = Player(shop=["honey"], team=["rabbit"])
start_health = 2
self.assertEqual(test_player.team[0].pet.health, start_health)
test_player.buy_food(0, 0)
expected_end_health = start_health + 1
self.assertEqual(test_player.team[0].pet.health, expected_end_health)
def test_empty_shop_from_state(self):
pet = Pet("fish")
orig_shop = Shop(shop_slots=[pet])
orig_shop.buy(pet)
self.assertEqual(len(orig_shop.shop_slots), 0)
copy_shop = Shop.from_state(orig_shop.state)
self.assertEqual(len(copy_shop.shop_slots), 0)
|
py | b40f214b03ad933d9355060068f618b3307601ab | #Mihail Mihaylov 001062346
#Aisha Scacchi 001067045
#LOGIN DETAILS FOR DEMO ARE
#ID - 1
#PASSWORD - 123
import datetime
from tkinter import *
import tkinter
import sys
#Set dimensions for window
window = tkinter.Tk()
window.title("GSU Elections")
window.geometry("+800+400")
#Create empty lists which we are going to use later
#To store necessary data
listOfVoters = []
listOfCandidates = []
listOfAvailablePositions = []
#Set the election period
#Currently from 20th January 2020 to 31st January 2020
electionStartDate = datetime.date(2020, 1, 20)
electionEndDate = datetime.date(2020, 1, 31)
#Creating empty variables where we are going to store the winners
#and the current logged in voter
president_winner = ""
officer_winner = ""
faculty_officer_winner = ""
currentVoter = ''
class Candidate:
def __init__(self, position, firstName, lastName):
self.position = position
self.firstName = firstName
self.lastName = lastName
self.firstPreference = 0
self.secondPreference = 0
self.thirdPreference = 0
self.fourthPreference = 0
class Voter:
def __init__(self, studentId, password, firstName, lastName):
self.studentId = studentId
self.password = password
self.firstName = firstName
self.lastName = lastName
self.votedForPresident = 0
self.votedForOfficer = 0
self.votedForFacultyOfficer = 0
#Login Page
voterId = StringVar()
tkinter.Label(window, text="ID (enter '1')").grid(row=0, column = 0)
tkinter.Entry(window, textvariable = voterId, ).grid(row=0, column=1)
password = StringVar()
tkinter.Label(window, text="Password (enter '123')").grid(row=1, column = 0)
tkinter.Entry(window, show="*", textvariable = password).grid(row=1, column=1)
def login():
entered_id = voterId.get()
entered_password = password.get()
#check login details
if any(x.studentId == entered_id and x.password == entered_password for x in listOfVoters):
#check election date
if electionStartDate <= currentDate <= electionEndDate:
#get current logged in voter
for voter in listOfVoters:
if voter.studentId == entered_id and voter.password == entered_password:
currentVoter = voter
def vote(*args):
chosen_position = drop_down_menu.get()
if chosen_position == 'President':
#Get all the candidates appling for President
listOfCandPresidents = []
for cand in listOfCandidates:
if cand.position == 'President':
listOfCandPresidents.append(cand.firstName + " " + cand.lastName)
def submit():
                #Check if the currently logged in user has already voted for the given position
readVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "r").readlines()
for person in readVotedPeopleFile:
if currentVoter.firstName + " " + currentVoter.lastName + " " + "President\n" == person:
currentVoter.votedForPresident = 1
#Save 1st, 2nd, 3rd, 4th preference into Votes.txt
if currentVoter.votedForPresident == 0:
writeVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "a")
writeVotedPeopleFile.writelines(currentVoter.firstName + " " + currentVoter.lastName + " President\n")
first_preference_person = first_preference.get()
second_preference_person = second_preference.get()
third_preference_person = third_preference.get()
fourth_preference_person = fourth_preference.get()
votes_file = open("Votes.txt", "a")
votes_file.writelines(first_preference_person + " " + "1\n")
votes_file.writelines(second_preference_person + " " + "2\n")
votes_file.writelines(third_preference_person + " " + "3\n")
votes_file.writelines(fourth_preference_person + " " + "4\n")
votes_file.close()
def results():
#Read the votes and pick a winner
result_file = open("Votes.txt", "r")
file_lines = result_file.readlines()
votesCounter = 0
for line in file_lines:
votesCounter += 1
first_name = line.split()[0]
last_name = line.split()[1]
preference = line.split()[2]
for candToCheck in listOfCandidates:
if candToCheck.firstName + ' ' + candToCheck.lastName == first_name + ' ' + last_name:
if preference == "1":
candToCheck.firstPreference += 1
if preference == "2":
candToCheck.secondPreference += 1
if preference == "3":
candToCheck.thirdPreference += 1
if preference == "4":
candToCheck.fourthPreference += 1
break
listWinners = []
for winner in listOfCandidates:
if winner.position == "President":
listWinners.append(winner)
#Sort the winners by 1st preference, if there is a tie
#We look at the 2nd preference and so on
sorted_winners = sorted(listWinners, key=lambda x: x.firstPreference, reverse=True)
if sorted_winners[0].firstPreference == sorted_winners[1].firstPreference:
if sorted_winners[0].secondPreference == sorted_winners[1].secondPreference:
if sorted_winners[0].thirdPreference == sorted_winners[1].thirdPreference:
if sorted_winners[0].fourthPreference == sorted_winners[1].fourthPreference:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.fourthPreference), reverse=True)
else:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.thirdPreference), reverse=True)
else:
sorted_winners = sorted(listWinners,
key=lambda x: (x.firstPreference, x.secondPreference),
reverse=True)
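                                # Editor's note: an equivalent single sort would use the full
                                # preference tuple as the key, e.g.
                                #   sorted_winners = sorted(listWinners, reverse=True,
                                #       key=lambda c: (c.firstPreference, c.secondPreference,
                                #                      c.thirdPreference, c.fourthPreference))
                                # The nested comparisons above are kept as originally written.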
#Print out the results
tkinter.Label(window, text="Position: GSU " + cand.position).grid(row=15, column=0)
tkinter.Label(window,
text="Candidate 1stPreference 2ndPreference 3rdPreference 4thPreference").grid(
row=16, column=0)
rowCounter = 16
for winner in sorted_winners:
rowCounter += 1
tkinter.Label(window,
text=winner.firstName + " " + winner.lastName + " " + str(
winner.firstPreference) + " " + str(
winner.secondPreference) + " " + str(
winner.thirdPreference) + " " + str(
winner.fourthPreference)).grid(row=rowCounter, column=0)
president_winner = sorted_winners[0].firstName + " " + sorted_winners[0].lastName
tkinter.Label(window,
text="Winner: " + sorted_winners[0].firstName + " " + sorted_winners[
0].lastName).grid(row=rowCounter + 1, column=0)
tkinter.Label(window,
text="Votes Received: " + str(sorted_winners[0].firstPreference)).grid(
row=rowCounter + 2, column=0)
tkinter.Label(window, text="Total votes cast overall: " + str(votesCounter)).grid(
row=rowCounter + 3, column=0)
percentsOfVoters = (sorted_winners[0].firstPreference * 100) / votesCounter
tkinter.Label(window,
text=str(round(percentsOfVoters, 1)) + " % of the voters voted for " +
sorted_winners[0].firstName + " " + sorted_winners[0].lastName).grid(
row=rowCounter + 4, column=0)
winners_file = open("winners.txt", "a")
winners_file.writelines(
"Winner for position " + sorted_winners[0].position + " " + "is " + sorted_winners[
0].firstName + " " + sorted_winners[0].lastName + "\n")
def summary():
                                    #Get a summary of the winners for all the positions
read_winners_file = open("winners.txt", "r")
lineCounter = rowCounter + 6
for line in read_winners_file:
tkinter.Label(window, text=line).grid(row=lineCounter, column=0)
lineCounter += 1
tkinter.Button(window, command=summary, text='Summary').grid(row=rowCounter + 5,
column=2)
tkinter.Button(window, command=results, text='Show election results').grid(row=13, column=2)
else:
tkinter.Label(window, text="You have already voted for President", fg="red").grid(row=11, column=0)
first_preference = tkinter.StringVar(window)
first_preference.set("---")
tkinter.Label(window, text="Select 1st Preference").grid(row=7, column=0)
w_president = tkinter.OptionMenu(window, first_preference, *listOfCandPresidents)
w_president.grid(row=7, column=1)
second_preference = tkinter.StringVar(window)
second_preference.set("---")
tkinter.Label(window, text="Select 2nd Preference").grid(row=8, column=0)
w_president = tkinter.OptionMenu(window, second_preference, *listOfCandPresidents)
w_president.grid(row=8, column=1)
third_preference = tkinter.StringVar(window)
third_preference.set("---")
tkinter.Label(window, text="Select 3rd Preference").grid(row=9, column=0)
w_president = tkinter.OptionMenu(window, third_preference, *listOfCandPresidents)
w_president.grid(row=9, column=1)
fourth_preference = tkinter.StringVar(window)
fourth_preference.set("---")
tkinter.Label(window, text="Select 4th Preference").grid(row=10, column=0)
w_president = tkinter.OptionMenu(window, fourth_preference, *listOfCandPresidents)
w_president.grid(row=10, column=1)
tkinter.Button(window, command=submit, text='Submit').grid(row=11, column=2)
if chosen_position == 'Officer':
                    # Get all the candidates applying for Officer
listOfCandOfficer = []
for cand in listOfCandidates:
if cand.position == 'Officer':
listOfCandOfficer.append(cand.firstName + " " + cand.lastName)
def submit():
                        # Check if the current logged-in user has already voted for the given position
readVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "r").readlines()
for person in readVotedPeopleFile:
if currentVoter.firstName + " " + currentVoter.lastName + " Officer\n" == person:
currentVoter.votedForOfficer = 1
# Save 1st, 2nd, 3rd, 4th preference into Votes.txt
if currentVoter.votedForOfficer == 0:
writeVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "a")
writeVotedPeopleFile.writelines(currentVoter.firstName + " " + currentVoter.lastName + " Officer\n")
first_preference_person = first_preference.get()
second_preference_person = second_preference.get()
third_preference_person = third_preference.get()
fourth_preference_person = fourth_preference.get()
votes_file = open("Votes.txt", "a")
votes_file.writelines(first_preference_person + " " + "1\n")
votes_file.writelines(second_preference_person + " " + "2\n")
votes_file.writelines(third_preference_person + " " + "3\n")
votes_file.writelines(fourth_preference_person + " " + "4\n")
votes_file.close()
def results():
# Read the votes and pick a winner
result_file = open("Votes.txt", "r")
file_lines = result_file.readlines()
votesCounter = 0
for line in file_lines:
votesCounter += 1
first_name = line.split()[0]
last_name = line.split()[1]
preference = line.split()[2]
for candToCheck in listOfCandidates:
if candToCheck.firstName + ' ' + candToCheck.lastName == first_name + ' ' + last_name:
if preference == "1":
candToCheck.firstPreference += 1
if preference == "2":
candToCheck.secondPreference += 1
if preference == "3":
candToCheck.thirdPreference += 1
if preference == "4":
candToCheck.fourthPreference += 1
break
listWinners = []
for winner in listOfCandidates:
if winner.position == "Officer":
listWinners.append(winner)
# Sort the winners by 1st preference, if there is a tie
# We look at the 2nd preference and so on
sorted_winners = sorted(listWinners, key=lambda x: x.firstPreference, reverse=True)
if sorted_winners[0].firstPreference == sorted_winners[1].firstPreference:
if sorted_winners[0].secondPreference == sorted_winners[1].secondPreference:
if sorted_winners[0].thirdPreference == sorted_winners[1].thirdPreference:
if sorted_winners[0].fourthPreference == sorted_winners[1].fourthPreference:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.fourthPreference), reverse=True)
else:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.thirdPreference), reverse=True)
else:
sorted_winners = sorted(listWinners,
key=lambda x: (x.firstPreference, x.secondPreference),
reverse=True)
# Print out the results
tkinter.Label(window, text="Position: GSU Officer").grid(row=15, column=0)
tkinter.Label(window,
text="Candidate 1stPreference 2ndPreference 3rdPreference 4thPreference").grid(
row=16, column=0)
rowCounter = 16
for winner in sorted_winners:
rowCounter += 1
                                    tkinter.Label(window,
                                                  text=winner.firstName + " " + winner.lastName + " " + str(
                                                      winner.firstPreference) + " " + str(
                                                      winner.secondPreference) + " " + str(
                                                      winner.thirdPreference) + " " + str(
                                                      winner.fourthPreference)).grid(row=rowCounter, column=0)
                                officer_winner = sorted_winners[0].firstName + " " + sorted_winners[0].lastName
tkinter.Label(window,text="Winner: " + sorted_winners[0].firstName + " " + sorted_winners[0].lastName).grid(row=rowCounter + 1, column=0)
tkinter.Label(window,text="Votes Received: " + str(sorted_winners[0].firstPreference)).grid(row=rowCounter + 2, column=0)
tkinter.Label(window, text="Total votes cast overall: " + str(votesCounter)).grid(row=rowCounter + 3, column=0)
percentsOfVoters = (sorted_winners[0].firstPreference * 100) / votesCounter
tkinter.Label(window,
text=str(round(percentsOfVoters, 1)) + " % of the voters voted for " +
sorted_winners[0].firstName + " " + sorted_winners[0].lastName).grid(
row=rowCounter + 4, column=0)
winners_file = open("winners.txt", "a")
winners_file.writelines(
"Winner for position " + sorted_winners[0].position + " " + "is " + sorted_winners[
0].firstName + " " + sorted_winners[0].lastName + "\n")
def summary():
                                    # Get a summary of the winners for all the positions
read_winners_file = open("winners.txt", "r")
lineCounter = rowCounter + 6
for line in read_winners_file:
tkinter.Label(window, text=line).grid(row=lineCounter, column=0)
lineCounter += 1
tkinter.Button(window, command=summary, text='Summary').grid(row=rowCounter + 5,
column=2)
rowCounter = 16
for winner in sorted_winners:
rowCounter += 1
tkinter.Label(window, text=winner.firstName + " " + winner.lastName + " " + str(
winner.firstPreference) + " " + str(winner.secondPreference) + " " + str(
winner.thirdPreference) + " " + str(winner.fourthPreference)).grid(
row=rowCounter, column=0)
tkinter.Button(window, command=results, text='Show election results').grid(row=13, column=2)
else:
tkinter.Label(window, text="You have already voted for Officer", fg="red").grid(row=11, column=0)
first_preference = tkinter.StringVar(window)
first_preference.set("---")
tkinter.Label(window, text="Select 1st Preference").grid(row=7, column=0)
w_president = tkinter.OptionMenu(window, first_preference, *listOfCandOfficer)
w_president.grid(row=7, column=1)
second_preference = tkinter.StringVar(window)
second_preference.set("---")
tkinter.Label(window, text="Select 2nd Preference").grid(row=8, column=0)
w_president = tkinter.OptionMenu(window, second_preference, *listOfCandOfficer)
w_president.grid(row=8, column=1)
third_preference = tkinter.StringVar(window)
third_preference.set("---")
tkinter.Label(window, text="Select 3rd Preference").grid(row=9, column=0)
w_president = tkinter.OptionMenu(window, third_preference, *listOfCandOfficer)
w_president.grid(row=9, column=1)
fourth_preference = tkinter.StringVar(window)
fourth_preference.set("---")
tkinter.Label(window, text="Select 4th Preference").grid(row=10, column=0)
w_president = tkinter.OptionMenu(window, fourth_preference, *listOfCandOfficer)
w_president.grid(row=10, column=1)
tkinter.Button(window, command=submit, text='Submit').grid(row=11, column=2)
if chosen_position == 'Faculty Officer':
                    # Get all the candidates applying for Faculty Officer
listOfCandFacultyOfficer = []
for cand in listOfCandidates:
if cand.position == 'Faculty Officer':
listOfCandFacultyOfficer.append(cand.firstName + " " + cand.lastName)
def submit():
                        # Check if the current logged-in user has already voted for the given position
readVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "r").readlines()
for person in readVotedPeopleFile:
if currentVoter.firstName + " " + currentVoter.lastName + " " + "Faculty Officer\n" == person:
currentVoter.votedForFacultyOfficer = 1
# Save 1st, 2nd, 3rd, 4th preference into Votes.txt
if currentVoter.votedForFacultyOfficer == 0:
writeVotedPeopleFile = open("PeopleWhoHaveVoted.txt", "a")
writeVotedPeopleFile.writelines(currentVoter.firstName + " " + currentVoter.lastName + " Faculty Officer\n")
first_preference_person = first_preference.get()
second_preference_person = second_preference.get()
third_preference_person = third_preference.get()
fourth_preference_person = fourth_preference.get()
votes_file = open("Votes.txt", "a")
votes_file.writelines(first_preference_person + " " + "1\n")
votes_file.writelines(second_preference_person + " " + "2\n")
votes_file.writelines(third_preference_person + " " + "3\n")
votes_file.writelines(fourth_preference_person + " " + "4\n")
votes_file.close()
def results():
# Read the votes and pick a winner
result_file = open("Votes.txt", "r")
file_lines = result_file.readlines()
votesCounter = 0
for line in file_lines:
votesCounter += 1
first_name = line.split()[0]
last_name = line.split()[1]
preference = line.split()[2]
for candToCheck in listOfCandidates:
if candToCheck.firstName + ' ' + candToCheck.lastName == first_name + ' ' + last_name:
if preference == "1":
candToCheck.firstPreference += 1
if preference == "2":
candToCheck.secondPreference += 1
if preference == "3":
candToCheck.thirdPreference += 1
if preference == "4":
candToCheck.fourthPreference += 1
break
listWinners = []
for winner in listOfCandidates:
if winner.position == "Faculty Officer":
listWinners.append(winner)
# Sort the winners by 1st preference, if there is a tie
# We look at the 2nd preference and so on
sorted_winners = sorted(listWinners, key=lambda x: x.firstPreference, reverse=True)
if sorted_winners[0].firstPreference == sorted_winners[1].firstPreference:
if sorted_winners[0].secondPreference == sorted_winners[1].secondPreference:
if sorted_winners[0].thirdPreference == sorted_winners[1].thirdPreference:
if sorted_winners[0].fourthPreference == sorted_winners[1].fourthPreference:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.fourthPreference), reverse=True)
else:
sorted_winners = sorted(listWinners, key=lambda x: (
x.firstPreference, x.thirdPreference), reverse=True)
else:
sorted_winners = sorted(listWinners,
key=lambda x: (x.firstPreference, x.secondPreference),
reverse=True)
# Print out the results
tkinter.Label(window, text="Position: GSU Faculty Officer").grid(row=15, column=0)
tkinter.Label(window,
text="Candidate 1stPreference 2ndPreference 3rdPreference 4thPreference").grid(
row=16, column=0)
rowCounter = 16
for winner in sorted_winners:
rowCounter += 1
tkinter.Label(window,
text=winner.firstName + " " + winner.lastName + " " + str(
winner.firstPreference) + " " + str(
winner.secondPreference) + " " + str(
winner.thirdPreference) + " " + str(
winner.fourthPreference)).grid(
row=rowCounter, column=0)
                                faculty_officer_winner = sorted_winners[0].firstName + " " + sorted_winners[0].lastName
tkinter.Label(window,
text="Winner: " + sorted_winners[0].firstName + " " + sorted_winners[
0].lastName).grid(row=rowCounter + 1, column=0)
tkinter.Label(window,
text="Votes Received: " + str(sorted_winners[0].firstPreference)).grid(
row=rowCounter + 2, column=0)
tkinter.Label(window, text="Total votes cast overall: " + str(votesCounter)).grid(
row=rowCounter + 3, column=0)
percentsOfVoters = (sorted_winners[0].firstPreference * 100) / votesCounter
tkinter.Label(window,
text=str(round(percentsOfVoters, 1)) + " % of the voters voted for " +
sorted_winners[0].firstName + " " + sorted_winners[0].lastName).grid(
row=rowCounter + 4, column=0)
winners_file = open("winners.txt", "a")
winners_file.writelines(
"Winner for position " + sorted_winners[0].position + " " + "is " + sorted_winners[
0].firstName + " " + sorted_winners[0].lastName + "\n")
def summary():
                                    # Get a summary of the winners for all the positions
read_winners_file = open("winners.txt", "r")
lineCounter = rowCounter + 6
for line in read_winners_file:
tkinter.Label(window, text=line).grid(row=lineCounter, column=0)
lineCounter += 1
tkinter.Button(window, command=summary, text='Summary').grid(row=rowCounter + 5,
column=2)
rowCounter = 16
for winner in sorted_winners:
rowCounter += 1
tkinter.Label(window, text=winner.firstName + " " + winner.lastName + " " + str(
winner.firstPreference) + " " + str(winner.secondPreference) + " " + str(
winner.thirdPreference) + " " + str(winner.fourthPreference)).grid(
row=rowCounter, column=0)
tkinter.Button(window, command=results, text='Show election results').grid(row=13, column=2)
else:
tkinter.Label(window, text="You have already voted for Faculty Officer", fg="red").grid(row=11, column=0)
first_preference = tkinter.StringVar(window)
first_preference.set("---")
tkinter.Label(window, text="Select 1st Preference").grid(row=7, column=0)
w_president = tkinter.OptionMenu(window, first_preference, *listOfCandFacultyOfficer)
w_president.grid(row=7, column=1)
second_preference = tkinter.StringVar(window)
second_preference.set("---")
tkinter.Label(window, text="Select 2nd Preference").grid(row=8, column=0)
w_president = tkinter.OptionMenu(window, second_preference, *listOfCandFacultyOfficer)
w_president.grid(row=8, column=1)
third_preference = tkinter.StringVar(window)
third_preference.set("---")
tkinter.Label(window, text="Select 3rd Preference").grid(row=9, column=0)
w_president = tkinter.OptionMenu(window, third_preference, *listOfCandFacultyOfficer)
w_president.grid(row=9, column=1)
fourth_preference = tkinter.StringVar(window)
fourth_preference.set("---")
tkinter.Label(window, text="Select 4th Preference").grid(row=10, column=0)
w_president = tkinter.OptionMenu(window, fourth_preference, *listOfCandFacultyOfficer)
w_president.grid(row=10, column=1)
tkinter.Button(window, command=submit, text='Submit').grid(row=11, column=2)
drop_down_menu = tkinter.StringVar(window)
drop_down_menu.set(listOfAvailablePositions[0])
drop_down_menu.trace("w", vote)
#Dropdown menu for the logged in voter to choose for which position is voting
tkinter.Label(window, text="Choose position").grid(row=6, column=0)
w = tkinter.OptionMenu(window, drop_down_menu, *listOfAvailablePositions)
w.grid(row=6, column=1)
        #In case we are outside the voting period,
        #calculate how many days are left until the next voting period
else:
if electionStartDate > currentDate:
gap=(electionStartDate - currentDate).days
else:
gap = (currentDate - electionStartDate).days
gap = 365 - gap
tkinter.Label(window, text="You are out of voting period. Try again in " + str(gap) + ' days', fg='red').grid(row=3, column = 1)
    #Message appears if the login details are wrong
else:
tkinter.Label(window, text="Wrong ID and/or password", fg="red").grid(row=3, column = 1)
tkinter.Button(window, command=login, text='Login').grid(columnspan = 2)
#Reading the candidates and the voters from the .txt files
candidates = open("GSUCandidates.txt","r")
voters = open("StudentVoters.txt","r")
#Saving the candidates and the voters into the empty lists
#that we created earlier using class for both Candidates and Voters
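#Editor's note: the expected line formats below are inferred from the parsing
#code that follows; the concrete names shown are illustrative only.
#  GSUCandidates.txt: "<Position> <First> <Last>", e.g. "President Jane Doe",
#                     or four tokens for two-word positions, e.g. "Faculty Officer John Roe"
#  StudentVoters.txt: "<StudentId> <Password> <First> <Last>", e.g. "1 123 Sam Poe"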
for candidate in candidates.readlines():
if len(candidate.split()) == 3:
candidatePosition = candidate.split()[0]
candidateFirstName = candidate.split()[1]
candidateLastName = candidate.split()[2]
if not any(candidate.firstName == candidateFirstName and candidate.lastName == candidateLastName for candidate in listOfCandidates):
listOfCandidates.append(Candidate(candidatePosition, candidateFirstName, candidateLastName))
if candidatePosition not in listOfAvailablePositions:
listOfAvailablePositions.append(candidatePosition)
if len(candidate.split()) == 4:
candidatePosition = candidate.split()[0] + ' ' + candidate.split()[1]
candidateFirstName = candidate.split()[2]
candidateLastName = candidate.split()[3]
if not any(candidate.firstName == candidateFirstName and candidate.lastName == candidateLastName for candidate in listOfCandidates):
listOfCandidates.append(Candidate(candidatePosition, candidateFirstName, candidateLastName))
if candidatePosition not in listOfAvailablePositions:
listOfAvailablePositions.append(candidatePosition)
for voter in voters.readlines():
voterStudentId = voter.split()[0]
voterPassword = voter.split()[1]
voterFirstName = voter.split()[2]
voterLastName = voter.split()[3]
listOfVoters.append(Voter(voterStudentId, voterPassword, voterFirstName, voterLastName))
currentDate = datetime.datetime.now().date()
#Terminate the program when exit button is clicked
def close():
exit()
tkinter.Button(window, command=close, text='Exit').grid(row = 5, column = 5)
window.mainloop() |
py | b40f2154a7b36b6fd18b635d856725aad91aa7db | import asyncio
import ssl
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
import six
from . import client
from . import exceptions
from . import packet
from . import payload
class AsyncClient(client.Client):
"""An Engine.IO client for asyncio.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports, compatible with the asyncio
framework on Python 3.5 or newer.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
def is_asyncio_based(self):
return True
async def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Note: this method is a coroutine.
Example usage::
eio = engineio.Client()
await eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.text_type):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return await getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
async def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
"""
if self.read_loop_task:
await self.read_loop_task
async def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
Note: this method is a coroutine.
"""
await self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
async def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
Note: this method is a coroutine.
"""
if self.state == 'connected':
await self._send_packet(packet.Packet(packet.CLOSE))
await self.queue.put(None)
self.state = 'disconnecting'
await self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
await self.ws.close()
if not abort:
await self.read_loop_task
self.state = 'disconnected'
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
        The return value is an ``asyncio.Task`` object; the task is already
        scheduled when this function returns, so unlike the threaded client
        there is no ``start()`` method to call.
        """
return asyncio.ensure_future(target(*args, **kwargs))
async def sleep(self, seconds=0):
"""Sleep for the requested amount of time.
Note: this method is a coroutine.
"""
return await asyncio.sleep(seconds)
def create_queue(self):
"""Create a queue object."""
q = asyncio.Queue()
q.Empty = asyncio.QueueEmpty
return q
def create_event(self):
"""Create an event object."""
return asyncio.Event()
def _reset(self):
if self.http: # pragma: no cover
asyncio.ensure_future(self.http.close())
super()._reset()
async def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp not installed -- cannot make HTTP '
'requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status < 200 or r.status >= 300:
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status))
try:
p = payload.Payload(encoded_payload=await r.read())
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
await self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if await self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
async def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp package not installed')
return False
websocket_url = self._get_engineio_url(url, engineio_path,
'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
if self.http is None or self.http.closed: # pragma: no cover
self.http = aiohttp.ClientSession()
try:
if not self.ssl_verify:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers, ssl=ssl_context)
else:
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers)
except (aiohttp.client_exceptions.WSServerHandshakeError,
aiohttp.client_exceptions.ServerConnectionError):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode(
always_bytes=False)
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
async def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
await self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
async def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
await self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
async def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None or self.http.closed:
self.http = aiohttp.ClientSession()
http_method = getattr(self.http, method.lower())
try:
if not self.ssl_verify:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
else:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout))
except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret
async def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
if self.ping_loop_event is None:
self.ping_loop_event = self.create_event()
else:
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
await self.ws.close()
await self.queue.put(None)
break
self.pong_received = False
await self._send_packet(packet.Packet(packet.PING))
try:
await asyncio.wait_for(self.ping_loop_event.wait(),
self.ping_interval)
except (asyncio.TimeoutError,
asyncio.CancelledError): # pragma: no cover
pass
self.logger.info('Exiting ping task')
async def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
await self.queue.put(None)
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
await self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=await r.read())
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
await self.queue.put(None)
break
for pkt in p.packets:
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
await self.ping_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = (await self.ws.receive()).data
except aiohttp.client_exceptions.ServerDisconnectedError:
self.logger.info(
'Read loop: WebSocket connection was closed, aborting')
await self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error "%s", aborting', str(e))
await self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
pkt = packet.Packet(encoded_packet=p)
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
await self.ping_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [await asyncio.wait_for(self.queue.get(), timeout)]
except (self.queue.Empty, asyncio.TimeoutError,
asyncio.CancelledError):
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get_nowait())
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = await self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
if pkt.binary:
await self.ws.send_bytes(pkt.encode(
always_bytes=False))
else:
await self.ws.send_str(pkt.encode(
always_bytes=False))
self.queue.task_done()
except aiohttp.client_exceptions.ServerDisconnectedError:
self.logger.info(
'Write loop: WebSocket connection was closed, '
'aborting')
break
self.logger.info('Exiting write loop task')
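# Illustrative usage sketch added by the editor; not part of the library itself.
# It assumes an Engine.IO server is reachable at the URL below and that event
# handlers are registered through the ``on()`` method inherited from the base
# ``client.Client`` class.
if __name__ == '__main__':  # pragma: no cover
    async def _demo():
        eio = AsyncClient()
        @eio.on('message')
        async def on_message(data):
            print('received:', data)
        await eio.connect('http://localhost:5000')
        await eio.send('hello from the demo client')
        await eio.wait()
    asyncio.get_event_loop().run_until_complete(_demo())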
|
py | b40f22622948e33205d9db9c588d962c528edec6 | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
return frozenset(res)
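# Minimal smoke-test sketch added by the editor; not part of the original module.
# It only assumes that a default ``PysmtEnv()`` can be constructed; it prints the
# symbol names of the encoding above and the number of hints for quick inspection.
if __name__ == "__main__":
    _env = PysmtEnv()
    _symbols, _init, _trans, _fairness = transition_system(_env)
    print("symbols:", sorted(s.symbol_name() for s in _symbols))
    print("number of hints:", len(hints(_env)))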
|
py | b40f2282184e2a0890add9bf503682ddce0f9ca3 | # coding: utf-8
"""
Samsara API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import samsara
from samsara.rest import ApiException
from samsara.models.humidity_response_sensors import HumidityResponseSensors
class TestHumidityResponseSensors(unittest.TestCase):
""" HumidityResponseSensors unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testHumidityResponseSensors(self):
"""
Test HumidityResponseSensors
"""
model = samsara.models.humidity_response_sensors.HumidityResponseSensors()
if __name__ == '__main__':
unittest.main()
|
py | b40f228f8b668dd8df55df27818f6af77e5614eb | # Copyright (c) 2015, Florian Jung and Timm Weber
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
def distance_point_line(p, l1, l2):
# (x - l1.x) * (l2.y-l1.y)/(l2.x-l1.x) + l1.y = y
# x * (l2.y-l1.y)/(l2.x-l1.x) - l1.x * (l2.y-l1.y)/(l2.x-l1.x) + l1.y - y = 0
# x * (l2.y-l1.y) - l1.x * (l2.y-l1.y) + l1.y * (l2.x-l1.x) - y * (l2.x-l1.x) = 0
# ax + by + c = 0
# with a = (l2.y-l1.y), b = -(l2.x-l1.x), c = l1.y * (l2.x-l1.x) - l1.x * (l2.y-l1.y)
a = (l2.y-l1.y)
b = -(l2.x-l1.x)
c = l1.y * (l2.x-l1.x) - l1.x * (l2.y-l1.y)
d = math.sqrt(a**2 + b**2)
a/=d
b/=d
c/=d
assert (abs(a*l1.x + b*l1.y + c) < 0.001)
assert (abs(a*l2.x + b*l2.y + c) < 0.001)
return abs(a*p.x + b*p.y + c)
def is_colinear(points, epsilon=1):
for point in points:
if distance_point_line(point, points[0], points[-1]) > epsilon:
return False
return True
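# Example usage sketch added by the editor. The functions above only assume
# point-like objects exposing ``x`` and ``y`` attributes; the namedtuple below
# is one such stand-in and is not part of the original module.
if __name__ == "__main__":
    from collections import namedtuple
    Point = namedtuple("Point", ["x", "y"])
    a, b, c = Point(0, 0), Point(5, 5.4), Point(10, 10)
    # perpendicular distance of b from the line through a and c (about 0.283)
    print(distance_point_line(b, a, c))
    # True: every point lies within epsilon=1 of the line through the endpoints
    print(is_colinear([a, b, c]))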
|
py | b40f23329d9f44d2c75b08287b3a886b2422005c | # Test MSOffice
#
# Main purpose of test is to ensure that Dynamic COM objects
# work as expected.
# Assumes Word and Excel installed on your machine.
import win32com, sys, string, win32api, traceback
import win32com.client.dynamic
from win32com.test.util import CheckClean
import pythoncom
from win32com.client import gencache
from pywintypes import Unicode
error = "MSOffice test error"
# Test a few of the MSOffice components.
def TestWord():
# Try and load the object exposed by Word 8
# Office 97 - _totally_ different object model!
try:
# NOTE - using "client.Dispatch" would return an msword8.py instance!
print "Starting Word 8 for dynamic test"
word = win32com.client.dynamic.Dispatch("Word.Application")
TestWord8(word)
word = None
# Now we will test Dispatch without the new "lazy" capabilities
print "Starting Word 8 for non-lazy dynamic test"
dispatch = win32com.client.dynamic._GetGoodDispatch("Word.Application")
typeinfo = dispatch.GetTypeInfo()
attr = typeinfo.GetTypeAttr()
olerepr = win32com.client.build.DispatchItem(typeinfo, attr, None, 0)
word = win32com.client.dynamic.CDispatch(dispatch, olerepr)
dispatch = typeinfo = attr = olerepr = None
TestWord8(word)
except pythoncom.com_error:
print "Starting Word 7 for dynamic test"
word = win32com.client.Dispatch("Word.Basic")
TestWord7(word)
print "Starting MSWord for generated test"
from win32com.client import gencache
word = gencache.EnsureDispatch("Word.Application.8")
TestWord8(word)
def TestWord7(word):
word.FileNew()
# If not shown, show the app.
if not word.AppShow(): word._proc_("AppShow")
for i in xrange(12):
word.FormatFont(Color=i+1, Points=i+12)
word.Insert("Hello from Python %d\n" % i)
word.FileClose(2)
def TestWord8(word):
word.Visible = 1
doc = word.Documents.Add()
wrange = doc.Range()
for i in range(10):
wrange.InsertAfter("Hello from Python %d\n" % i)
paras = doc.Paragraphs
for i in range(len(paras)):
p = paras[i]()
p.Font.ColorIndex = i+1
p.Font.Size = 12 + (4 * i)
# XXX - note that
# for para in paras:
# para().Font...
    # doesn't seem to work - no error, just doesn't work
# Should check if it works for VB!
doc.Close(SaveChanges = 0)
word.Quit()
win32api.Sleep(1000) # Wait for word to close, else we
# may get OA error.
def TestWord8OldStyle():
try:
import win32com.test.Generated4Test.msword8
except ImportError:
print "Can not do old style test"
def TextExcel(xl):
xl.Visible = 0
if xl.Visible: raise error, "Visible property is true."
xl.Visible = 1
if not xl.Visible: raise error, "Visible property not true."
if int(xl.Version[0])>=8:
xl.Workbooks.Add()
else:
xl.Workbooks().Add()
xl.Range("A1:C1").Value = (1,2,3)
xl.Range("A2:C2").Value = ('x','y','z')
xl.Range("A3:C3").Value = ('3','2','1')
for i in xrange(20):
xl.Cells(i+1,i+1).Value = "Hi %d" % i
if xl.Range("A1").Value <> "Hi 0":
raise error, "Single cell range failed"
if xl.Range("A1:B1").Value <> ((Unicode("Hi 0"),2),):
raise error, "flat-horizontal cell range failed"
if xl.Range("A1:A2").Value <> ((Unicode("Hi 0"),),(Unicode("x"),)):
raise error, "flat-vertical cell range failed"
if xl.Range("A1:C3").Value <> ((Unicode("Hi 0"),2,3),(Unicode("x"),Unicode("Hi 1"),Unicode("z")),(3,2,Unicode("Hi 2"))):
raise error, "square cell range failed"
xl.Range("A1:C3").Value =((3,2,1),("x","y","z"),(1,2,3))
if xl.Range("A1:C3").Value <> ((3,2,1),(Unicode("x"),Unicode("y"),Unicode("z")),(1,2,3)):
raise error, "Range was not what I set it to!"
# test dates out with Excel
xl.Cells(5,1).Value = "Excel time"
xl.Cells(5,2).Formula = "=Now()"
import time
xl.Cells(6,1).Value = "Python time"
xl.Cells(6,2).Value = pythoncom.MakeTime(time.time())
xl.Cells(6,2).NumberFormat = "d/mm/yy h:mm"
xl.Columns("A:B").EntireColumn.AutoFit()
xl.Workbooks(1).Close(0)
xl.Quit()
def TestAll():
try:
TestWord()
print "Starting Excel for Dynamic test..."
xl = win32com.client.dynamic.Dispatch("Excel.Application")
TextExcel(xl)
try:
print "Starting Excel 8 for generated excel8.py test..."
mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 0, 1, 2, bForDemand=1)
xl = win32com.client.Dispatch("Excel.Application")
TextExcel(xl)
except ImportError:
print "Could not import the generated Excel 97 wrapper"
try:
import xl5en32
mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 9, 1, 0)
xl = win32com.client.Dispatch("Excel.Application.5")
print "Starting Excel 95 for makepy test..."
TextExcel(xl)
except ImportError:
print "Could not import the generated Excel 95 wrapper"
except KeyboardInterrupt:
print "*** Interrupted MSOffice test ***"
except:
traceback.print_exc()
if __name__=='__main__':
TestAll()
CheckClean()
pythoncom.CoUninitialize()
|
py | b40f256cf03c5a58cfb906fa8085d4cb61c34455 | # Generated by Django 2.1.3 on 2018-12-02 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('User_id', models.IntegerField()),
('Post', models.CharField(max_length=2000)),
('Date', models.DateTimeField(auto_now_add=True)),
],
),
]
|
py | b40f27a48972b2c9a714ed78f4f152b51250d313 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from psycopg2.extras import DictCursor
from functools import partial
import psycopg2.extensions
import threading
import psycopg2
import datetime
import argparse
import logging
import termios
import select
import six
import six.moves.queue as Queue
import yaml
import time
import math
import sys
import tty
import os
import re
if six.PY2:
import types
list_type = types.ListType
else:
list_type = list
POISON = object()
TIMEOUT = 0.2
DELTA = datetime.timedelta(seconds=60)
MINXLOGFREE = 5
IGNORE_SERVER_CHECK = False
EXPLAIN_ROWS_RE = re.compile(r"\(.*rows=(\d+)\s+width=\d+\)")
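# ANSI/VT100 escape sequences used by show() further below to save/restore the
# cursor, move it, and erase a line so per-job progress can be redrawn in place.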
tty_store = '\033[s'
tty_restore = '\033[u'
tty_up = '\033[{0}A'
tty_erase = '\033[K'
tty_forward = '\033[{0}C'
global_write = threading.Lock()
# accept 'any', any number of spaces, and either an expression within () with
# any number of heading spaces, letters and trailing spaces, and s after the ),
# or a simple %s with any number of heading and trailing spaces. Case is ignored.
# Example of type 1 expression: any ( %( name )s )
# Example of type 2 expression: any ( %s )
sql_param_pattern = re.compile(r'any\s*\(((\s*%\(\s*(\w+)\s*\))|\s*%)s\s*\)', re.I)
class PlaceholderParseException(Exception):
pass
SQL_GET_TIMEOUT = "select current_setting('statement_timeout')"
SQL_SET_TIMEOUT = 'set statement_timeout = %s'
SQL_SET_REPLICATION_ROLE = "set session_replication_role = 'replica'"
SQL_GETLOAD = "select val from public.nice_get_server_information('load1')"
SQL_GETXLOG = "select val/1024/1024/1024 as xlogfree from public.nice_get_server_information('xlogfreesize')"
SQL_DROP_PLPYTHONFUNC = 'drop function if exists public.nice_get_server_information(text)'
SQL_CREATE_PLPYTHONFUNC = \
"""
DO $INSTALL$
BEGIN
CREATE OR REPLACE FUNCTION public.nice_get_server_information(key IN text, val OUT real)
LANGUAGE plpythonu
AS
$BODY$
'''
This function provides two server diagnostic figures used by niceupdate.
    For xlogfree it should not be called too often.
'''
import os
def get_load1_average():
return os.getloadavg()[0]
def get_xlog_directory():
"Get the xlog directory of the database cluster."
if 'stmt_setting' in SD:
plan = SD['stmt_setting']
else:
plan = plpy.prepare("SELECT name, setting FROM pg_catalog.pg_settings where name = 'data_directory'")
SD['stmt_setting']= plan
rv = plpy.execute(plan)
datadir = rv[0]['setting']
if not 'pg_ver' in SD:
rv = plpy.execute("select current_setting('server_version_num')::numeric / 1e4 as ver")
SD['pg_ver']= rv[0]["ver"]
return os.path.join(datadir, 'pg_wal' if SD['pg_ver'] >= 10 else 'pg_xlog')
def get_mount_point(realpathname):
"Get the mount point of the filesystem containing pathname"
pathname = os.path.normcase(realpathname)
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
parent_device = os.stat(pathname).st_dev
return mount_point
def get_fs_space(pathname):
"Get the free space of the filesystem containing pathname in byte"
stat = os.statvfs(pathname)
free = stat.f_bavail * stat.f_bsize
return free
def get_xlog_free_space():
"Get size of available space on device for xlog directory"
xlogdir = get_xlog_directory()
realpath = os.path.realpath(xlogdir)
mount_point = get_mount_point(realpath)
return get_fs_space(mount_point)
if key == 'load1':
return get_load1_average()
elif key == 'xlogfreesize':
return get_xlog_free_space()
$BODY$
SECURITY DEFINER;
GRANT EXECUTE on FUNCTION public.nice_get_server_information(text) TO public;
END;
$INSTALL$;
"""
def getargs():
argp = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='executes a statement for the resultset of a query with load control of the server'
,
epilog="""The config file can look like this at minimum:
database: customer
getid: select myid as id from schema.table
update: update schema.table set value = value + 1 where myid = %(id)s
One of the following should be given and greater than 0:
commitrows: 20
chunksize: 0
Following attribute are optional and would be overwritten if a command line arg is provided:
maxload: 2.0
vacuum_cycles: 10
vacuum_delay: 100
delay: 0
vacuum_table: schema.table
"""
)
argp.add_argument('-f', '--file', dest='filename', required=True, help='yaml file for config data')
argp.add_argument('-H', '--host', dest='host', help='database host to connect to', required=True)
argp.add_argument('-p', '--port', dest='port', help='database port to connect to [5432]', default=5432, type=int)
argp.add_argument('-d', '--dbname', dest='dbname', help='database name to connect to', required=True)
argp.add_argument('-U', '--user', dest='user', help='user for database connect')
argp.add_argument('-l', '--maxload', dest='maxload', type=float, metavar='FLOAT',
help='update unless load is higher than FLOAT')
argp.add_argument('-C', '--commitrows', dest='commitrows', help='commit every NUMBER of rows', metavar='NUMBER',
type=int)
argp.add_argument('-t', '--test', dest='test', action='store_true', help='test parameter substitution and roll back'
, default=False)
argp.add_argument('--delay', dest='delay', help='delay in seconds between executing consecutive statements', type=int)
argp.add_argument('-F', '--force', dest='force',
help='Skip both the xlog and the load check when the server extension is absent on the target database'
, action='store_true')
argp.add_argument('-v', '--verbose', dest='verbose', help='set log level DEBUG', action='store_true')
argp.add_argument('-V', '--vacuumcycles', dest='vacuum_cycles', help='perform vacuum after NUMBER of commits',
metavar='NUMBER', type=int)
argp.add_argument('-X', '--xlogminimum', dest='xlogminimum', type=int, help='minimum size of xlog partition in GB',
default=MINXLOGFREE)
argp.add_argument('-Y', '--vacuumdelay', dest='vacuum_delay', help='vacuum_cost_delay (0...100) in ms', default=0,
type=int)
argp.add_argument('-u', '--uninstall', dest='uninstall', action='store_true',
help='Uninstall helper server function from specified database')
argp.add_argument('-i', '--install', dest='install', action='store_true',
help='Install helper server function on specified database ')
argp.add_argument('--disable-triggers', dest='disable_triggers', default=False, action='store_true',
help='disable triggers on all tables in the niceupdate database session')
return argp.parse_args()
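# Illustrative invocation (hypothetical host and file names, not part of the original script):
#   python niceupdate.py -f job.yaml -H db1.example.org -d customer -U admin -C 50 -l 2.0
# This would run the update described in job.yaml against database "customer" on db1.example.org,
# committing every 50 rows and pausing whenever the server load1 rises above 2.0.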
def load_config(args):
if args.verbose:
logger.setLevel(logging.DEBUG)
global MINXLOGFREE, IGNORE_SERVER_CHECK
MINXLOGFREE = args.xlogminimum
IGNORE_SERVER_CHECK = args.force
with open(args.filename) as f:
document = yaml.safe_load(f)
logger.info('START: job file {} loaded.'.format(args.filename))
document['test'] = args.test
document['batchmode'] = not sys.stdout.isatty()
document['verbose'] = args.verbose
document['commitrows'] = args.commitrows or document.get('commitrows', 0)
document['maxload'] = args.maxload or document.get('maxload') or 2.0
document['vacuum_cycles'] = args.vacuum_cycles or document.get('vacuum_cycles') or 0
document['vacuum_delay'] = args.vacuum_delay or document.get('vacuum_delay') or 0
document['disable_triggers'] = args.disable_triggers or document.get('disable_triggers', False)
if bool(document.get('commitrows')) == bool(document.get('chunksize')) or document.get('commitrows', 0) \
+ document.get('chunksize', 0) < 1:
sys.stderr.write('Either commitrows or chunksize should be greater than 0.\n')
sys.exit(1)
if document['vacuum_cycles'] and not document.get('vacuum_table'):
sys.stderr.write('No vacuum table specified\n')
sys.exit(1)
if document['vacuum_delay'] and (document['vacuum_delay'] > 100 or document['vacuum_delay'] < 0):
sys.stderr.write('Specify value between 0...100 for vacuum delay\n')
sys.exit(1)
document['delay'] = args.delay or document.get('delay') or 0
document['user'] = args.user or os.getenv('PGUSER') or os.getenv('USER')
logger.debug(document)
return document
def connect(phost, pport, pdbname, pusername, disable_triggers=False):
"""Returns tupel (connection, cursor)"""
conn = psycopg2.connect(host=phost, database=pdbname, port=pport, user=pusername,
application_name='niceupdate')
cur = conn.cursor(cursor_factory=DictCursor)
if disable_triggers:
cur.execute(SQL_SET_REPLICATION_ROLE)
return conn, cur
def show(line, prefix, text):
if line == 0:
l_up = ''
else:
l_up = tty_up.format(line)
mess = tty_store + l_up + '\r' + tty_erase + prefix + text + tty_restore
global_write.acquire()
sys.stdout.write(mess)
sys.stdout.flush()
global_write.release()
class Query(object):
def __init__(self, conn, sql, name=None):
if conn:
if name:
self.cur = conn.cursor(name=name, cursor_factory=DictCursor)
self.cur.withhold = True
else:
self.cur = conn.cursor(cursor_factory=DictCursor)
self.sql = sql
self.test = False
self.verbose = False
self.queue = Queue.Queue()
self.canceled = False
self.queueIsEmpty = threading.Event()
self.updChunkSize = 0
def get_param_name(self, sql):
'''Extract the name of the placeholder from the ANY expression used for chunked updates.'''
logger.debug(sql)
names = set()
anonymous = set()
name = ''
for m in sql_param_pattern.finditer(sql):
name = m.group(3) # name from a %(name)s placeholder
if not name: # there is an anonymous placeholder
name = m.group(0) + str(m.__hash__()) # which should occur only once
anonymous.add(name)
else:
names.add(name)
name_count = len(names)
anon_count = len(anonymous)
logger.debug(names)
logger.debug(anonymous)
if name_count == 1 and anon_count == 0:
return name
elif name_count == 0 and anon_count == 1:
return None
else:
raise PlaceholderParseException('Too many or too few placeholders found')
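# Illustrative behaviour (a sketch; assumes sql_param_pattern matches psycopg2-style %(name)s and %s placeholders):
#   "update t set v = v + 1 where id = any(%(ids)s)"  -> returns 'ids'
#   "update t set v = v + 1 where id = any(%s)"       -> returns None (single anonymous placeholder)
#   a statement containing two distinct named placeholders raises PlaceholderParseException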
def argexec(self, *args, **kwargs):
try:
if args:
self.cur.execute(self.sql, args)
else:
self.cur.execute(self.sql, kwargs)
except psycopg2.extensions.QueryCanceledError as p:
self.kindly.show_exception(p)
except BaseException as e:
self.kindly.cancel()
self.kindly.show_exception(e)
logger.debug(kwargs)
logger.debug(args)
logger.exception('query exception')
def exec_query(self, param):
"""keyboard interruptable query execution, param should be an dict"""
if self.test and self.verbose:
self.kindly.show_message(self.cur.mogrify(self.sql, param))
qth = threading.Thread(target=self.argexec, args=(), kwargs=param)
qth.start()
while True:
qth.join(TIMEOUT)
if not qth.isAlive():
break
return self.cur.rowcount > 0 # indicates that we were not canceled
def process_queue(self):
'''Execute the given sql for each parameter taken from the queue'''
try:
while not self.canceled:
param = self.queue.get(True)
if param == POISON or self.canceled:
break
self.argexec(**param)
if self.queue.empty():
self.queueIsEmpty.set()
self.queueIsEmpty.clear()
except BaseException as e:
self.kindly.cancel()
self.kindly.show_exception(e)
finally:
if not self.queueIsEmpty.isSet():
self.queueIsEmpty.set()
def chunked_process_queue(self):
'''Build a list of parameters and pass it as an array to the server'''
chunkCount = 0
placeholder = self.get_param_name(self.sql)
logger.debug('Parametername found: {}'.format(placeholder))
chunk = []
try:
while not self.canceled:
param = self.queue.get(True)
if self.canceled:
break
if not param == POISON:
chunk.append(param[0])
chunkCount += 1
if chunk and (chunkCount >= self.updChunkSize or self.queue.empty() or param == POISON):
if not placeholder:
self.argexec(*(chunk, ))
else:
self.argexec(**{placeholder: chunk})
chunkCount = 0
del chunk[:]
if param == POISON:
break
if self.queue.empty():
self.queueIsEmpty.set()
self.queueIsEmpty.clear()
except BaseException as e:
logger.exception('Parameter: {}'.format(param))
self.kindly.cancel()
self.kindly.show_exception(e)
finally:
if not self.queueIsEmpty.isSet():
self.queueIsEmpty.set()
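# Sketch of the chunked flow (illustrative, not part of the original code): with updChunkSize = 3
# and ids 1..5 queued, argexec receives the array [1, 2, 3] and then [4, 5] (a chunk may be flushed
# earlier when the queue drains or POISON arrives), so the update statement gets one array parameter
# per round trip instead of one execution per id.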
class KindlyUpdate(object):
def __init__(self, config, host, port, database):
self.canceled = False
self.commitrows = config['commitrows'] or config['chunksize']
self.config = config
self.conn, self.cur = connect(host, port, database, config['user'], config.get('disable_triggers', False))
self.dbname = database
self.delay = config.get('delay', 0)
self.eta = datetime.timedelta(seconds=0)
self.fn_show = sys.stdout.write
self.idcount = 0
self.last_exception = ''
self.max_load_average = self.config['maxload']
self.maxcount = 0
self.myquery = ''
self.next_xlog_fetch_time = datetime.datetime.now()
self.starttime = None
self.statusmessage = ''
self.test = self.config['test']
self.thresholdlock = threading.Lock()
self.vacuum_cycles = config.get('vacuum_cycles', 0)
self.verbose = self.config.get('verbose', False)
logger.debug('connected to {}'.format(database))
def build_query(self, sql, name=None):
q = Query(self.conn, sql, name)
q.kindly = self
return q
def build_update_query(self):
update_query = self.build_query(self.config.get('update'))
update_query.test = self.test
update_query.verbose = self.verbose
update_query.updChunkSize = self.config.get('chunksize', 0)
if update_query.updChunkSize:
update_query.get_param_name(update_query.sql) # may raise PlaceholderParseException; check before starting the thread
return update_query
def wait_and_rollback(self):
self.myquery.queueIsEmpty.wait()
self.cur.connection.rollback()
self.show_message('changes rolled back')
def on_commit(self, idcount):
self.conn.commit()
self.calc_eta()
self.show_message('committed')
if self.delay:
time.sleep(self.delay)
self.wait_for_load_stabilization()
if self.vacuum_cycles and not self.canceled and idcount % (self.commitrows * self.vacuum_cycles) == 0:
self.show_message('committed VACUUM')
self.perform_vacuum()
def build_update_thread(self):
self.myquery = self.build_update_query()
if self.myquery.updChunkSize > 0:
updater = threading.Thread(target=self.myquery.chunked_process_queue, name=self.dbname)
else:
updater = threading.Thread(target=self.myquery.process_queue, name=self.dbname)
return updater
def run(self):
try:
updater = self.build_update_thread()
updater.start()
self.idcount = 0
self.starttime = datetime.datetime.now()
for row in self.getids():
self.idcount += 1
if self.canceled:
self.show_message('canceled')
break
self.myquery.queue.put(row)
if self.test:
self.wait_and_rollback()
break
elif self.idcount % self.commitrows == 0:
self.myquery.queueIsEmpty.wait()
if self.canceled:
break
self.on_commit(self.idcount)
self.myquery.queue.put(POISON)
updater.join()
if not (self.canceled or self.test):
self.report_result()
except PlaceholderParseException as e:
self.cancel()
self.last_exception = e
logger.exception(e)
except Exception as e:
logger.exception(e)
self.last_exception = e
self.cancel()
if not self.conn.closed:
self.cur.connection.rollback()
finally:
if not self.conn.closed:
self.conn.commit()
self.cur.close()
self.conn.close()
def getids(self):
'''Generator yielding rows from the getid sql statement'''
self.maxcount = self.get_estimated_rowcount()
# build a named withhold cursor
q = self.build_query(self.config.get('getid'), 'getid')
self.cur.itersize = max(self.cur.itersize, self.commitrows)
self.show_message('')
q.exec_query(None)
self.conn.commit()
self.wait_for_load_stabilization()
for row in q.cur:
yield row
def get_estimated_rowcount(self):
'''Estimate the row count of the getid query using EXPLAIN.
Parse the first row in order to retrieve the number of rows.
Return int'''
explain = "explain " + self.config.get('getid')
q = self.build_query(explain)
s = None
r = 0
if q.exec_query(None):
s = q.cur.fetchone()
if s:
m = EXPLAIN_ROWS_RE.search(s[0])
if m:
r = int(m.group(1))
return r
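# Illustrative EXPLAIN output (hypothetical; assumes EXPLAIN_ROWS_RE captures the rows=N token):
#   Seq Scan on schema.table  (cost=0.00..35.50 rows=2550 width=4)
# would yield an estimated row count of 2550.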
def get_eta(self):
return self.eta
def calc_eta(self):
'''calculate estimated time to finish'''
if self.starttime == None:
return
chunksRemain = self.maxcount - self.idcount
avgDuration = ((datetime.datetime.now() - self.starttime) / self.idcount if self.idcount
> 0 else datetime.timedelta(seconds=0))
eta = chunksRemain * avgDuration
eta = datetime.timedelta(seconds=math.floor(eta.total_seconds()))
self.eta = eta
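# Worked example: with maxcount=1000 and idcount=200 rows processed in 100 seconds,
# avgDuration is 0.5 s/row, so eta = 800 * 0.5 s = 400 s (floored to whole seconds).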
def get_message_column(self):
"""Messages start after progress information"""
countlen = len(str(self.maxcount))
namelen = len(self.dbname) + 2
return 2 * countlen + namelen + 2
def get_load_average(self):
'''Retrieve server load1 as float'''
self.cur.execute(SQL_GETLOAD)
self.conn.commit()
for row in self.cur:
return row['val']
def fetch_current_xlog(self):
'''Retrieve xlog partition free space in GB'''
self.cur.execute(SQL_GETXLOG)
for row in self.cur:
return row['xlogfree']
self.conn.commit()
def get_xlog_free(self):
'''Retrieve xlog free size from the server cluster, no more often than every DELTA seconds'''
if self.next_xlog_fetch_time < datetime.datetime.now():
self.next_xlog_fetch_time = datetime.datetime.now() + DELTA
self.xlogfreesize = self.fetch_current_xlog()
return self.xlogfreesize
def check_precondition(self):
if IGNORE_SERVER_CHECK:
logger.warning('forced to ignore server check')
return True
try:
a = self.get_load_average()
if not type(a) is float:
self.show_message('result type of load check does not match')
return False
if not type(self.fetch_current_xlog()) is float:
logger.debug('xlog check failed')
self.show_message('result type of xlog check does not match')
return False
except StandardError as e:
self.show_message(str(e))
logger.exception(e)
return False
return not ('e' in locals() and e)
def wait_for_load_stabilization(self):
'''Wait until the load is below the threshold and the xlog partition has enough free space'''
self.conn.autocommit = True
while not self.canceled:
if not self.is_load_ok():
self.status_message = 'Wait for load {0}: '
self.show_message('Wait for load {0}: currently {1}'.format(self.max_load_average, self.current_load))
time.sleep(5)
self.calc_eta()
elif not self.is_xlog_ok():
self.show_message('Waiting. xlog partition under {0}GB: {1}'.format(MINXLOGFREE, self.xlogfreesize))
time.sleep(5)
self.calc_eta()
else:
self.status_message = None
break
if not self.conn.closed:
self.conn.autocommit = False
def is_load_ok(self):
if IGNORE_SERVER_CHECK:
return True
else:
self.current_load = self.get_load_average()
return self.current_load < self.max_load_average
def is_xlog_ok(self):
if IGNORE_SERVER_CHECK:
return True
else:
return self.get_xlog_free() >= MINXLOGFREE
def cancel(self):
if not self.conn.closed:
self.conn.cancel()
self.canceled = True
if self.myquery:
self.myquery.canceled = True
self.myquery.queue.put(POISON)
def increment(self):
self.thresholdlock.acquire()
self.max_load_average += 1
self.thresholdlock.release()
self.show_message('increased threshold: {0}'.format(self.max_load_average))
def decrement(self):
self.thresholdlock.acquire()
self.max_load_average -= 1
self.thresholdlock.release()
self.show_message('decreased threshold: {0}'.format(self.max_load_average))
def perform_vacuum(self):
v = self.build_query('vacuum analyze {0}'.format(self.config.get('vacuum_table')))
self.conn.autocommit = True
c = self.conn.cursor()
c.execute(SQL_GET_TIMEOUT)
current_timeout = c.fetchone()[0]
c.execute(SQL_SET_TIMEOUT, ('0', ))
if self.config.get('vacuum_delay'):
delay = self.config.get('vacuum_delay')
c.execute('set vacuum_cost_delay to %s', (delay, ))
v.exec_query(None)
c.execute(SQL_SET_TIMEOUT, (current_timeout, ))
self.calc_eta()
try:
self.conn.autocommit = False
except:
pass
self.wait_for_load_stabilization()
def show_exception(self, exception):
self.last_exception = exception
self.show_message('')
logger.exception(exception)
def show_message(self, message):
if self.config.get('batchmode'):
return
self.statusmessage = message
pretty_exc = ' '.join(str(self.last_exception).splitlines())
myline = '{current}/{maxcount} {error} {status}'.format(current=self.idcount, maxcount=self.maxcount,
status=self.statusmessage, error=pretty_exc)
self.fn_show(myline)
def report_result(self):
# since maxcount is an estimate we correct todocount after a successful finish
self.maxcount = self.idcount
msg = '{dbname}: {donecount} of {todocount} done.'.format(dbname=self.dbname, donecount=self.idcount,
todocount=self.maxcount)
logger.info(msg)
if self.config.get('batchmode'):
sys.stdout.write(msg)
sys.stdout.write('\n')
else:
self.show_message('Done')
class InputReader(object):
def __init__(self):
self.input_interrupt = None
self.input_queue = Queue.Queue()
def read_input(self):
while not self.input_interrupt.acquire(False):
readlist = select.select([sys.stdin], [], [], TIMEOUT)[0]
if len(readlist) > 0:
ch = readlist[0].read(1)
self.input_queue.put(ch)
def install(pusername, sql, db):
conn, cur = connect(db[1], db[2], db[3], pusername)
cur.execute(sql)
conn.commit()
conn.close()
def server_install(config, args, dblist):
'''Install or uninstall the server helper function, depending on the command line flags'''
if args.install:
fn = partial(install, config['user'], SQL_CREATE_PLPYTHONFUNC)
elif args.uninstall:
fn = partial(install, config['user'], SQL_DROP_PLPYTHONFUNC)
else:
return
for db in dblist:
fn(db)
def makespace(count):
space = count * '\n'
sys.stdout.write(space)
def get_host_port(hostport):
host, _, port = hostport.partition(':')
return host, int(port or 5432)
def start_threads(dbs, config):
"""returns a list of tuples (thread, workerobject)"""
threadlist = []
for line, (_, host, port, dbname) in enumerate(dbs, start=1):
kindly = KindlyUpdate(config, host, port, dbname)
kindly.fn_show = partial(show, line, '{0}: '.format(kindly.dbname))
worker = threading.Thread(target=kindly.run, name=kindly.dbname)
if not kindly.check_precondition():
return []
threadlist.append((worker, kindly))
for thread in threadlist:
thread[0].start()
return threadlist
def setup_input_thread():
input = InputReader()
input.input_interrupt = threading.Lock()
input.input_interrupt.acquire()
t = threading.Thread(target=input.read_input, name='input-thread')
t.start()
return input
def show_status_line(config, p_eta=0):
if config.get('batchmode'):
return
vacuumon = ('ON' if config.get('vacuum_cycles') else 'OFF')
message = \
'i: increment d:decrement Threshold MaxLoad: {threshold} Vacuum: {is_vacuum} VacuumDelay: {vacuum} ETA: {eta}'.format(threshold=config.get('maxload'
), vacuum=config.get('vacuum_delay'), is_vacuum=vacuumon, eta=p_eta or '')
show(0, '', message)
def notify_input(input, threadlist, config):
method = None
if input == 'i':
method = KindlyUpdate.increment
config['maxload'] += 1
elif input == 'd':
method = KindlyUpdate.decrement
config['maxload'] -= 1
if method == None:
return
show_status_line(config, 2.3)
for _, u in threadlist:
method(u)
def setup_logger():
global logger
logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
fh = logging.FileHandler(filename='niceupdate.log')
formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def check_threads_and_statusupdate(threadlist, config):
eta = datetime.timedelta(seconds=0)
result = ''
for t, k in threadlist:
t.join(TIMEOUT)
eta = (k.get_eta() if k.get_eta() > eta else eta)
if not t.isAlive():
if k.last_exception:
result += str(k.last_exception) + '\n'
threadlist.remove((t, k))
show_status_line(config, eta)
return result
def cancel_and_close(threadlist):
for t, k in threadlist:
k.cancel()
k.conn.rollback()
k.conn.close()
def process_dbs(dbs, config):
'''Main procedure processing the configuration. Returns an error message, if any'''
result = ''
batchmode = config.get('batchmode')
if not batchmode:
makespace(len(dbs))
threadlist = start_threads(dbs, config)
if not threadlist:
sys.exit('Precondition not fulfilled. If you wish to run without CPU load and WAL size checking (AWS RDS etc.), use the --force flag.')
if not batchmode:
old_settings = termios.tcgetattr(sys.stdin)
try:
if not batchmode:
tty.setcbreak(sys.stdin.fileno())
input = setup_input_thread()
while len(threadlist) > 0:
try:
if not batchmode:
notify_input(input.input_queue.get(timeout=TIMEOUT), threadlist, config)
except Queue.Empty:
pass
finally:
r = check_threads_and_statusupdate(threadlist, config)
if r:
result += r
except KeyboardInterrupt:
result += 'Interrupted '
cancel_and_close(threadlist)
finally:
if not batchmode:
input.input_interrupt.release() # cancel input thread
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
while len(threadlist) > 0:
result += check_threads_and_statusupdate(threadlist, config)
if not batchmode:
sys.stdout.write('\n')
if result:
return result
def main():
setup_logger()
args = getargs()
config = load_config(args)
dbs = [(None, args.host, args.port, args.dbname)]
if args.install or args.uninstall:
server_install(config, args, dbs)
logger.info('finished installing/uninstalling load helper. exit')
sys.exit(0)
result = process_dbs(dbs, config)
sys.exit(result)
if __name__ == '__main__':
main()
|
py | b40f2a5236fe4eeede5045acf6e926bbc9380b44 | """Implementation of treadmill-admin-install CLI plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import sys
import click
from treadmill import cli
from treadmill import context
from treadmill import yamlwrapper as yaml
def init():
"""Return top level command handler."""
@click.group(cls=cli.make_commands(__name__))
@click.option('--distro', required=True,
help='Path to treadmill distro.',
envvar='TREADMILL_DISTRO')
@click.option('--install-dir', required=True,
help='Target installation directory.',
envvar='TREADMILL_APPROOT')
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
is_eager=True,
expose_value=False)
@click.option('--config', required=False,
type=click.Path(exists=True, readable=True, allow_dash=True),
multiple=True)
@click.option('--override', required=False, type=cli.DICT)
@click.option('--profile', required=True,
envvar='TREADMILL_PROFILE',
callback=cli.handle_context_opt,
is_eager=True,
expose_value=False)
@click.pass_context
def install(ctx, distro, install_dir, config, override):
"""Installs Treadmill."""
cell = None if context.GLOBAL.cell == '-' else context.GLOBAL.cell
profile = context.GLOBAL.get_profile_name()
ctx.obj['PARAMS'] = {
'cell': cell,
'dns_domain': context.GLOBAL.dns_domain,
'ldap_suffix': context.GLOBAL.ldap_suffix,
'treadmill': distro,
'dir': install_dir,
'profile': profile,
'python': sys.executable,
'python_path': os.getenv('PYTHONPATH', ''),
'init_hook': os.getenv('TREADMILL_INIT_HOOK', ''),
}
install_data = {}
for conf in config:
if conf == '-':
conf_dict = yaml.load(stream=sys.stdin)
else:
with io.open(conf, 'r') as fd:
conf_dict = yaml.load(stream=fd)
ctx.obj['PARAMS'].update(conf_dict)
install_data.update(conf_dict.get('data', {}))
if override:
ctx.obj['PARAMS'].update(override)
install_data.update(override)
# Store the install data in the context.
# TODO: This is a terrible terrible mess.
ctx.obj['PARAMS'].update(install_data)
ctx.obj['PARAMS']['data'] = install_data
# XXX: hack - templates use treadmillid, but it is defined as
# "username" in cell object.
ctx.obj['PARAMS']['treadmillid'] = ctx.obj['PARAMS'].get('username')
os.environ['TREADMILL'] = distro
return install
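# Illustrative config file (hypothetical keys except 'username' and 'data', which the code above reads):
#   username: treadmld
#   data:
#     some_key: some_value
# Each config is merged into ctx.obj['PARAMS'], and the nested 'data' mapping is flattened into it as well.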
|
py | b40f2ae7c708b3ae4c274b584ba6229728460eb4 | #!/usr/bin/python3
## Tommy
from botbase import *
_lichtenfels_date = re.compile(r"Aktuell \(Stand: (\d\d?\.\d\d?\.20\d\d)")
def lichtenfels(sheets):
soup = get_soup("https://www.lkr-lif.de/landratsamt/gesundheit-und-verbraucherschutz/gesundheitswesen/informationen-fuer-die-buerger/coronavirus/7419.Aktuelle-Zahlen-zu-den-COVID-19-Infizierten-und-Impfungen-im-Landkreis-Lichtenfels.html#COVID-19-Zahlen")
date_text = next(x for x in soup.find_all("p") if "Aktuell (Stand:" in x.get_text()).get_text()
date = _lichtenfels_date.search(date_text).group(1)
check_date(date, "Lichtenfels")
tables = soup.find_all("table")
assert "Neuinfizierte" in tables[0].get_text()
cc = force_int(tables[0].find_all("td")[1].get_text())
rows = [[x.text.strip() for x in row.findAll(["td"])] for row in tables[1].findAll("tr")]
assert "Genesen" in rows[2][0]
assert "Fälle gesamt" in rows[3][0]
c = force_int(rows[3][1])
g = force_int(rows[2][1])
update(sheets, 9478, c=c, cc=cc, g=g, sig="Bot", ignore_delta=True)
return True
schedule.append(Task(15, 37, 18, 40, 360, lichtenfels, 9478))
if __name__ == '__main__': lichtenfels(googlesheets())
|
py | b40f2b027b592eabf864a4f76e3cb17e42003588 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import datetime
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib.request
from tempfile import TemporaryDirectory
import argparse
import boto3
import pytest
from assertpy import assert_that
from framework.tests_configuration.config_renderer import dump_rendered_config_file, read_config_file
from framework.tests_configuration.config_utils import get_all_regions
from framework.tests_configuration.config_validator import assert_valid_config
from reports_generator import generate_cw_report, generate_json_report, generate_junitxml_merged_report
from retrying import retry
from utils import InstanceTypesData
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(module)s - %(message)s", level=logging.INFO)
START_TIME = time.time()
START_TIME_ISO = datetime.datetime.fromtimestamp(START_TIME).isoformat()
LOGS_DIR = "{0}.logs".format(START_TIME)
OUT_DIR = "{0}.out".format(START_TIME)
TEST_DEFAULTS = {
"parallelism": None,
"retry_on_failures": False,
"features": "", # empty string means all
"regions": [],
"oss": [],
"schedulers": [],
"instances": [],
"dry_run": False,
"reports": [],
"cw_region": "us-east-1",
"cw_namespace": "ParallelCluster/IntegrationTests",
"cw_timestamp_day_start": False,
"sequential": False,
"output_dir": "tests_outputs",
"custom_node_url": None,
"custom_cookbook_url": None,
"createami_custom_cookbook_url": None,
"cookbook_git_ref": None,
"node_git_ref": None,
"ami_owner": None,
"createami_custom_node_url": None,
"custom_awsbatchcli_url": None,
"custom_ami": None,
"pre_install": None,
"post_install": None,
"vpc_stack": None,
"api_uri": None,
"cluster": None,
"api_definition_s3_uri": None,
"api_infrastructure_s3_uri": None,
"public_ecr_image_uri": None,
"no_delete": False,
"benchmarks": False,
"benchmarks_target_capacity": 200,
"benchmarks_max_time": 30,
"stackname_suffix": "",
"delete_logs_on_success": False,
"tests_root_dir": "./tests",
"instance_types_data": None,
"use_default_iam_credentials": False,
"iam_user_role_stack_name": None,
"directory_stack_name": None,
"ldaps_nlb_stack_name": None,
}
def _init_argparser():
parser = argparse.ArgumentParser(
description="Run integration tests suite.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--key-name", help="Key to use for EC2 instances", required=True)
parser.add_argument("--key-path", help="Path to the key to use for SSH connections", required=True, type=_is_file)
parser.add_argument(
"-n", "--parallelism", help="Tests parallelism for every region.", default=TEST_DEFAULTS.get("parallelism")
)
parser.add_argument(
"--sequential",
help="Run tests in a single process. When not specified tests will spawn a process for each region under test.",
action="store_true",
default=TEST_DEFAULTS.get("sequential"),
)
parser.add_argument(
"--credential",
action="append",
help="STS credential to assume when running tests in a specific region."
"Credentials need to be in the format <region>,<endpoint>,<ARN>,<externalId> and can"
" be specified multiple times. <region> represents the region credentials are used for, <endpoint> is the sts "
" endpoint to contact in order to assume credentials, <account-id> is the id of the account where the role to "
" assume is defined, <externalId> is the id to use when assuming the role. "
"(e.g. ap-east-1,https://sts.us-east-1.amazonaws.com,arn:aws:iam::<account-id>:role/role-to-assume,externalId)",
required=False,
)
parser.add_argument(
"--use-default-iam-credentials",
help="Use the default IAM creds to run pcluster CLI commands. Skips the creation of pcluster CLI IAM role.",
action="store_true",
default=TEST_DEFAULTS.get("use_default_iam_credentials"),
)
parser.add_argument(
"--retry-on-failures",
help="Retry once more the failed tests after a delay of 60 seconds.",
action="store_true",
default=TEST_DEFAULTS.get("retry_on_failures"),
)
parser.add_argument(
"--tests-root-dir",
help="Root dir where integration tests are defined",
default=TEST_DEFAULTS.get("tests_root_dir"),
)
dimensions_group = parser.add_argument_group("Test dimensions")
dimensions_group.add_argument(
"-c",
"--tests-config",
help="Config file that specifies the tests to run and the dimensions to enable for each test. "
"Note that when a config file is used the following flags are ignored: instances, regions, oss, schedulers. "
"Refer to the docs for further details on the config format: "
"https://github.com/aws/aws-parallelcluster/blob/develop/tests/integration-tests/README.md",
)
dimensions_group.add_argument(
"-i",
"--instances",
help="AWS instances under test. Ignored when tests-config is used.",
default=TEST_DEFAULTS.get("instances"),
nargs="*",
)
dimensions_group.add_argument(
"-o",
"--oss",
help="OSs under test. Ignored when tests-config is used.",
default=TEST_DEFAULTS.get("oss"),
nargs="*",
)
dimensions_group.add_argument(
"-s",
"--schedulers",
help="Schedulers under test. Ignored when tests-config is used.",
default=TEST_DEFAULTS.get("schedulers"),
nargs="*",
)
dimensions_group.add_argument(
"-r",
"--regions",
help="AWS regions where tests are executed. Ignored when tests-config is used.",
default=TEST_DEFAULTS.get("regions"),
nargs="*",
)
dimensions_group.add_argument(
"-f",
"--features",
help="Run only tests for the listed features. Prepending the not keyword to the feature name causes the "
"feature to be excluded.",
default=TEST_DEFAULTS.get("features"),
nargs="+",
)
reports_group = parser.add_argument_group("Test reports")
reports_group.add_argument(
"--show-output",
help="Do not redirect tests stdout to file. Not recommended when running in multiple regions.",
action="store_true",
default=TEST_DEFAULTS.get("show_output"),
)
reports_group.add_argument(
"--reports",
help="create tests report files. junitxml creates a junit-xml style report file. html creates an html "
"style report file. json creates a summary with details for each dimensions. cw publishes tests metrics into "
"CloudWatch",
nargs="+",
choices=["html", "junitxml", "json", "cw"],
default=TEST_DEFAULTS.get("reports"),
)
reports_group.add_argument(
"--cw-region", help="Region where to publish CloudWatch metrics", default=TEST_DEFAULTS.get("cw_region")
)
reports_group.add_argument(
"--cw-namespace",
help="CloudWatch namespace where to publish metrics",
default=TEST_DEFAULTS.get("cw_namespace"),
)
reports_group.add_argument(
"--cw-timestamp-day-start",
action="store_true",
help="CloudWatch metrics pushed with at timestamp equal to the start of the current day (midnight)",
default=TEST_DEFAULTS.get("cw_timestamp_day_start"),
)
reports_group.add_argument(
"--output-dir", help="Directory where tests outputs are generated", default=TEST_DEFAULTS.get("output_dir")
)
custom_group = parser.add_argument_group("Custom packages and templates")
custom_group.add_argument(
"--custom-node-url",
help="URL to a custom node package.",
default=TEST_DEFAULTS.get("custom_node_url"),
type=_is_url,
)
custom_group.add_argument(
"--custom-cookbook-url",
help="URL to a custom cookbook package.",
default=TEST_DEFAULTS.get("custom_cookbook_url"),
type=_is_url,
)
custom_group.add_argument(
"--createami-custom-cookbook-url",
help="URL to a custom cookbook package for the createami command.",
default=TEST_DEFAULTS.get("createami_custom_cookbook_url"),
type=_is_url,
)
custom_group.add_argument(
"--createami-custom-node-url",
help="URL to a custom node package for the createami command.",
default=TEST_DEFAULTS.get("createami_custom_node_url"),
type=_is_url,
)
custom_group.add_argument(
"--custom-awsbatchcli-url",
help="URL to a custom awsbatch cli package.",
default=TEST_DEFAULTS.get("custom_awsbatchcli_url"),
type=_is_url,
)
custom_group.add_argument(
"--pre-install", help="URL to a pre install script", default=TEST_DEFAULTS.get("pre_install")
)
custom_group.add_argument(
"--post-install", help="URL to a post install script", default=TEST_DEFAULTS.get("post_install")
)
custom_group.add_argument(
"--instance-types-data",
help="Additional information about instance types used in the tests. The format is a JSON map "
"instance_type -> data, where data must respect the same structure returned by ec2 "
"describe-instance-types",
default=TEST_DEFAULTS.get("instance_types_data"),
)
ami_group = parser.add_argument_group("AMI selection parameters")
ami_group.add_argument(
"--custom-ami", help="custom AMI to use for all tests.", default=TEST_DEFAULTS.get("custom_ami")
)
ami_group.add_argument(
"--pcluster-git-ref",
help="Git ref of the custom cli package used to build the AMI.",
default=TEST_DEFAULTS.get("pcluster_git_ref"),
)
ami_group.add_argument(
"--cookbook-git-ref",
help="Git ref of the custom cookbook package used to build the AMI.",
default=TEST_DEFAULTS.get("cookbook_git_ref"),
)
ami_group.add_argument(
"--node-git-ref",
help="Git ref of the custom node package used to build the AMI.",
default=TEST_DEFAULTS.get("node_git_ref"),
)
ami_group.add_argument(
"--ami-owner",
help="Override the owner value when fetching AMIs to use with cluster. By default pcluster uses amazon.",
default=TEST_DEFAULTS.get("ami_owner"),
)
banchmarks_group = parser.add_argument_group("Benchmarks")
banchmarks_group.add_argument(
"--benchmarks",
help="run benchmarks tests. This disables the execution of all tests defined under the tests directory.",
action="store_true",
default=TEST_DEFAULTS.get("benchmarks"),
)
banchmarks_group.add_argument(
"--benchmarks-target-capacity",
help="set the target capacity for benchmarks tests",
default=TEST_DEFAULTS.get("benchmarks_target_capacity"),
type=int,
)
banchmarks_group.add_argument(
"--benchmarks-max-time",
help="set the max waiting time in minutes for benchmarks tests",
default=TEST_DEFAULTS.get("benchmarks_max_time"),
type=int,
)
api_group = parser.add_argument_group("API options")
api_group.add_argument(
"--api-definition-s3-uri",
help="URI of the Docker image for the Lambda of the ParallelCluster API",
default=TEST_DEFAULTS.get("api_definition_s3_uri"),
)
api_group.add_argument(
"--api-infrastructure-s3-uri",
help="URI of the CloudFormation template for the ParallelCluster API",
default=TEST_DEFAULTS.get("api_definition_s3_uri"),
)
api_group.add_argument(
"--public-ecr-image-uri",
help="S3 URI of the ParallelCluster API spec",
default=TEST_DEFAULTS.get("public_ecr_image_uri"),
)
api_group.add_argument(
"--api-uri",
help="URI of an existing ParallelCluster API",
default=TEST_DEFAULTS.get("api_uri"),
)
debug_group = parser.add_argument_group("Debugging/Development options")
debug_group.add_argument(
"--vpc-stack", help="Name of an existing vpc stack.", default=TEST_DEFAULTS.get("vpc_stack")
)
debug_group.add_argument(
"--cluster", help="Use an existing cluster instead of creating one.", default=TEST_DEFAULTS.get("cluster")
)
debug_group.add_argument(
"--no-delete",
action="store_true",
help="Don't delete stacks after tests are complete.",
default=TEST_DEFAULTS.get("no_delete"),
)
debug_group.add_argument(
"--delete-logs-on-success",
help="delete CloudWatch logs when a test succeeds",
action="store_true",
default=TEST_DEFAULTS.get("delete_logs_on_success"),
)
debug_group.add_argument(
"--stackname-suffix",
help="set a suffix in the integration tests stack names",
default=TEST_DEFAULTS.get("stackname_suffix"),
)
debug_group.add_argument(
"--dry-run",
help="Only show the list of tests that would run with specified options.",
action="store_true",
default=TEST_DEFAULTS.get("dry_run"),
)
debug_group.add_argument(
"--iam-user-role-stack-name",
help="Name of an existing IAM user role stack.",
default=TEST_DEFAULTS.get("iam_user_role_stack_name"),
)
debug_group.add_argument(
"--directory-stack-name",
help="Name of CFN stack providing AD domain to be used for testing AD integration feature.",
default=TEST_DEFAULTS.get("directory_stack_name"),
)
debug_group.add_argument(
"--ldaps-nlb-stack-name",
help="Name of CFN stack providing NLB to enable use of LDAPS with a Simple AD directory when testing AD "
"integration feature.",
default=TEST_DEFAULTS.get("ldaps_nlb_stack_name"),
)
return parser
def _is_file(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError("'{0}' is not a valid file".format(value))
return value
@retry(stop_max_attempt_number=10, wait_fixed=5000)
def _is_url(value):
scheme = urllib.request.urlparse(value).scheme
if scheme in ["https", "s3", "file"]:
try:
if scheme == "s3":
match = re.match(r"s3://(.*?)/(.*)", value)
if not match or len(match.groups()) < 2:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid S3 URL")
else:
bucket_name, object_name = match.group(1), match.group(2)
boto3.client("s3").head_object(Bucket=bucket_name, Key=object_name)
else:
urllib.request.urlopen(value)
return value
except Exception as e:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid url:{e}")
else:
raise argparse.ArgumentTypeError("'{0}' is not a valid url".format(value))
def _test_config_file(value):
_is_file(value)
try:
config = read_config_file(value)
return config
except Exception:
raise argparse.ArgumentTypeError("'{0}' is not a valid test config".format(value))
def _join_with_not(args):
"""
Join 'not' with the next token, so they
can be used together as a single pytest marker.
"""
it = iter(args)
while True:
try:
current = next(it)
except StopIteration:
break
if current == "not":
try:
current += " " + next(it)
except StopIteration:
raise Exception("'not' needs to be always followed by an item")
yield current
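# Example (illustrative): list(_join_with_not(["not", "slurm", "awsbatch"])) yields
# ["not slurm", "awsbatch"], which _get_pytest_args below joins into the pytest
# marker expression "not slurm or awsbatch".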
def _get_pytest_args(args, regions, log_file, out_dir): # noqa: C901
pytest_args = ["-s", "-vv", "-l"]
pytest_args.append("--tests-log-file={0}/{1}".format(args.output_dir, log_file))
pytest_args.append("--output-dir={0}/{1}".format(args.output_dir, out_dir))
pytest_args.append(f"--key-name={args.key_name}")
pytest_args.append(f"--key-path={args.key_path}")
pytest_args.extend(["--stackname-suffix", args.stackname_suffix])
pytest_args.extend(["--rootdir", args.tests_root_dir])
pytest_args.append("--ignore=./benchmarks")
if args.benchmarks:
pytest_args.append("--benchmarks")
# Show all tests durations
pytest_args.append("--durations=0")
# Run only tests with the given markers
if args.features:
pytest_args.append("-m")
pytest_args.append(" or ".join(list(_join_with_not(args.features))))
if args.tests_config:
_set_tests_config_args(args, pytest_args, out_dir)
if args.instance_types_data:
pytest_args.append("--instance-types-data-file={0}".format(args.instance_types_data))
if regions:
pytest_args.append("--regions")
pytest_args.extend(regions)
if args.instances:
pytest_args.append("--instances")
pytest_args.extend(args.instances)
if args.oss:
pytest_args.append("--oss")
pytest_args.extend(args.oss)
if args.schedulers:
pytest_args.append("--schedulers")
pytest_args.extend(args.schedulers)
if args.delete_logs_on_success:
pytest_args.append("--delete-logs-on-success")
if args.credential:
pytest_args.append("--credential")
pytest_args.extend(args.credential)
if args.use_default_iam_credentials:
pytest_args.append("--use-default-iam-credentials")
if args.retry_on_failures:
# Rerun tests on failures for one more time after 60 seconds delay
pytest_args.extend(["--reruns", "1", "--reruns-delay", "60"])
if args.parallelism:
pytest_args.extend(["-n", args.parallelism])
if args.dry_run:
pytest_args.append("--collect-only")
if any(report in ["junitxml", "json", "cw"] for report in args.reports):
pytest_args.append("--junit-xml={0}/{1}/results.xml".format(args.output_dir, out_dir))
if "html" in args.reports:
pytest_args.append("--html={0}/{1}/results.html".format(args.output_dir, out_dir))
_set_custom_packages_args(args, pytest_args)
_set_ami_args(args, pytest_args)
_set_custom_stack_args(args, pytest_args)
_set_api_args(args, pytest_args)
return pytest_args
def _set_custom_packages_args(args, pytest_args): # noqa: C901
if args.custom_node_url:
pytest_args.extend(["--custom-node-package", args.custom_node_url])
if args.custom_cookbook_url:
pytest_args.extend(["--custom-chef-cookbook", args.custom_cookbook_url])
if args.createami_custom_cookbook_url:
pytest_args.extend(["--createami-custom-chef-cookbook", args.createami_custom_cookbook_url])
if args.createami_custom_node_url:
pytest_args.extend(["--createami-custom-node-package", args.createami_custom_node_url])
if args.custom_awsbatchcli_url:
pytest_args.extend(["--custom-awsbatchcli-package", args.custom_awsbatchcli_url])
if args.pre_install:
pytest_args.extend(["--pre-install", args.pre_install])
if args.post_install:
pytest_args.extend(["--post-install", args.post_install])
def _set_ami_args(args, pytest_args):
if args.custom_ami:
pytest_args.extend(["--custom-ami", args.custom_ami])
if args.pcluster_git_ref:
pytest_args.extend(["--pcluster-git-ref", args.pcluster_git_ref])
if args.cookbook_git_ref:
pytest_args.extend(["--cookbook-git-ref", args.cookbook_git_ref])
if args.node_git_ref:
pytest_args.extend(["--node-git-ref", args.node_git_ref])
if args.ami_owner:
pytest_args.extend(["--ami-owner", args.ami_owner])
def _set_custom_stack_args(args, pytest_args):
if args.vpc_stack:
pytest_args.extend(["--vpc-stack", args.vpc_stack])
if args.cluster:
pytest_args.extend(["--cluster", args.cluster])
if args.no_delete:
pytest_args.append("--no-delete")
if args.iam_user_role_stack_name:
pytest_args.extend(["--iam-user-role-stack-name", args.iam_user_role_stack_name])
if args.directory_stack_name:
pytest_args.extend(["--directory-stack-name", args.directory_stack_name])
if args.ldaps_nlb_stack_name:
pytest_args.extend(["--ldaps-nlb-stack-name", args.ldaps_nlb_stack_name])
def _set_api_args(args, pytest_args):
if args.api_definition_s3_uri:
pytest_args.extend(["--api-definition-s3-uri", args.api_definition_s3_uri])
if args.public_ecr_image_uri:
pytest_args.extend(["--public-ecr-image-uri", args.public_ecr_image_uri])
if args.api_uri:
pytest_args.extend(["--api-uri", args.api_uri])
if args.api_infrastructure_s3_uri:
pytest_args.extend(["--api-infrastructure-s3-uri", args.api_infrastructure_s3_uri])
def _set_tests_config_args(args, pytest_args, out_dir):
# Dump the rendered file to avoid re-rendering in pytest processes
rendered_config_file = f"{args.output_dir}/{out_dir}/tests_config.yaml"
with open(rendered_config_file, "x", encoding="utf-8") as text_file:
text_file.write(dump_rendered_config_file(args.tests_config))
pytest_args.append(f"--tests-config-file={rendered_config_file}")
def _get_pytest_regionalized_args(region, args, out_dir, logs_dir):
return _get_pytest_args(
args=args,
regions=[region],
log_file="{0}/{1}.log".format(logs_dir, region),
out_dir="{0}/{1}".format(our_dir, region),
)
def _get_pytest_non_regionalized_args(args, out_dir, logs_dir):
return _get_pytest_args(
args=args, regions=args.regions, log_file="{0}/all_regions.log".format(logs_dir), out_dir=out_dir
)
def _run_test_in_region(region, args, out_dir, logs_dir):
out_dir_region = "{base_dir}/{out_dir}/{region}".format(base_dir=args.output_dir, out_dir=out_dir, region=region)
os.makedirs(out_dir_region, exist_ok=True)
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/pytest.out".format(out_dir_region), "w")
pytest_args_regionalized = _get_pytest_regionalized_args(region, args, out_dir, logs_dir)
with TemporaryDirectory() as temp_dir:
pytest_args_regionalized.extend(["--basetemp", temp_dir])
logger.info("Starting pytest in region {0} with params {1}".format(region, pytest_args_regionalized))
pytest.main(pytest_args_regionalized)
def _make_logging_dirs(base_dir):
logs_dir = "{base_dir}/{logs_dir}".format(base_dir=base_dir, logs_dir=LOGS_DIR)
os.makedirs(logs_dir, exist_ok=True)
logger.info("Configured logs dir: {0}".format(logs_dir))
out_dir = "{base_dir}/{out_dir}".format(base_dir=base_dir, out_dir=OUT_DIR)
os.makedirs(out_dir, exist_ok=True)
logger.info("Configured tests output dir: {0}".format(out_dir))
def _run_parallel(args):
jobs = []
if args.regions:
enabled_regions = args.regions
else:
enabled_regions = get_all_regions(args.tests_config)
for region in enabled_regions:
p = multiprocessing.Process(target=_run_test_in_region, args=(region, args, OUT_DIR, LOGS_DIR))
jobs.append(p)
p.start()
for job in jobs:
job.join()
def _check_args(args):
# If --cluster is set only one os, scheduler, instance type and region can be provided
if args.cluster:
if len(args.oss) > 1 or len(args.schedulers) > 1 or len(args.instances) > 1 or len(args.regions) > 1:
logger.error(
"when cluster option is specified, you can have a single value for oss, regions, instances "
"and schedulers and you need to make sure they match the cluster specific ones"
)
exit(1)
if not args.tests_config:
assert_that(args.regions).described_as("--regions cannot be empty").is_not_empty()
assert_that(args.instances).described_as("--instances cannot be empty").is_not_empty()
assert_that(args.oss).described_as("--oss cannot be empty").is_not_empty()
assert_that(args.schedulers).described_as("--schedulers cannot be empty").is_not_empty()
else:
try:
args.tests_config = _test_config_file(args.tests_config)
assert_valid_config(args.tests_config, args.tests_root_dir)
logger.info("Found valid config file:\n%s", dump_rendered_config_file(args.tests_config))
except Exception:
raise argparse.ArgumentTypeError("'{0}' is not a valid test config".format(args.tests_config))
def _run_sequential(args):
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/{1}/pytest.out".format(args.output_dir, OUT_DIR), "w")
pytest_args_non_regionalized = _get_pytest_non_regionalized_args(args, OUT_DIR, LOGS_DIR)
logger.info("Starting pytest with params {0}".format(pytest_args_non_regionalized))
pytest.main(pytest_args_non_regionalized)
def main():
"""Entrypoint for tests executor."""
if sys.version_info < (3, 7):
logger.error("test_runner requires python >= 3.7")
exit(1)
args = _init_argparser().parse_args()
# Load additional instance types data, if provided.
# This step must be done before loading test config files in order to resolve instance type placeholders.
if args.instance_types_data:
InstanceTypesData.load_additional_instance_types_data(args.instance_types_data)
_check_args(args)
logger.info("Parsed test_runner parameters {0}".format(args))
_make_logging_dirs(args.output_dir)
if args.sequential:
_run_sequential(args)
else:
_run_parallel(args)
logger.info("All tests completed!")
reports_output_dir = "{base_dir}/{out_dir}".format(base_dir=args.output_dir, out_dir=OUT_DIR)
if "junitxml" in args.reports:
generate_junitxml_merged_report(reports_output_dir)
if "json" in args.reports:
logger.info("Generating tests report")
generate_json_report(reports_output_dir)
if "cw" in args.reports:
logger.info("Publishing CloudWatch metrics")
generate_cw_report(reports_output_dir, args.cw_namespace, args.cw_region, args.cw_timestamp_day_start)
if __name__ == "__main__":
main()
|
py | b40f2cab4e01e1407bca51dcbbc1bd979e47a358 | """
Horten Wing Class Generator
N Goizueta Nov 18
"""
import numpy as np
import os
import h5py as h5
import sharpy.utils.geo_utils as geo_utils
import sharpy.utils.algebra as algebra
import configobj
import scipy.linalg as sclalg
class HortenWing:
"""
Horten Wing Class Generator
A ``HortenWing`` class contains the basic geometry and properties of a simplified Horten wing, as
described by Richards (2016)
This class allows the user to quickly obtain SHARPy cases for the purposes of parametric analyses.
"""
def __init__(self,
M,
N,
Mstarfactor,
u_inf,
rho=1.225,
alpha_deg=0.,
beta_deg=0.,
roll_deg=0.,
cs_deflection_deg=0.,
thrust=5.,
physical_time=10,
case_name_format=2,
case_remarks=None,
case_route='./cases/',
case_name='horten'):
# Discretisation
self.M = M
self.N = N
self.Mstarfactor = Mstarfactor
self.n_node_elem = 3
self.n_elem_wing = N
self.n_elem_fuselage = 1
self.n_surfaces = 4
# Case admin
if case_name_format == 0:
self.case_name = case_name + '_u_inf%.4d_a%.4d' % (int(u_inf*100), int(alpha_deg * 100))
elif case_name_format == 1:
self.case_name = case_name + '_u_inf%.4d' % int(u_inf*100)
elif case_name_format == 2:
self.case_name = case_name
else:
self.case_name = case_name + '_u_inf%.4d_%s' % (int(u_inf*100), case_remarks)
self.case_route = os.path.abspath(case_route + self.case_name + '/')
self.config = None
# Flight conditions
self.u_inf = u_inf
self.rho = rho
self.alpha = alpha_deg * np.pi / 180
self.roll = roll_deg * np.pi / 180
self.beta = beta_deg * np.pi / 180
self.cs_deflection = cs_deflection_deg * np.pi / 180
self.thrust = thrust
# Compute number of nodes
n_node = 0
self.n_node_wing = self.n_elem_wing * (self.n_node_elem - 1)
self.n_node_fuselage = self.n_elem_fuselage * self.n_node_elem
n_node += 2 * self.n_node_fuselage - 1 + 2 * self.n_node_wing
self.n_node = n_node
# Compute number of elements
self.n_elem = 2 * (self.n_elem_wing + self.n_elem_fuselage)
# Wing geometry
self.span = 20.0 # [m]
self.sweep_LE = 20 * np.pi / 180 # [rad] Leading Edge Sweep
# self.c_root = 1.0 # [m] Root chord - Richards
self.c_root = 1.5819 # [m] Root chord - Mardanpour 2014
self.taper_ratio = 0.17 # Mardanpour 2014
# self.taper_ratio = 0.25 # Richards
self.thrust_nodes = [self.n_node_fuselage - 1,
self.n_node_fuselage + self.n_node_wing + 1]
self.loc_cg = 0.45 # CG position wrt to LE (from sectional analysis)
# EA is the reference in NATASHA - defined with respect to the midchord. SHARPy is wrt to LE and as a pct of
# local chord
self.main_ea_root = 0.5 + 0.15*0.0254 / self.c_root
self.main_ea_tip = 0.5 + 0.21*0.0254 / (self.c_root*self.taper_ratio)
# FUSELAGE GEOMETRY
# self.fuselage_width = 1.
self.fuselage_width = 0.8248
self.c_fuselage = 84*0.0254
# WASH OUT
self.washout_root = 0*np.pi/180
self.washout_tip = -2 * np.pi / 180
# Horseshoe wake
self.horseshoe = False
self.wake_type = 2
self.dt_factor = 1
self.dt = 1 / self.M / self.u_inf * self.dt_factor
# Dynamics
self.physical_time = physical_time
self.n_tstep = int(physical_time/self.dt)
self.gust_intensity = 0.05
# Numerics
self.tolerance = 1e-12
self.fsi_tolerance = 1e-10
self.relaxation_factor = 0.2
# H5 Variables initialisation as class attributes
# coordinates
self.x = np.zeros((n_node,))
self.y = np.zeros((n_node,))
self.z = np.zeros((n_node,))
# beam number
self.beam_number = np.zeros(self.n_elem, dtype=int)
# frame of reference delta
self.frame_of_reference_delta = np.zeros((self.n_elem, self.n_node_elem, 3))
# connectivity of beams
self.connectivities = np.zeros((self.n_elem, self.n_node_elem), dtype=int)
# stiffness
self.n_stiffness = self.n_elem_wing + self.n_elem_fuselage
self.base_stiffness = np.zeros((self.n_stiffness, 6, 6))
self.elem_stiffness = np.zeros((self.n_elem,), dtype=int)
# mass
self.n_mass = self.n_elem_wing * 2 // 2
self.base_mass = np.zeros((self.n_mass, 6, 6))
self.elem_mass = np.zeros(self.n_elem, dtype=int)
# boundary conditions
self.boundary_conditions = np.zeros((n_node,), dtype=int)
# applied forces
self.app_forces = np.zeros((n_node, 6))
self.n_lumped_mass = 3
self.lumped_mass_nodes = np.zeros((self.n_lumped_mass), dtype=int)
self.lumped_mass = np.zeros(self.n_lumped_mass)
self.lumped_mass_inertia = np.zeros((self.n_lumped_mass, 3, 3))
self.lumped_mass_position = np.zeros((self.n_lumped_mass, 3))
# Aerodynamic properties
# H5 AERO FILE VARIABLES INITIALISATION
# airfoil distribution
self.airfoil_distribution = np.zeros((self.n_elem, self.n_node_elem), dtype=int)
# surface distribution
self.surface_distribution = np.zeros((self.n_elem,), dtype=int) - 1
self.surface_m = np.zeros((self.n_surfaces,), dtype=int)
self.m_distribution = 'uniform'
# aerodynamic nodes boolean
self.aero_nodes = np.zeros((self.n_node,), dtype=bool)
# aero twist
self.twist = np.zeros((self.n_elem, self.n_node_elem))
# chord
self.chord = np.zeros((self.n_elem, self.n_node_elem))
# elastic axis
self.elastic_axis = np.zeros((self.n_elem, self.n_node_elem))
# control surfaces attributes initialisation
self.n_control_surfaces = 1
self.control_surface = np.zeros((self.n_elem, self.n_node_elem), dtype=int) - 1
self.control_surface_type = np.zeros((self.n_control_surfaces,), dtype=int)
self.control_surface_deflection = np.zeros((self.n_control_surfaces,))
self.control_surface_chord = np.zeros((self.n_control_surfaces,), dtype=int)
self.control_surface_hinge_coord = np.zeros((self.n_control_surfaces,), dtype=float)
self.settings = dict()
def initialise(self):
if not os.path.exists(self.case_route):
os.makedirs(self.case_route)
# Compute number of nodes
n_node = 0
self.n_node_wing = self.n_elem_wing * (self.n_node_elem - 1)
self.n_node_fuselage = self.n_elem_fuselage * self.n_node_elem
n_node += 2 * self.n_node_fuselage - 1 + 2 * self.n_node_wing
self.n_node = n_node
# Compute number of elements
self.n_elem = 2 * (self.n_elem_wing + self.n_elem_fuselage)
self.dt = 1 / self.M / self.u_inf * self.dt_factor
# H5 Variables initialisation as class attributes
# coordinates
self.x = np.zeros((n_node,))
self.y = np.zeros((n_node,))
self.z = np.zeros((n_node,))
# beam number
self.beam_number = np.zeros(self.n_elem, dtype=int)
# frame of reference delta
self.frame_of_reference_delta = np.zeros((self.n_elem, self.n_node_elem, 3))
# connectivity of beams
self.connectivities = np.zeros((self.n_elem, self.n_node_elem), dtype=int)
# stiffness
self.n_stiffness = self.n_elem_wing + self.n_elem_fuselage
self.base_stiffness = np.zeros((self.n_stiffness, 6, 6))
self.elem_stiffness = np.zeros((self.n_elem,), dtype=int)
# mass
self.base_mass = np.zeros((self.n_mass, 6, 6))
self.elem_mass = np.zeros(self.n_elem, dtype=int)
# boundary conditions
self.boundary_conditions = np.zeros((n_node,), dtype=int)
# applied forces
self.app_forces = np.zeros((n_node, 6))
self.n_lumped_mass = 3
self.lumped_mass_nodes = np.zeros((self.n_lumped_mass), dtype=int)
self.lumped_mass = np.zeros(self.n_lumped_mass)
self.lumped_mass_inertia = np.zeros((self.n_lumped_mass, 3, 3))
self.lumped_mass_position = np.zeros((self.n_lumped_mass, 3))
# Aerodynamic properties
# H5 AERO FILE VARIABLES INITIALISATION
# airfoil distribution
self.airfoil_distribution = np.zeros((self.n_elem, self.n_node_elem), dtype=int)
# surface distribution
self.surface_distribution = np.zeros((self.n_elem,), dtype=int) - 1
self.surface_m = np.zeros((self.n_surfaces,), dtype=int)
self.m_distribution = 'uniform'
# aerodynamic nodes boolean
self.aero_nodes = np.zeros((self.n_node,), dtype=bool)
# aero twist
self.twist = np.zeros((self.n_elem, self.n_node_elem))
# chord
self.chord = np.zeros((self.n_elem, self.n_node_elem))
# elastic axis
self.elastic_axis = np.zeros((self.n_elem, self.n_node_elem))
# control surfaces attributes initialisation
self.n_control_surfaces = 1
self.control_surface = np.zeros((self.n_elem, self.n_node_elem), dtype=int) - 1
self.control_surface_type = np.zeros((self.n_control_surfaces,), dtype=int)
self.control_surface_deflection = np.zeros((self.n_control_surfaces,))
self.control_surface_chord = np.zeros((self.n_control_surfaces,), dtype=int)
self.control_surface_hinge_coord = np.zeros((self.n_control_surfaces,), dtype=float)
self.settings = dict()
def dynamic_control_surface(self, *delta):
"""
Generate dynamic control surface input files
Args:
delta (list): list of numpy arrays containing deflection time history
Returns:
"""
i = 0
for cs in delta:
self.control_surface_type[i] = 1
np.savetxt(self.case_route + '/' + self.case_name + '.input.txt', cs)
i += 1
def planform_area(self):
S_fus = 0.5 * (self.c_fuselage + self.c_root) * self.fuselage_width
S_wing = 0.5 * (self.c_root + self.c_root*self.taper_ratio) * self.span / 2
return 2*S_fus + 2*S_wing
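# Worked example with the default geometry above: c_fuselage = 84*0.0254 ~ 2.134 m, so
# S_fus ~ 0.5*(2.134 + 1.582)*0.825 ~ 1.53 m^2 and S_wing ~ 0.5*(1.582 + 0.269)*10 ~ 9.25 m^2,
# giving a total planform area of roughly 2*(1.53 + 9.25) ~ 21.6 m^2.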
def update_mass_stiffness(self, sigma=1.):
r"""
The mass and stiffness matrices are computed. Both vary over the span of the wing, hence a
dictionary is created that acts as a database of the different properties along the wing.
The variation of the stiffness is cubic along the span:
.. math:: \mathbf{K} = \mathbf{K}_0\bar{c}^3
where :math:`\mathbf{K}_0` is the stiffness of the wing root section and :math:`\bar{c}` is
the ratio between the local chord and the root chord.
The variation of the sectional mass is quadratic along the span:
.. math:: \mu = \mu_0\,\bar{c}^2
where :math:`\mu` is the mass per unit span and the zero subscript denotes the root value.
The sectional inertia is varied linearly along the span, based on the information by Mardanpour 2013.
Three lumped masses are included with the following properties (Richards, 2013)
===== ===== ================= ========= =============================== ============
No Node Relative Position Mass [kg] Inertia [kg m^2] Description
===== ===== ================= ========= =============================== ============
``0`` ``2`` ``[0,0,0]`` ``5.24`` ``[0.29547, 0.29322, 0.29547]`` Right Engine
``1`` ``S`` ``[0,0,0]`` ``5.24`` ``[0.29547, 0.29322, 0.29547]`` Left Engine
``2`` ``0`` ``[0,0,0]`` ``15.29`` ``[0.5, 1.0, 1.0]*15.29`` Fuselage
===== ===== ================= ========= =============================== ============
Args:
sigma (float): stiffening factor
Returns:
"""
n_elem_fuselage = self.n_elem_fuselage
n_elem_wing = self.n_elem_wing
n_node_wing = self.n_node_wing
n_node_fuselage = self.n_node_fuselage
c_root = self.c_root
taper_ratio = self.taper_ratio
# Local chord to root chord initialisation
c_bar_temp = np.linspace(c_root, taper_ratio * c_root, n_elem_wing)
# Structural properties at the wing root section from Richards 2016
ea = 1e6
ga = 1e6
gj = 4.24e5
eiy = 3.84e5
eiz = 2.46e7
# Sectional mass from Richards 2016
mu_0 = 9.761
# Bending inertia properties from Mardanpour 2013
j_root = 0.303
j_tip = 0.2e-2 * 4.448 / 9.81
# Number of stiffnesses used
n_stiffness = self.n_stiffness
# Initialise the stiffness database
base_stiffness = self.base_stiffness
# Wing root section stiffness properties
# stiffness_root = sigma * np.diag([ea, ga, ga, gj, eiy, eiz])
stiffness_root = np.array([[3.23624595e+09, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 1.00000000e+14, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 1.00000000e+14,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
8.04987046e+07, -1.69971789e+05, 5.69905411e+07],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-1.69971789e+05, 5.03651190e+07, -6.70649560e+06],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
5.69905411e+07, -6.70649560e+06, 2.24864852e+09]]) * sigma
stiffness_tip = np.array([[5.86034256e+07, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 1.00000000e+15, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 1.00000000e+15,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
2.93227100e+04, -2.11280834e+03, 2.52782426e+04],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.11280834e+03, 1.44883639e+05, -2.52913470e+04],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
2.52782426e+04, -2.52913470e+04, 3.02592902e+05]]) * sigma
stiffness_root = np.diag(np.diag(stiffness_root))
stiffness_tip = np.diag(np.diag(stiffness_tip))
# Rigid fuselage
sigma_fuselage = 1000
base_stiffness[0, :, :] = sigma_fuselage * stiffness_root
# Cubic variation of the stiffness along the span - Richards
# for stiff_entry in range(1, n_stiffness):
# base_stiffness[stiff_entry, :, :] = stiffness_root * c_bar_temp[stiff_entry - 1] ** 3
        # Linear variation between root and tip as per Mardanpour 2014
alpha = np.linspace(0, 1, self.n_elem_wing)
for i_elem in range(0, self.n_elem_wing):
base_stiffness[i_elem + 1, :, :] = stiffness_root*(1-alpha[i_elem]) + stiffness_tip*alpha[i_elem]
# Mass variation along the span
n_mass = self.n_mass
sigma_mass = 1
# Initialise database
base_mass = self.base_mass
mu_root = 2.784472 * sigma_mass
chi_root_right = np.array([0, -5.29, 0.594]) * 0.0254 * 0
chi_root_left = np.array([0, +5.29, 0.594]) * 0.0254 * 0
m_root = np.eye(3) * mu_root
j_root = np.array([[0.30378797, 0., 0.],
[0., 0.0122422, 0.],
[0., 0., 0.30016065]])
j_root = np.diag([0.1, 0.1, 0.2])
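        # NOTE: the measured root inertia tensor defined above is overridden here with a
        # placeholder diagonal value (the tip inertia below is overridden in the same way)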
mass_root_right = sclalg.block_diag(np.diag([mu_root, mu_root, mu_root]), j_root)
mass_root_left = sclalg.block_diag(np.diag([mu_root, mu_root, mu_root]), j_root)
mass_root_right[:3, -3:] = -mu_root * algebra.skew(chi_root_right)
mass_root_right[-3:, :3] = mu_root * algebra.skew(chi_root_right)
mass_root_left[:3, -3:] = -mu_root * algebra.skew(chi_root_left)
mass_root_left[-3:, :3] = mu_root * algebra.skew(chi_root_left)
mu_tip = 0.284084 * sigma_mass
chi_tip_right = np.array([0, -1.644, +0.563]) * 0.0254 * 0
chi_tip_left = np.array([0, +1.644, +0.563]) * 0.0254 * 0
j_tip = np.array([[9.06829766e-04, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 6.34780836e-05, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 8.16146789e-04]])
j_tip = np.diag([0.1, 0.1, 0.2])
mass_tip_right = sclalg.block_diag(np.diag([mu_tip, mu_tip, mu_tip]), j_tip)
mass_tip_left = sclalg.block_diag(np.diag([mu_tip, mu_tip, mu_tip]), j_tip)
mass_tip_right[:3, -3:] += -algebra.skew(chi_tip_right) * mu_tip
mass_tip_right[-3:, :3] += algebra.skew(chi_tip_right) * mu_tip
mass_tip_left[:3, -3:] += -algebra.skew(chi_tip_right) * mu_tip
mass_tip_left[-3:, :3] += algebra.skew(chi_tip_right) * mu_tip
mass_tip_left = mass_tip_right
mass_root_left = mass_root_right
for i_elem in range(self.n_elem_wing):
base_mass[i_elem, :, :] = mass_root_right*(1-alpha[i_elem]) + mass_tip_right*alpha[i_elem]
# for i_elem in range(self.n_elem_wing, 2*self.n_elem_wing):
# base_mass[i_elem, :, :] = mass_root_left*(1-alpha[i_elem-self.n_elem_wing]) + mass_tip_left*alpha[i_elem-self.n_elem_wing]
# Quadratic variation in the mass per unit span - Richards
# mu_temp = mu_0 * np.ones_like(c_bar_temp)
# j_temp = np.linspace(j_root, j_tip, n_elem_wing)
# for mass_entry in range(n_mass):
# base_mass[mass_entry, :, :] = np.diag([
# mu_temp[mass_entry],
# mu_temp[mass_entry],
# mu_temp[mass_entry],
# j_temp[mass_entry],
# j_temp[mass_entry],
# j_temp[mass_entry]
# ])
# Lumped mass initialisation
# 0 - Right engine
# 1 - Left engine
# 2 - Fuselage
lumped_mass_nodes = self.lumped_mass_nodes
lumped_mass = self.lumped_mass
lumped_mass_inertia = self.lumped_mass_inertia
lumped_mass_position = self.lumped_mass_position
# Lumped masses nodal position
lumped_mass_nodes[0] = 2
lumped_mass_nodes[1] = n_node_fuselage + n_node_wing + 1
lumped_mass_nodes[2] = 0
# Lumped mass value from Richards 2013
# lumped_mass[0:2] = 51.445 / 9.81
# lumped_mass[2] = 150 / 9.81
# Lumped masses from Mardanpour
# Pilot mass
lumped_mass[2] = 16.06
lumped_mass_position[2] = np.array([0., -0.254, -0.254])
# Engine mass
lumped_mass[0:2] = 0.535
lumped_mass_inertia[0, :, :] = np.diag([0.02994352, 0.02994352, 0.02994352])
lumped_mass_inertia[1, :, :] = np.diag([0.02994352, 0.02994352, 0.02994352])
# Lumped mass inertia
# lumped_mass_inertia[0, :, :] = np.diag([0.29547, 0.29322, 0.29547])
# lumped_mass_inertia[1, :, :] = np.diag([0.29547, 0.29322, 0.29547])
# lumped_mass_inertia[2, :, :] = np.diag([0.5, 1, 1]) * lumped_mass[2]
# Define class attributes
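        # NOTE: the lumped masses, inertias and positions are multiplied by zero below,
        # i.e. the lumped masses are effectively disabled in the generated model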
self.lumped_mass = lumped_mass * 0
self.lumped_mass_nodes = lumped_mass_nodes * 0
self.lumped_mass_inertia = lumped_mass_inertia * 0
self.lumped_mass_position = lumped_mass_position * 0
self.base_stiffness = base_stiffness
self.base_mass = base_mass
def update_fem_prop(self):
"""
        Computes the FEM properties prior to analysis, such as the connectivity matrix, coordinates, etc.
Returns:
"""
# Obtain class attributes
n_node_elem = self.n_node_elem
n_elem = self.n_elem
n_elem_wing = self.n_elem_wing
n_elem_fuselage = self.n_elem_fuselage
n_node = self.n_node
n_node_wing = self.n_node_wing
n_node_fuselage = self.n_node_fuselage
fuselage_width = self.fuselage_width
thrust = self.thrust
thrust_nodes = self.thrust_nodes
span = self.span
sweep_LE = self.sweep_LE
# mass and stiffness matrices
stiffness = self.base_stiffness
mass = self.base_mass
n_stiffness = stiffness.shape[0]
n_mass = mass.shape[0]
# H5 FEM FILE VARIABLES INITIALISATION
# coordinates
x = np.zeros((n_node,))
y = np.zeros((n_node,))
z = np.zeros((n_node,))
# twist
structural_twist = np.zeros_like(x)
# beam number
beam_number = np.zeros(n_elem, dtype=int)
# frame of reference delta
frame_of_reference_delta = np.zeros((n_elem, n_node_elem, 3))
# connectivity of beams
conn = np.zeros((n_elem, n_node_elem), dtype=int)
# stiffness
stiffness = np.zeros((n_stiffness, 6, 6))
elem_stiffness = np.zeros((n_elem,), dtype=int)
# mass
mass = np.zeros((n_mass, 6, 6))
elem_mass = np.zeros(n_elem, dtype=int)
# boundary conditions
boundary_conditions = np.zeros((n_node,), dtype=int)
# applied forces
app_forces = np.zeros((n_node, 6))
# assemble connectivites
# worked elements
we = 0
# worked nodes
wn = 0
# RIGHT RIGID FUSELAGE
beam_number[we:we + 1] = 0
# coordinates
x[wn:wn + n_node_fuselage] = 0
y[wn:wn + n_node_fuselage] = np.linspace(0, fuselage_width / 2, n_node_fuselage)
# connectivities
elem_mass[0] = 0
conn[we, :] = [0, 2, 1]
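        # the three local nodes of each element are listed as [start, end, middle], hence [0, 2, 1]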
# frame of reference change
frame_of_reference_delta[0, 0, :] = [-1.0, 0.0, 0.0]
frame_of_reference_delta[0, 1, :] = [-1.0, 0.0, 0.0]
frame_of_reference_delta[0, 2, :] = [-1.0, 0.0, 0.0]
# element stiffness
elem_stiffness[0] = 0
elem_mass[0] = 0
# boundary conditions
boundary_conditions[0] = 1
# applied forces - engine 1
app_forces[thrust_nodes[0]] = [0, thrust, 0,
0, 0, 0]
# updated worked nodes and elements
we += n_elem_fuselage
wn += n_node_fuselage
# RIGHT WING
beam_number[we:we + n_elem_wing] = 1
# y coordinate (positive right)
y[wn:wn + n_node_wing] = np.linspace(fuselage_width / 2,
span / 2,
n_node_wing + 1)[1:]
x[wn:wn + n_node_wing] = 0 + (y[wn:wn + n_node_wing] - fuselage_width / 2) * np.tan(sweep_LE)
# connectivities
for ielem in range(n_elem_wing):
conn[we + ielem, :] = (np.ones(n_node_elem) * (we + ielem) * (n_node_elem - 1) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
elem_mass[we + ielem] = ielem
elem_stiffness[we + ielem] = ielem + 1
# element stiffness and mass
# elem_stiffness[we:we+n_elem_wing] = 0
# elem_mass[we:we+n_elem_wing] = 0
# boundary conditions of free end
boundary_conditions[wn + n_node_wing - 1] = -1
# update worked elements and nodes
we += n_elem_wing
wn += n_node_wing
# LEFT FUSELAGE
beam_number[we:we + n_elem_fuselage] = 2
# coordinates
y[wn:wn + n_node_fuselage - 1] = np.linspace(0,
-fuselage_width / 2,
n_node_fuselage)[1:]
x[wn:wn + n_node_fuselage - 1] = 0
# connectivity
conn[we, :] = [0, wn + 1, wn]
# frame of reference delta
for ielem in range(n_elem_fuselage):
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
# element stiffness and mass
elem_stiffness[we:we + n_elem_fuselage] = 0
elem_mass[we:we + n_elem_fuselage] = self.n_elem_wing
# applied forces - engine 2
app_forces[thrust_nodes[1]] = [0, -thrust, 0,
0, 0, 0]
# update worked elements and nodes
we += n_elem_fuselage
wn += n_node_fuselage - 1
# LEFT WING
# coordinates
beam_number[we:we + n_elem_wing] = 3
y[wn:wn + n_node_wing] = np.linspace(-fuselage_width / 2,
-span / 2,
n_node_wing + 1)[1:]
x[wn:wn + n_node_wing] = 0 + -1 * (y[wn:wn + n_node_wing] + fuselage_width / 2) * np.tan(sweep_LE)
# left wing connectivities
for ielem in range(n_elem_wing):
conn[we + ielem, :] = np.ones(n_node_elem) * (we + ielem) * (n_node_elem - 1) + [0, 2, 1]
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
elem_mass[we + ielem] = ielem + self.n_elem_wing
elem_stiffness[we + ielem] = ielem + 1
# element stiffness and mass
# elem_stiffness[we:we+n_node_wing] = 0
# boundary conditions at the free end
boundary_conditions[wn + n_node_wing - 1] = -1
# update worked elements and nodes
we += n_elem_wing
wn += n_node_wing
# set attributes
self.x = x
self.y = y
self.z = z
self.connectivities = conn
self.elem_stiffness = elem_stiffness
self.elem_mass = elem_mass
self.frame_of_reference_delta = frame_of_reference_delta
self.boundary_conditions = boundary_conditions
self.beam_number = beam_number
self.app_forces = app_forces
def generate_fem_file(self):
"""
        Generates the ``.fem.h5`` file containing the structural information of the problem
The file is written to ``self.case_route / self.case_name .fem.h5``
"""
with h5.File(self.case_route + '/' + self.case_name + '.fem.h5', 'a') as h5file:
coordinates = h5file.create_dataset('coordinates',
data=np.column_stack((self.x, self.y, self.z)))
connectivities = h5file.create_dataset('connectivities', data=self.connectivities)
num_nodes_elem_handle = h5file.create_dataset(
'num_node_elem', data=self.n_node_elem)
num_nodes_handle = h5file.create_dataset(
'num_node', data=self.n_node)
num_elem_handle = h5file.create_dataset(
'num_elem', data=self.n_elem)
stiffness_db_handle = h5file.create_dataset(
'stiffness_db', data=self.base_stiffness)
stiffness_handle = h5file.create_dataset(
'elem_stiffness', data=self.elem_stiffness)
mass_db_handle = h5file.create_dataset(
'mass_db', data=self.base_mass)
mass_handle = h5file.create_dataset(
'elem_mass', data=self.elem_mass)
frame_of_reference_delta_handle = h5file.create_dataset(
'frame_of_reference_delta', data=self.frame_of_reference_delta)
structural_twist_handle = h5file.create_dataset(
'structural_twist', data=np.zeros((self.n_elem, self.n_node_elem)))
bocos_handle = h5file.create_dataset(
'boundary_conditions', data=self.boundary_conditions)
beam_handle = h5file.create_dataset(
'beam_number', data=self.beam_number)
app_forces_handle = h5file.create_dataset(
'app_forces', data=self.app_forces)
lumped_mass_nodes_handle = h5file.create_dataset(
'lumped_mass_nodes', data=self.lumped_mass_nodes)
lumped_mass_handle = h5file.create_dataset(
'lumped_mass', data=self.lumped_mass)
lumped_mass_inertia_handle = h5file.create_dataset(
'lumped_mass_inertia', data=self.lumped_mass_inertia)
lumped_mass_position_handle = h5file.create_dataset(
'lumped_mass_position', data=self.lumped_mass_position)
def create_linear_simulation(self, delta_e=None, delta_dot=None):
Kpanels = self.M * (self.n_node - 1)
Kvertices = (self.M + 1) * self.n_node
Kpanels_wake = Kpanels * self.Mstarfactor
n_states_aero = 3 * Kpanels + Kpanels_wake
# n_inputs_aero = 2 * 3 * Kvertices
n_states_struct = 2*(6 * (self.n_node - 1) + 9)
n_inputs_struct = n_states_struct // 2
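        # structural state vector: generalised coordinates and their rates, with 6 dofs per
        # node for n_node - 1 nodes plus 9 rigid-body states; the structural input vector
        # holds one entry per coordinate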
x0 = np.zeros((n_states_aero + n_states_struct))
# x0[-7] = 0.05
# x0[-4:] = (algebra.euler2quat([ -5*np.pi/180, 0, 0]))
u = np.zeros((self.n_tstep, n_states_struct + n_inputs_struct + 2 * self.n_control_surfaces))
# u[0:3, -7] = -1000
if delta_e is not None:
u[:, n_states_struct:n_states_struct+self.n_control_surfaces] = delta_e
u[:, n_states_struct + self.n_control_surfaces:n_states_struct+self.n_control_surfaces + self.n_control_surfaces] = delta_dot
# u[10:15, -8] = 100
self.generate_linear_sim_files(x0, u)
def generate_linear_sim_files(self, x0, input_vec):
if not os.path.exists(self.case_route):
os.makedirs(self.case_route)
with h5.File(self.case_route + '/' + self.case_name + '.lininput.h5', 'a') as h5file:
x0 = h5file.create_dataset(
'x0', data=x0)
u = h5file.create_dataset(
'u', data=input_vec)
def clean_test_files(self):
"""
Clears previously generated files
"""
case_name = self.case_name
route = self.case_route
# FEM
fem_file_name = route + '/' + case_name + '.fem.h5'
if os.path.isfile(fem_file_name):
os.remove(fem_file_name)
# Dynamics File
dyn_file_name = route + '/' + case_name + '.dyn.h5'
if os.path.isfile(dyn_file_name):
os.remove(dyn_file_name)
# Aerodynamics File
aero_file_name = route + '/' + case_name + '.aero.h5'
if os.path.isfile(aero_file_name):
os.remove(aero_file_name)
# Solver file
solver_file_name = route + '/' + case_name + '.sharpy'
if os.path.isfile(solver_file_name):
os.remove(solver_file_name)
# Flight conditions file
flightcon_file_name = route + '/' + case_name + '.flightcon.txt'
if os.path.isfile(flightcon_file_name):
os.remove(flightcon_file_name)
# Linear inputs file
lin_file_name = self.case_route + '/' + self.case_name + '.lininput.h5'
if os.path.isfile(lin_file_name):
os.remove(lin_file_name)
# if os.path.isdir(route):
# os.system('rm -r %s' %route)
def update_aero_properties(self):
"""
        Updates the aerodynamic properties of the Horten wing
"""
# Retrieve attributes
n_elem = self.n_elem
n_node_elem = self.n_node_elem
n_node_wing = self.n_node_wing
n_node_fuselage = self.n_node_fuselage
n_elem_fuselage = self.n_elem_fuselage
n_elem_wing = self.n_elem_wing
c_root = self.c_root
taper_ratio = self.taper_ratio
washout_root = self.washout_root
washout_tip = self.washout_tip
n_control_surfaces = self.n_control_surfaces
cs_deflection = self.cs_deflection
m = self.M
main_ea_root = self.main_ea_root
main_ea_tip = self.main_ea_tip
airfoil_distribution = self.airfoil_distribution
chord = self.chord
surface_distribution = self.surface_distribution
surface_m = self.surface_m
aero_nodes = self.aero_nodes
elastic_axis = self.elastic_axis
twist = self.twist
control_surface = self.control_surface
control_surface_type = self.control_surface_type
control_surface_deflection = self.control_surface_deflection
control_surface_chord = self.control_surface_chord
control_surface_hinge_coord = self.control_surface_hinge_coord
self.dt = 1 / self.M / self.u_inf
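        # panel convection time step, assuming a unit reference chord (dt = c / M / u_inf with c = 1)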
# control surface type: 0 = static
# control surface type: 1 = dynamic
control_surface_type[0] = self.control_surface_type[0]
control_surface_deflection[0] = cs_deflection
control_surface_chord[0] = 2 # m
control_surface_hinge_coord[0] = 0.25
# RIGHT FUSELAGE (Surface 0, Beam 0)
we = 0
wn = 0
i_surf = 0
airfoil_distribution[we:we + n_elem_fuselage] = 0
surface_distribution[we:we + n_elem_fuselage] = i_surf
surface_m[i_surf] = m
aero_nodes[wn:wn + n_node_fuselage] = True
temp_chord = np.linspace(self.c_fuselage, self.c_root, self.n_node_fuselage)
temp_washout = washout_root
# apply chord and elastic axis at each node
node_counter = 0
for ielem in range(we, we + n_elem_fuselage):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[ielem, i_local_node] = temp_chord[node_counter]
elastic_axis[ielem, i_local_node] = main_ea_root
twist[ielem, i_local_node] = -temp_washout
we += n_elem_fuselage
wn += n_node_fuselage
# RIGHT WING (Surface 1, Beam 1)
# surface_id
i_surf = 1
airfoil_distribution[we:we + n_elem_wing, :] = 0
surface_distribution[we:we + n_elem_wing] = i_surf
surface_m[i_surf] = m
# specify aerodynamic characteristics of wing nodes
aero_nodes[wn:wn + n_node_wing - 1] = True
# linear taper initialisation
temp_chord = np.linspace(c_root, taper_ratio * c_root, n_node_wing + 1)
# linear wash out initialisation
temp_washout = np.linspace(washout_root, washout_tip, n_node_wing + 1)
# elastic axis variation
temp_ea = np.linspace(main_ea_root, main_ea_tip, n_node_wing + 1)
# apply chord and elastic axis at each node
node_counter = 0
for ielem in range(we, we + n_elem_wing):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[ielem, i_local_node] = temp_chord[node_counter]
elastic_axis[ielem, i_local_node] = temp_ea[node_counter]
twist[ielem, i_local_node] = -temp_washout[node_counter]
if ielem >= round(((we + n_elem_wing) / 2)):
control_surface[ielem, i_local_node] = 0
# update working element and node
we += n_elem_wing
wn += n_node_wing - 1
# LEFT FUSELAGE (Surface 2, Beam 2)
i_surf = 2
airfoil_distribution[we:we + n_elem_fuselage] = 0
surface_distribution[we:we + n_elem_fuselage] = i_surf
surface_m[i_surf] = m
aero_nodes[wn:wn + n_node_fuselage] = True
temp_chord = np.linspace(self.c_fuselage, self.c_root, self.n_node_fuselage)
temp_washout = washout_root
# apply chord and elastic axis at each node
node_counter = 0
for ielem in range(we, we + n_elem_fuselage):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[ielem, i_local_node] = temp_chord[node_counter]
elastic_axis[ielem, i_local_node] = main_ea_root
twist[ielem, i_local_node] = -temp_washout
we += n_elem_fuselage
wn += n_node_fuselage
# LEFT WING (Surface 3, Beam 3)
i_surf = 3
airfoil_distribution[we:we + n_elem_wing, :] = 0
surface_distribution[we: we + n_elem_wing] = i_surf
surface_m[i_surf] = m
# linear taper initialisation
temp_chord = np.linspace(c_root, taper_ratio * c_root, n_node_wing + 1)
# linear wash out initialisation
temp_washout = np.linspace(washout_root, washout_tip, n_node_wing + 1)
        # specify aerodynamic characteristics of wing nodes
aero_nodes[wn:wn + n_node_wing] = True
# linear taper initialisation
# apply chord and elastic axis at each node
node_counter = 0
for ielem in range(we, we + n_elem_wing):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[ielem, i_local_node] = temp_chord[node_counter]
elastic_axis[ielem, i_local_node] = temp_ea[node_counter]
twist[ielem, i_local_node] = -temp_washout[node_counter]
if ielem >= round((we + n_elem_wing / 2)):
control_surface[ielem, i_local_node] = 0
# update working element and node
we += n_elem_wing
wn += n_node_wing
# end node is the middle node
mid_chord = np.array(chord[:, 1], copy=True)
chord[:, 1] = chord[:, 2]
chord[:, 2] = mid_chord
mid_ea = np.array(elastic_axis[:, 1], copy=True)
elastic_axis[:, 1] = elastic_axis[:, 2]
elastic_axis[:, 2] = mid_ea
# Update aerodynamic attributes of class
self.chord = chord
self.twist = twist
self.aero_nodes = aero_nodes
self.elastic_axis = elastic_axis
self.control_surface = control_surface
def generate_aero_file(self, route=None, case_name=None):
"""
Generates the ``.aero.h5`` file with the aerodynamic properties of the wing
Args:
route (str): route to write case file. If None is specified the default will be used
case_name (str): name of file. If None is specified the default will be used
"""
if not route:
route = self.case_route
if not case_name:
case_name = self.case_name
if not os.path.isdir(self.case_route):
os.makedirs(self.case_route)
chord = self.chord
twist = self.twist
airfoil_distribution = self.airfoil_distribution
surface_distribution = self.surface_distribution
surface_m = self.surface_m
m_distribution = self.m_distribution
aero_nodes = self.aero_nodes
elastic_axis = self.elastic_axis
control_surface = self.control_surface
control_surface_deflection = self.control_surface_deflection
control_surface_chord = self.control_surface_chord
control_surface_hinge_coord = self.control_surface_hinge_coord
control_surface_type = self.control_surface_type
control_surface_deflection[0] = self.cs_deflection
with h5.File(route + '/' + case_name + '.aero.h5', 'a') as h5file:
airfoils_group = h5file.create_group('airfoils')
# add one airfoil
naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(
geo_utils.generate_naca_camber(P=0, M=0)))
naca_airfoil_tail = airfoils_group.create_dataset('1', data=np.column_stack(
geo_utils.generate_naca_camber(P=0, M=0)))
naca_airfoil_fin = airfoils_group.create_dataset('2', data=np.column_stack(
geo_utils.generate_naca_camber(P=0, M=0)))
# chord
chord_input = h5file.create_dataset('chord', data=chord)
dim_attr = chord_input.attrs['units'] = 'm'
# twist
twist_input = h5file.create_dataset('twist', data=twist)
dim_attr = twist_input.attrs['units'] = 'rad'
# airfoil distribution
airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)
surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)
surface_m_input = h5file.create_dataset('surface_m', data=surface_m)
m_distribution_input = h5file.create_dataset('m_distribution',
data=m_distribution.encode('ascii', 'ignore'))
aero_node_input = h5file.create_dataset('aero_node', data=aero_nodes)
elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)
control_surface_input = h5file.create_dataset('control_surface', data=control_surface)
control_surface_deflection_input = h5file.create_dataset('control_surface_deflection',
data=control_surface_deflection)
control_surface_chord_input = h5file.create_dataset('control_surface_chord', data=control_surface_chord)
control_surface_hinge_coord_input = h5file.create_dataset('control_surface_hinge_coord',
data=control_surface_hinge_coord)
control_surface_types_input = h5file.create_dataset('control_surface_type', data=control_surface_type)
def set_default_config_dict(self, route=None, case_name=None):
"""
Generates default solver configuration file
Returns:
"""
if not route:
route = self.case_route
if not case_name:
case_name = self.case_name
u_inf = self.u_inf
rho = self.rho
dt = self.dt
tolerance = self.tolerance
alpha = self.alpha
beta = self.beta
thrust = self.thrust
thrust_nodes = self.thrust_nodes
cs_deflection = self.cs_deflection
fsi_tolerance = self.fsi_tolerance
n_tstep = self.n_tstep
gust_intensity = self.gust_intensity
relaxation_factor = self.relaxation_factor
file_name = route + '/' + case_name + '.sharpy'
settings = dict()
settings['SHARPy'] = {'case': case_name,
'route': route,
'flow': ['BeamLoader',
'AerogridLoader',
'StaticCoupled',
'Modal',
'AerogridPlot',
'BeamPlot',
'SaveData'],
'write_screen': 'on',
'write_log': 'on',
'log_folder': route + '/output/' + case_name + '/',
'log_file': case_name + '.log'}
settings['BeamLoader'] = {'unsteady': 'off',
'orientation': algebra.euler2quat(np.array([self.roll,
self.alpha,
self.beta]))}
settings['StaticUvlm'] = {'print_info': 'on',
'horseshoe': self.horseshoe,
'num_cores': 4,
'n_rollup': 1,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {'u_inf': u_inf,
'u_inf_direction': [1., 0, 0]},
'rho': rho}
settings['StaticCoupled'] = {'print_info': 'on',
'structural_solver': 'NonLinearStatic',
'structural_solver_settings': {'print_info': 'off',
'max_iterations': 200,
'num_load_steps': 1,
'delta_curved': 1e-5,
'min_delta': tolerance,
'gravity_on': 'on',
'gravity': 9.81},
'aero_solver': 'StaticUvlm',
'aero_solver_settings': {'print_info': 'on',
'horseshoe': self.horseshoe,
'num_cores': 4,
'n_rollup': int(0),
'rollup_dt': dt, #self.c_root / self.M / self.u_inf,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {'u_inf': u_inf,
'u_inf_direction': [1., 0, 0]},
'rho': rho},
'max_iter': 200,
'n_load_steps': 1,
'tolerance': tolerance,
'relaxation_factor': 0.2}
if self.horseshoe is True:
settings['AerogridLoader'] = {'unsteady': 'off',
'aligned_grid': 'on',
'mstar': 1,
'freestream_dir': ['1', '0', '0'],
'control_surface_deflection': ['']}
else:
settings['AerogridLoader'] = {'unsteady': 'off',
'aligned_grid': 'on',
'mstar': int(self.M * self.Mstarfactor),
'freestream_dir': ['1', '0', '0'],
'control_surface_deflection': ['']}
settings['NonLinearStatic'] = {'print_info': 'off',
'max_iterations': 150,
'num_load_steps': 1,
'delta_curved': 1e-8,
'min_delta': tolerance,
'gravity_on': True,
'gravity': 9.81}
settings['StaticTrim'] = {'solver': 'StaticCoupled',
'solver_settings': settings['StaticCoupled'],
'thrust_nodes': thrust_nodes,
'initial_alpha': alpha,
'initial_deflection': cs_deflection,
'initial_thrust': thrust,
'max_iter': 200,
'fz_tolerance': 1e-2,
'fx_tolerance': 1e-2,
'm_tolerance': 1e-2}
settings['Trim'] = {'solver': 'StaticCoupled',
'solver_settings': settings['StaticCoupled'],
'initial_alpha': alpha,
'initial_beta': beta,
'cs_indices': [0],
'initial_cs_deflection': [cs_deflection],
'thrust_nodes': thrust_nodes,
'initial_thrust': [thrust, thrust]}
settings['NonLinearDynamicCoupledStep'] = {'print_info': 'off',
'initial_velocity_direction': [-1., 0., 0.],
'max_iterations': 950,
'delta_curved': 1e-6,
'min_delta': tolerance,
'newmark_damp': 5e-3,
'gravity_on': True,
'gravity': 9.81,
'num_steps': n_tstep,
'dt': dt,
'initial_velocity': u_inf * 1}
settings['NonLinearDynamicPrescribedStep'] = {'print_info': 'on',
'initial_velocity_direction': [-1., 0., 0.],
'max_iterations': 950,
'delta_curved': 1e-6,
'min_delta': self.tolerance,
'newmark_damp': 5e-3,
'gravity_on': True,
'gravity': 9.81,
'num_steps': self.n_tstep,
'dt': self.dt}
settings['StepLinearUVLM'] = {'dt': self.dt,
'integr_order': 1,
'remove_predictor': True,
'use_sparse': True,
'velocity_field_generator': 'GustVelocityField',
'velocity_field_input': {'u_inf': u_inf,
'u_inf_direction': [1., 0., 0.],
'gust_shape': '1-cos',
'gust_length': 1.,
'gust_intensity': self.gust_intensity * u_inf,
'offset': 30.,
'span': self.span}}
settings['StepUvlm'] = {'print_info': 'on',
'horseshoe': self.horseshoe,
'num_cores': 4,
'n_rollup': 1,
'convection_scheme': self.wake_type,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'GustVelocityField',
'velocity_field_input': {'u_inf': u_inf * 0,
'u_inf_direction': [1., 0, 0],
'gust_shape': '1-cos',
'gust_length': 5.,
'gust_intensity': gust_intensity * u_inf,
'offset': 15.0,
'span': self.span,
'relative_motion': True},
# 'velocity_field_generator': 'SteadyVelocityField',
# 'velocity_field_input': {'u_inf': u_inf*1,
# 'u_inf_direction': [1., 0., 0.]},
'rho': rho,
'n_time_steps': n_tstep,
'dt': dt,
'gamma_dot_filtering': 3}
settings['DynamicCoupled'] = {'print_info': 'on',
# 'structural_substeps': 1,
# 'dynamic_relaxation': 'on',
# 'clean_up_previous_solution': 'on',
'structural_solver': 'NonLinearDynamicCoupledStep',
'structural_solver_settings': settings['NonLinearDynamicCoupledStep'],
'aero_solver': 'StepUvlm',
'aero_solver_settings': settings['StepUvlm'],
'fsi_substeps': 200,
'fsi_tolerance': fsi_tolerance,
'relaxation_factor': relaxation_factor,
'minimum_steps': 1,
'relaxation_steps': 150,
'final_relaxation_factor': 0.5,
'n_time_steps': n_tstep,
'dt': dt,
'include_unsteady_force_contribution': 'off',
'postprocessors': ['BeamLoads', 'StallCheck', 'BeamPlot', 'AerogridPlot'],
'postprocessors_settings': {'BeamLoads': {'folder': './output/',
'csv_output': 'off'},
'StallCheck': {'output_degrees': True,
'stall_angles': {
'0': [-12 * np.pi / 180,
6 * np.pi / 180],
'1': [-12 * np.pi / 180,
6 * np.pi / 180],
'2': [-12 * np.pi / 180,
6 * np.pi / 180]}},
'BeamPlot': {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on'},
'AerogridPlot': {
'u_inf': u_inf,
'folder': './output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0},
# 'WriteVariablesTime': {
# # 'delimeter': ',',
# # 'structure_nodes': [0],
# # 'structure_variables': ['Z']
# # settings['WriteVariablesTime'] = {'delimiter': ' ',
# 'FoR_variables': ['GFoR_pos', 'GFoR_vel', 'GFoR_acc'],
# 'FoR_number': [],
# 'structure_variables': ['AFoR_steady_forces', 'AFoR_unsteady_forces','AFoR_position'],
# 'structure_nodes': [0,-1],
# 'aero_panels_variables': ['gamma', 'gamma_dot'],
# 'aero_panels_isurf': [0,1,2],
# 'aero_panels_im': [1,1,1],
# 'aero_panels_in': [-2,-2,-2],
# 'aero_nodes_variables': ['GFoR_steady_force', 'GFoR_unsteady_force'],
# 'aero_nodes_isurf': [0,1,2],
# 'aero_nodes_im': [1,1,1],
# 'aero_nodes_in': [-2,-2,-2]
# }}}
}}
settings['Modal'] = {'print_info': True,
'use_undamped_modes': True,
'NumLambda': 30,
'rigid_body_modes': True,
'write_modes_vtk': 'on',
'print_matrices': 'on',
'write_data': 'on',
'continuous_eigenvalues': 'off',
'dt': dt,
'plot_eigenvalues': False}
settings['LinearAssembler'] = {'linear_system': 'LinearAeroelastic',
'linear_system_settings': {
'beam_settings': {'modal_projection': False,
'inout_coords': 'nodes',
'discrete_time': True,
'newmark_damp': 0.5,
'discr_method': 'newmark',
'dt': dt,
'proj_modes': 'undamped',
'use_euler': 'off',
'num_modes': 40,
'print_info': 'on',
'gravity': 'on',
'remove_dofs': []},
'aero_settings': {'dt': dt,
'integr_order': 2,
'density': rho,
'remove_predictor': False,
'use_sparse': True,
'rigid_body_motion': True,
'use_euler': False,
'remove_inputs': ['u_gust']},
'rigid_body_motion': True}}
settings['AsymptoticStability'] = {'sys_id': 'LinearAeroelastic',
'print_info': 'on',
'display_root_locus':'off',
'frequency_cutoff': 0,
'export_eigenvalues': 'on',
'num_evals':100,
'folder': './output/'}
settings['LinDynamicSim'] = {'dt': dt,
'n_tsteps': self.n_tstep,
'sys_id': 'LinearAeroelastic',
'postprocessors': ['BeamPlot', 'AerogridPlot'],
'postprocessors_settings': {'AerogridPlot': {
'u_inf': u_inf,
'folder': './output',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0},
'BeamPlot': {'folder': './output/',
'include_rbm': 'on',
'include_applied_forces': 'on'}}}
settings['AerogridPlot'] = {'folder': './output/',
'include_rbm': 'off',
'include_applied_forces': 'on',
'minus_m_star': 0,
'u_inf': u_inf
}
settings['AeroForcesCalculator'] = {'folder': './output/',
'write_text_file': 'off',
'text_file_name': case_name + '_aeroforces.csv',
'screen_output': 'on',
'unsteady': 'off',
'coefficients': True,
'q_ref': 0.5 * rho * u_inf ** 2,
'S_ref': self.planform_area()
}
settings['BeamPlot'] = {'folder': './output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'include_FoR': 'on'}
settings['BeamLoads'] = {'folder': './output/',
'csv_output': 'off'}
settings['SaveData'] = {'folder': './output/',
'save_aero': 'on',
'save_structure': 'on',
'save_linear': 'off'}
settings['StabilityDerivatives'] = {'u_inf': self.u_inf,
'S_ref': 12.809,
'b_ref': self.span,
'c_ref': 0.719}
config = configobj.ConfigObj()
config.filename = file_name
for k, v in settings.items():
config[k] = v
config.write()
self.settings = settings
self.config = config
if __name__=='__main__':
ws = HortenWing(M=4,
N=11,
Mstarfactor=5,
u_inf=28,
rho=1.225,
alpha_deg=4)
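    # A typical workflow with the case object would then be (sketch only; the exact call
    # order is an assumption based on the methods defined above):
    # ws.clean_test_files()
    # ws.update_mass_stiffness(sigma=1.)
    # ws.update_fem_prop()
    # ws.update_aero_properties()
    # ws.generate_fem_file()
    # ws.generate_aero_file()
    # ws.set_default_config_dict()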
|
py | b40f2cbe8c88dbbf25f622147cf0ea455849957e | # amt_bowl, amt_size, amt_stack = int(input()), list(()), 0
# for each_bowl in range(amt_bowl): amt_size.append(int(input()))
# amt_max = sorted(amt_size)[-1]
# def new_stack():
# global amt_size, amt_stack
# amt_size.sort()
# amt_min = 0
# for each_bowl in amt_size.copy():
# if each_bowl > amt_min:
# amt_size.remove(each_bowl)
# amt_min = each_bowl
# amt_stack += 1
# if len(amt_size) > 0: new_stack()
# new_stack()
# print(amt_stack)
# Passed
# 60% (T: four cases)
# amt_bowl, amt_size, amt_max = int(input()), list(()), 1
# for each_bowl in range(amt_bowl): amt_size.append(int(input()))
# last_bowl, last_stack = sorted(amt_size)[0], 1
# for each_size in sorted(amt_size):
# if each_size == last_bowl: last_stack += 1
# else:
# amt_max = max(last_stack, amt_max)
# last_bowl, last_stack = each_size, 1
# print(amt_max)
# Passed
# 100%
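# The minimum number of stacks equals the largest count of equal-sized bowls, since two
# bowls of the same size can never share a stack.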
amt_bowl, amt_size = int(input()), list(())
for each_bowl in range(amt_bowl): amt_size.append(int(input()))
print(max([amt_size.count(each_size) for each_size in set(amt_size)]))
# Passed
# 100% |
py | b40f2d41ba97db415d67527912268483bae1eecb | """
Module for reading config files (xknx.yaml).
* it will parse the given file
* and add the found devices to the devices vector of XKNX.
"""
from enum import Enum
import logging
from typing import TYPE_CHECKING
from .config_v1 import ConfigV1
from .yaml_loader import load_yaml
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class Version(Enum):
"""The used xknx.yaml structure version."""
VERSION_1 = 1
VERSION_2 = 2
class Config:
"""Class for parsing xknx.yaml."""
def __init__(self, xknx: "XKNX") -> None:
"""Initialize Config class."""
self.xknx = xknx
def read(self, file: str = "xknx.yaml") -> None:
"""Read config."""
logger.debug("Reading %s", file)
doc = load_yaml(file)
self.parse(doc)
@staticmethod
def parse_version(doc) -> Version:
"""Parse the version of the xknx.yaml."""
if "version" in doc:
return Version(doc["version"])
return Version.VERSION_1
def parse(self, doc) -> None:
"""Parse the config from the YAML."""
version = Config.parse_version(doc)
if version is Version.VERSION_1:
ConfigV1(xknx=self.xknx).parse(doc)
elif version is Version.VERSION_2:
raise NotImplementedError("Version 2 not yet implemented.")
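# Typical usage (sketch; assumes an XKNX instance is available):
#     config = Config(xknx)
#     config.read("xknx.yaml")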
|
py | b40f2db5698cf8a98042bbd05a1d27af0994631f | # Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import tensorflow as tf
import ray
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.filter import get_filter
def rollout(policy, env, timestep_limit=None, add_noise=False):
"""Do a rollout.
    If add_noise is True, Gaussian noise scaled by the policy's action_noise_std
    is added to the actions. Otherwise, no action noise is added.
"""
env_timestep_limit = env.spec.max_episode_steps
timestep_limit = (env_timestep_limit if timestep_limit is None else min(
timestep_limit, env_timestep_limit))
rews = []
t = 0
observation = env.reset()
for _ in range(timestep_limit or 999999):
ac = policy.compute(observation, add_noise=add_noise)[0]
observation, rew, done, _ = env.step(ac)
rews.append(rew)
t += 1
if done:
break
rews = np.array(rews, dtype=np.float32)
return rews, t
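# Example (sketch): ``rews, length = rollout(policy, env, add_noise=True)``; the episode
# return is then ``rews.sum()``.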
class GenericPolicy(object):
def __init__(self, sess, action_space, preprocessor, observation_filter,
model_options, action_noise_std):
self.sess = sess
self.action_space = action_space
self.action_noise_std = action_noise_std
self.preprocessor = preprocessor
self.observation_filter = get_filter(observation_filter,
self.preprocessor.shape)
self.inputs = tf.placeholder(tf.float32,
[None] + list(self.preprocessor.shape))
# Policy network.
dist_class, dist_dim = ModelCatalog.get_action_dist(
self.action_space, model_options, dist_type="deterministic")
model = ModelCatalog.get_model(self.inputs, dist_dim, model_options)
dist = dist_class(model.outputs)
self.sampler = dist.sample()
self.variables = ray.experimental.TensorFlowVariables(
model.outputs, self.sess)
self.num_params = sum(
np.prod(variable.shape.as_list())
for _, variable in self.variables.variables.items())
self.sess.run(tf.global_variables_initializer())
def compute(self, observation, add_noise=False, update=True):
observation = self.preprocessor.transform(observation)
observation = self.observation_filter(observation[None], update=update)
action = self.sess.run(
self.sampler, feed_dict={self.inputs: observation})
if add_noise and isinstance(self.action_space, gym.spaces.Box):
action += np.random.randn(*action.shape) * self.action_noise_std
return action
def set_weights(self, x):
self.variables.set_flat(x)
def get_weights(self):
return self.variables.get_flat()
|
py | b40f2dec1a353ad5482f1779aebf13d649343bb3 | import pathlib
import unittest
from unittest import TestCase
from td.utils import StatePath
class StatePathSession(TestCase):
"""Will perform a unit test for the StatePath Object."""
def setUp(self) -> None:
"""Set up the StatePath Instance."""
self.state_path = StatePath()
def test_creates_instance_of_session(self) -> None:
"""Create an instance and make sure it's a StatePath Object."""
# Make sure it's a state path.
self.assertIsInstance(self.state_path, StatePath)
# make sure our default name matches.
self.assertEqual(
self.state_path.credenitals_file_name, 'td_state.json'
)
# Make sure the Credentials File is a Windows Path.
self.assertIsInstance(
self.state_path.credentials_file,
pathlib.WindowsPath
)
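        # NOTE: this assertion assumes the tests run on Windows; on other platforms
        # pathlib resolves concrete paths to PosixPath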
def test_home_path(self) -> None:
"""Tests creating a homepath."""
truth = pathlib.Path.home()
self.assertEqual(truth, self.state_path.path_home())
def test_home_directory(self) -> None:
"""Tests grabbing the home directory."""
truth = pathlib.Path(__file__).parents[2].joinpath('td')
self.assertEqual(truth, self.state_path.library_directory)
def test_settings_directory(self) -> None:
"""Tests grabbing the settings directory."""
truth = pathlib.Path().home().joinpath('.td_python_library')
self.assertEqual(truth, self.state_path.settings_directory)
def test_library_directory(self) -> None:
"""Tests grabbing the home directory."""
truth = pathlib.Path.home()
self.assertEqual(truth, self.state_path.home_directory)
def test_json_library_path(self) -> None:
"""Test grabbing the Library JSON file path."""
truth = pathlib.Path(__file__).parents[2].joinpath(
'td/td_state.json'
)
self.assertEqual(truth, self.state_path.json_library_path())
def test_json_setting_path(self) -> None:
"""Test grabbing the Setting JSON file path."""
truth = pathlib.Path().home().joinpath('.td_python_library/td_state.json')
self.assertEqual(truth, self.state_path.json_settings_path())
def test_write_to_settings(self) -> None:
"""Test writing the credentials to Settings Folder."""
# Set the fake name.
self.state_path.credenitals_file_name = 'fake_td_state.json'
# Determine our base.
truth = pathlib.Path().home().joinpath('.td_python_library/fake_td_state.json')
        # Get the JSON settings path.
json_settings = self.state_path.json_settings_path()
# Write the credentials.
check = self.state_path.write_credentials(
file_path=json_settings,
state={'value': 'settings'}
)
# Make sure they are equal.
self.assertEqual(truth, check)
def test_write_to_library(self) -> None:
"""Test writing the credentials to Library Folder."""
# Set the fake name.
self.state_path.credenitals_file_name = 'fake_td_state.json'
# Determine our base.
truth = pathlib.Path(__file__).parents[2].joinpath(
'td/fake_td_state.json'
)
        # Get the JSON library path.
json_settings = self.state_path.json_library_path()
# Write the credentials.
check = self.state_path.write_credentials(
file_path=json_settings,
state={'value': 'library'}
)
# Make sure they are equal.
self.assertEqual(truth, check)
def test_write_to_custom(self) -> None:
"""Test writing to a User Provided Path."""
# Define the file path.
file_path = 'config/td_state_custom.json'
# Define the truth.
truth = pathlib.Path(__file__).parents[2].joinpath(file_path)
# Write and check.
self.assertEqual(truth, self.state_path.write_credentials(
file_path=file_path,
state={'value': 'custom'}
)
)
def test_read_from_settings(self) -> None:
"""Test writing the credentials to Settings Folder."""
# Set the fake name.
self.state_path.credenitals_file_name = 'fake_td_state.json'
truth = {'value': 'settings'}
file_path = self.state_path.json_settings_path()
check = self.state_path.read_credentials(file_path=file_path)
# Make sure they are equal.
self.assertEqual(truth, check)
def test_read_from_library(self) -> None:
"""Test writing the credentials to Library Folder."""
# Set the fake name.
self.state_path.credenitals_file_name = 'fake_td_state.json'
truth = {'value': 'library'}
file_path = self.state_path.json_library_path()
check = self.state_path.read_credentials(file_path=file_path)
self.assertEqual(truth, check)
def test_read_from_custom(self) -> None:
"""Test writing to a User Provided Path."""
truth = {'value': 'custom'}
file_path = pathlib.Path('config/td_state_custom.json')
check = self.state_path.read_credentials(file_path=file_path)
# Make sure they are equal.
self.assertEqual(truth, check)
def test_read_from_non_exist(self) -> None:
"""Test writing to a User Provided Path."""
truth = 'Credentials File does not exist.'
file_path = pathlib.Path('config/no/td_state_custom.json')
with self.assertRaises(FileNotFoundError) as context:
self.state_path.read_credentials(file_path=file_path)
# Make sure they are equal.
self.assertEqual(truth, str(context.exception))
def tearDown(self) -> None:
"""Teardown the StatePath Object."""
self.state_path = None
if __name__ == '__main__':
unittest.main()
|
py | b40f2e5ddd3ac67e4f94ab5c874d07707322102c | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3d rotation matrix."""
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import axis_angle
from tensorflow_graphics.geometry.transformation import quaternion
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.geometry.transformation.tests import test_data as td
from tensorflow_graphics.geometry.transformation.tests import test_helpers
from tensorflow_graphics.util import test_case
class RotationMatrix3dTest(test_case.TestCase):
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_assert_rotation_matrix_normalized_passthrough(self):
"""Checks that the assert is a passthrough when the flag is False."""
angles = test_helpers.generate_preset_test_euler_angles()
matrix_input = rotation_matrix_3d.from_euler(angles)
matrix_output = rotation_matrix_3d.assert_rotation_matrix_normalized(
matrix_input)
self.assertTrue(matrix_input is matrix_output) # pylint: disable=g-generic-assert
@parameterized.parameters((np.float32), (np.float64))
def test_assert_rotation_matrix_normalized_preset(self, dtype):
"""Checks that assert_normalized function works as expected."""
angles = test_helpers.generate_preset_test_euler_angles().astype(dtype)
matrix = rotation_matrix_3d.from_euler(angles)
matrix_rescaled = matrix * 1.01
matrix_normalized = rotation_matrix_3d.assert_rotation_matrix_normalized(
matrix)
self.evaluate(matrix_normalized)
with self.assertRaises(tf.errors.InvalidArgumentError): # pylint: disable=g-error-prone-assert-raises
self.evaluate(rotation_matrix_3d.assert_rotation_matrix_normalized(
matrix_rescaled))
@parameterized.parameters(
((3, 3),),
((None, 3, 3),),
)
def test_assert_rotation_matrix_normalized_exception_not_raised(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
rotation_matrix_3d.assert_rotation_matrix_normalized, shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -1", (3, None)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
)
def test_assert_rotation_matrix_normalized_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_raised(
rotation_matrix_3d.assert_rotation_matrix_normalized, error_msg, shapes)
@parameterized.parameters(
((3,), (1,)),
((None, 3), (None, 1)),
((1, 3), (1, 1)),
((2, 3), (2, 1)),
((1, 3), (1,)),
((3,), (1, 1)),
)
def test_from_axis_angle_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(rotation_matrix_3d.from_axis_angle,
shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,), (1,)),
("must have exactly 1 dimensions in axis -1", (3,), (None,)),
)
def test_from_axis_angle_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(rotation_matrix_3d.from_axis_angle,
error_msg, shapes)
def test_from_axis_angle_normalized_preset(self):
"""Tests that axis-angles can be converted to rotation matrices."""
euler_angles = test_helpers.generate_preset_test_euler_angles()
axis, angle = axis_angle.from_euler(euler_angles)
matrix_axis_angle = rotation_matrix_3d.from_axis_angle(axis, angle)
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix_axis_angle),
np.ones(euler_angles.shape[0:-1] + (1,)))
def test_from_axis_angle_normalized_random(self):
"""Tests that axis-angles can be converted to rotation matrices."""
tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist()
random_axis = np.random.normal(size=tensor_shape + [3])
random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)
random_angle = np.random.normal(size=tensor_shape + [1])
matrix_axis_angle = rotation_matrix_3d.from_axis_angle(
random_axis, random_angle)
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix_axis_angle),
np.ones(tensor_shape + [1]))
@parameterized.parameters(
((td.AXIS_3D_X, td.ANGLE_45), (td.MAT_3D_X_45,)),
((td.AXIS_3D_Y, td.ANGLE_45), (td.MAT_3D_Y_45,)),
((td.AXIS_3D_Z, td.ANGLE_45), (td.MAT_3D_Z_45,)),
((td.AXIS_3D_X, td.ANGLE_90), (td.MAT_3D_X_90,)),
((td.AXIS_3D_Y, td.ANGLE_90), (td.MAT_3D_Y_90,)),
((td.AXIS_3D_Z, td.ANGLE_90), (td.MAT_3D_Z_90,)),
((td.AXIS_3D_X, td.ANGLE_180), (td.MAT_3D_X_180,)),
((td.AXIS_3D_Y, td.ANGLE_180), (td.MAT_3D_Y_180,)),
((td.AXIS_3D_Z, td.ANGLE_180), (td.MAT_3D_Z_180,)),
)
def test_from_axis_angle_preset(self, test_inputs, test_outputs):
"""Tests that an axis-angle maps to correct matrix."""
self.assert_output_is_correct(rotation_matrix_3d.from_axis_angle,
test_inputs, test_outputs)
def test_from_axis_angle_random(self):
"""Tests conversion to matrix."""
tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist()
random_axis = np.random.normal(size=tensor_shape + [3])
random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)
random_angle = np.random.normal(size=tensor_shape + [1])
matrix_axis_angle = rotation_matrix_3d.from_axis_angle(
random_axis, random_angle)
random_quaternion = quaternion.from_axis_angle(random_axis, random_angle)
matrix_quaternion = rotation_matrix_3d.from_quaternion(random_quaternion)
self.assertAllClose(matrix_axis_angle, matrix_quaternion, rtol=1e-3)
# Checks that resulting rotation matrices are normalized.
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix_axis_angle),
np.ones(tensor_shape + [1]))
@parameterized.parameters(
((td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_X,)),
((td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),
((td.AXIS_3D_X, -td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_Y,)),
((td.AXIS_3D_Y, -td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_Z,)),
((td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_Y,)),
((td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_X,)),
((td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_X), (td.AXIS_3D_Y,)),
((td.AXIS_3D_Z, -td.ANGLE_90, td.AXIS_3D_Y), (td.AXIS_3D_X,)),
((td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_Z), (td.AXIS_3D_Z,)),
)
def test_from_axis_angle_rotate_vector_preset(self, test_inputs,
test_outputs):
"""Tests the directionality of axis-angle rotations."""
def func(axis, angle, point):
matrix = rotation_matrix_3d.from_axis_angle(axis, angle)
return rotation_matrix_3d.rotate(point, matrix)
self.assert_output_is_correct(func, test_inputs, test_outputs)
@parameterized.parameters(
((3,),),
((None, 3),),
((2, 3),),
)
def test_from_euler_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(rotation_matrix_3d.from_euler, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,)),)
def test_from_euler_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(rotation_matrix_3d.from_euler, error_msg,
shapes)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_euler_jacobian_preset(self):
"""Test the Jacobian of the from_euler function."""
x_init = test_helpers.generate_preset_test_euler_angles()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.from_euler, [x_init])
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_euler_jacobian_random(self):
"""Test the Jacobian of the from_euler function."""
x_init = test_helpers.generate_random_test_euler_angles()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.from_euler, [x_init])
def test_from_euler_normalized_preset(self):
"""Tests that euler angles can be converted to rotation matrices."""
euler_angles = test_helpers.generate_preset_test_euler_angles()
matrix = rotation_matrix_3d.from_euler(euler_angles)
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix),
np.ones(euler_angles.shape[0:-1] + (1,)))
def test_from_euler_normalized_random(self):
"""Tests that euler angles can be converted to rotation matrices."""
random_euler_angles = test_helpers.generate_random_test_euler_angles()
matrix = rotation_matrix_3d.from_euler(random_euler_angles)
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix),
np.ones(random_euler_angles.shape[0:-1] + (1,)))
@parameterized.parameters(
((td.AXIS_3D_0,), (td.MAT_3D_ID,)),
((td.ANGLE_45 * td.AXIS_3D_X,), (td.MAT_3D_X_45,)),
((td.ANGLE_45 * td.AXIS_3D_Y,), (td.MAT_3D_Y_45,)),
((td.ANGLE_45 * td.AXIS_3D_Z,), (td.MAT_3D_Z_45,)),
((td.ANGLE_90 * td.AXIS_3D_X,), (td.MAT_3D_X_90,)),
((td.ANGLE_90 * td.AXIS_3D_Y,), (td.MAT_3D_Y_90,)),
((td.ANGLE_90 * td.AXIS_3D_Z,), (td.MAT_3D_Z_90,)),
((td.ANGLE_180 * td.AXIS_3D_X,), (td.MAT_3D_X_180,)),
((td.ANGLE_180 * td.AXIS_3D_Y,), (td.MAT_3D_Y_180,)),
((td.ANGLE_180 * td.AXIS_3D_Z,), (td.MAT_3D_Z_180,)),
)
def test_from_euler_preset(self, test_inputs, test_outputs):
"""Tests that Euler angles create the expected matrix."""
self.assert_output_is_correct(rotation_matrix_3d.from_euler, test_inputs,
test_outputs)
def test_from_euler_random(self):
"""Tests that Euler angles produce the same result as axis-angle."""
angles = test_helpers.generate_random_test_euler_angles()
matrix = rotation_matrix_3d.from_euler(angles)
tensor_tile = angles.shape[:-1]
x_axis = np.tile(td.AXIS_3D_X, tensor_tile + (1,))
y_axis = np.tile(td.AXIS_3D_Y, tensor_tile + (1,))
z_axis = np.tile(td.AXIS_3D_Z, tensor_tile + (1,))
x_angle = np.expand_dims(angles[..., 0], axis=-1)
y_angle = np.expand_dims(angles[..., 1], axis=-1)
z_angle = np.expand_dims(angles[..., 2], axis=-1)
x_rotation = rotation_matrix_3d.from_axis_angle(x_axis, x_angle)
y_rotation = rotation_matrix_3d.from_axis_angle(y_axis, y_angle)
z_rotation = rotation_matrix_3d.from_axis_angle(z_axis, z_angle)
expected_matrix = tf.matmul(z_rotation, tf.matmul(y_rotation, x_rotation))
self.assertAllClose(expected_matrix, matrix, rtol=1e-3)
@parameterized.parameters(
((3,),),
((None, 3),),
)
def test_from_euler_with_small_angles_approximation_exception_not_raised(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
rotation_matrix_3d.from_euler_with_small_angles_approximation, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,)),)
def test_from_euler_with_small_angles_approximation_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(
rotation_matrix_3d.from_euler_with_small_angles_approximation,
error_msg, shapes)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_euler_with_small_angles_approximation_jacobian_random(self):
"""Test the Jacobian of from_euler_with_small_angles_approximation."""
x_init = test_helpers.generate_random_test_euler_angles(
min_angle=-0.17, max_angle=0.17)
self.assert_jacobian_is_correct_fn(
rotation_matrix_3d.from_euler_with_small_angles_approximation, [x_init])
def test_from_euler_with_small_angles_approximation_random(self):
"""Tests small_angles approximation by comparing to exact calculation."""
# Only generate small angles. For a test tolerance of 1e-3, 0.16 was found
# empirically to be the range where the small angle approximation works.
random_euler_angles = test_helpers.generate_random_test_euler_angles(
min_angle=-0.16, max_angle=0.16)
exact_matrix = rotation_matrix_3d.from_euler(random_euler_angles)
approximate_matrix = (
rotation_matrix_3d.from_euler_with_small_angles_approximation(
random_euler_angles))
self.assertAllClose(exact_matrix, approximate_matrix, atol=1e-3)
@parameterized.parameters(
((4,),),
((None, 4),),
)
def test_from_quaternion_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(rotation_matrix_3d.from_quaternion,
shapes)
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (None,)),)
def test_from_quaternion_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(rotation_matrix_3d.from_quaternion,
error_msg, shapes)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_quaternion_jacobian_preset(self):
"""Test the Jacobian of the from_quaternion function."""
x_init = test_helpers.generate_preset_test_quaternions()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.from_quaternion,
[x_init])
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_quaternion_jacobian_random(self):
"""Test the Jacobian of the from_quaternion function."""
x_init = test_helpers.generate_random_test_quaternions()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.from_quaternion,
[x_init])
def test_from_quaternion_normalized_preset(self):
"""Tests that quaternions can be converted to rotation matrices."""
euler_angles = test_helpers.generate_preset_test_euler_angles()
quat = quaternion.from_euler(euler_angles)
matrix_quat = rotation_matrix_3d.from_quaternion(quat)
self.assertAllEqual(
rotation_matrix_3d.is_valid(matrix_quat),
np.ones(euler_angles.shape[0:-1] + (1,)))
def test_from_quaternion_normalized_random(self):
"""Tests that random quaternions can be converted to rotation matrices."""
random_quaternion = test_helpers.generate_random_test_quaternions()
tensor_shape = random_quaternion.shape[:-1]
random_matrix = rotation_matrix_3d.from_quaternion(random_quaternion)
self.assertAllEqual(
rotation_matrix_3d.is_valid(random_matrix),
np.ones(tensor_shape + (1,)))
def test_from_quaternion_preset(self):
"""Tests that a quaternion maps to correct matrix."""
preset_quaternions = test_helpers.generate_preset_test_quaternions()
preset_matrices = test_helpers.generate_preset_test_rotation_matrices_3d()
self.assertAllClose(preset_matrices,
rotation_matrix_3d.from_quaternion(preset_quaternions))
def test_from_quaternion_random(self):
"""Tests conversion to matrix."""
random_euler_angles = test_helpers.generate_random_test_euler_angles()
random_quaternions = quaternion.from_euler(random_euler_angles)
random_rotation_matrices = rotation_matrix_3d.from_euler(
random_euler_angles)
self.assertAllClose(random_rotation_matrices,
rotation_matrix_3d.from_quaternion(random_quaternions))
@parameterized.parameters(
((3, 3),),
((None, 3, 3),),
((2, 3, 3),),
)
def test_inverse_exception_not_raised(self, *shapes):
"""Checks the inputs of the rotate function."""
self.assert_exception_is_not_raised(rotation_matrix_3d.inverse, shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -1", (3, None)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
)
def test_inverse_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(rotation_matrix_3d.inverse, error_msg,
shapes)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_inverse_jacobian_preset(self):
"""Test the Jacobian of the inverse function."""
x_init = test_helpers.generate_preset_test_rotation_matrices_3d()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.inverse, [x_init])
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_inverse_jacobian_random(self):
"""Test the Jacobian of the inverse function."""
x_init = test_helpers.generate_random_test_rotation_matrix_3d()
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.inverse, [x_init])
def test_inverse_normalized_random(self):
"""Checks that inverted rotation matrices are valid rotations."""
random_euler_angle = test_helpers.generate_random_test_euler_angles()
tensor_tile = random_euler_angle.shape[:-1]
random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)
predicted_invert_random_matrix = rotation_matrix_3d.inverse(random_matrix)
self.assertAllEqual(
rotation_matrix_3d.is_valid(predicted_invert_random_matrix),
np.ones(tensor_tile + (1,)))
def test_inverse_random(self):
"""Checks that inverting rotated points results in no transformation."""
random_euler_angle = test_helpers.generate_random_test_euler_angles()
tensor_tile = random_euler_angle.shape[:-1]
random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)
random_point = np.random.normal(size=tensor_tile + (3,))
rotated_random_points = rotation_matrix_3d.rotate(random_point,
random_matrix)
predicted_invert_random_matrix = rotation_matrix_3d.inverse(random_matrix)
predicted_invert_rotated_random_points = rotation_matrix_3d.rotate(
rotated_random_points, predicted_invert_random_matrix)
self.assertAllClose(
random_point, predicted_invert_rotated_random_points, rtol=1e-6)
@parameterized.parameters(
((3, 3),),
((None, 3, 3),),
((2, 3, 3),),
)
def test_is_valid_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(rotation_matrix_3d.is_valid, shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -1", (3, None)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
)
def test_is_valid_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_raised(rotation_matrix_3d.is_valid, error_msg,
shape)
def test_is_valid_random(self):
"""Tests that is_valid works as intended."""
random_euler_angle = test_helpers.generate_random_test_euler_angles()
tensor_tile = random_euler_angle.shape[:-1]
rotation_matrix = rotation_matrix_3d.from_euler(random_euler_angle)
pred_normalized = rotation_matrix_3d.is_valid(rotation_matrix)
with self.subTest(name="all_normalized"):
self.assertAllEqual(pred_normalized,
np.ones(shape=tensor_tile + (1,), dtype=bool))
with self.subTest(name="non_orthonormal"):
test_matrix = np.array([[2., 0., 0.], [0., 0.5, 0], [0., 0., 1.]])
pred_normalized = rotation_matrix_3d.is_valid(test_matrix)
self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool))
with self.subTest(name="negative_orthonormal"):
test_matrix = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
pred_normalized = rotation_matrix_3d.is_valid(test_matrix)
self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool))
@parameterized.parameters(
((3,), (3, 3)),
((None, 3), (None, 3, 3)),
((1, 3), (1, 3, 3)),
((2, 3), (2, 3, 3)),
((3,), (1, 3, 3)),
((1, 3), (3, 3)),
)
def test_rotate_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(rotation_matrix_3d.rotate, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,), (3, 3)),
("must have a rank greater than 1", (3,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (3, None)),
("must have exactly 3 dimensions in axis -2", (3,), (None, 3)),
)
def test_rotate_exception_raised(self, error_msg, *shapes):
"""Checks the inputs of the rotate function."""
self.assert_exception_is_raised(rotation_matrix_3d.rotate, error_msg,
shapes)
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_rotate_jacobian_preset(self):
"""Test the Jacobian of the rotate function."""
x_matrix_init = test_helpers.generate_preset_test_rotation_matrices_3d()
tensor_shape = x_matrix_init.shape[:-1]
x_point_init = np.random.uniform(size=tensor_shape)
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.rotate,
[x_point_init, x_matrix_init])
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_rotate_jacobian_random(self):
"""Test the Jacobian of the rotate function."""
x_matrix_init = test_helpers.generate_random_test_rotation_matrix_3d()
tensor_shape = x_matrix_init.shape[:-1]
x_point_init = np.random.uniform(size=tensor_shape)
self.assert_jacobian_is_correct_fn(rotation_matrix_3d.rotate,
[x_point_init, x_matrix_init])
@parameterized.parameters(
((td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_X), (td.AXIS_3D_X,)),
((td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Y), (td.AXIS_3D_Z,)),
((-td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Z), (td.AXIS_3D_Y,)),
((-td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_X), (td.AXIS_3D_Z,)),
((td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Y), (td.AXIS_3D_Y,)),
((td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Z), (td.AXIS_3D_X,)),
((td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_X), (td.AXIS_3D_Y,)),
((-td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Y), (td.AXIS_3D_X,)),
((td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Z), (td.AXIS_3D_Z,)),
)
def test_rotate_vector_preset(self, test_inputs, test_outputs):
"""Tests that the rotate function produces the expected results."""
def func(angles, point):
matrix = rotation_matrix_3d.from_euler(angles)
return rotation_matrix_3d.rotate(point, matrix)
self.assert_output_is_correct(func, test_inputs, test_outputs)
def test_rotate_vs_rotate_quaternion_random(self):
"""Tests that the rotate provide the same results as quaternion.rotate."""
random_euler_angle = test_helpers.generate_random_test_euler_angles()
tensor_tile = random_euler_angle.shape[:-1]
random_matrix = rotation_matrix_3d.from_euler(random_euler_angle)
random_quaternion = quaternion.from_rotation_matrix(random_matrix)
random_point = np.random.normal(size=tensor_tile + (3,))
ground_truth = quaternion.rotate(random_point, random_quaternion)
prediction = rotation_matrix_3d.rotate(random_point, random_matrix)
self.assertAllClose(ground_truth, prediction, rtol=1e-6)
if __name__ == "__main__":
test_case.main()
|
py | b40f2e6a22de8b8e3e910dfba4b5a8bedebcb0de | from django.contrib import admin
from todos_app.todos.models import Todo
from todos_app.todos.models.todo import Person, Category
# Option two
# @admin.register(Todo)
class TodoAdmin(admin.ModelAdmin):
list_display = ['title', 'owner']
list_filter = ['owner']
sortable_by = ['text']
# def has_change_permission(self, request, obj=None):
# return False
# Option one
admin.site.register(Todo, TodoAdmin)
admin.site.register(Person)
admin.site.register(Category)
|
py | b40f2e7b0c211a8cc35ea258f8e0cf5b746aff09 | ##########################################################################
#
# Copyright (c) 2007-2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import six
import unittest
import imath
import IECore
import IECoreImage
class ImageReaderTest( unittest.TestCase ) :
def testFactoryConstruction( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "AllHalfValues.exr" ) )
self.assertEqual( type( r ), IECoreImage.ImageReader )
def testCanReadAndIsComplete( self ) :
self.assertTrue( IECoreImage.ImageReader.canRead( os.path.join( "test", "IECoreImage", "data", "exr", "AllHalfValues.exr" ) ) )
self.assertFalse( IECoreImage.ImageReader.canRead( "thisFileDoesntExist.exr" ) )
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "AllHalfValues.exr" ) )
self.assertTrue( r.isComplete() )
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "incomplete.exr" ) )
self.assertFalse( r.isComplete() )
r = IECoreImage.ImageReader( "thisFileDoesntExist.exr" )
self.assertFalse( r.isComplete() )
def testDeep( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "primitives.exr" ) )
self.assertEqual( r.channelNames(), IECore.StringVectorData( [] ) )
self.assertTrue( r.isComplete() )
h = r.readHeader()
self.assertEqual( h[ "deep" ], IECore.BoolData( True ) )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "manyChannels.exr" ) )
self.assertNotEqual( r.channelNames(), IECore.StringVectorData( [] ) )
h = r.readHeader()
self.assertTrue( h[ "deep" ], IECore.BoolData( False ) )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "deepIncomplete.exr" ) )
self.assertFalse( r.isComplete() )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "tiledDeepComplete.exr" ) )
self.assertTrue( r.isComplete() )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "tiledDeepIncomplete.exr" ) )
self.assertFalse( r.isComplete() )
def testSupportedExtensions( self ) :
e = IECore.Reader.supportedExtensions( IECoreImage.ImageReader.staticTypeId() )
for ee in e :
self.assertTrue( type( ee ) is str )
# we don't need to validate the full OIIO extension list, but
		# make sure a reasonable set of image formats is supported
expectedImageReaderExtensions = [ "exr", "cin", "dpx", "sgi", "rgba", "rgb", "tga", "tif", "tiff", "tx", "jpg", "jpeg", "png" ]
self.assertTrue( set( expectedImageReaderExtensions ).issubset( e ) )
# non image files aren't supported
self.assertTrue( not "pdc" in e )
self.assertTrue( not "cob" in e )
self.assertTrue( not "obj" in e )
def testChannelNames( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "AllHalfValues.exr" ) )
c = r.channelNames()
self.assertEqual( c.staticTypeId(), IECore.StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 3 )
self.assertTrue( "R" in c )
self.assertTrue( "G" in c )
self.assertTrue( "B" in c )
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "manyChannels.exr" ) )
c = r.channelNames()
self.assertEqual( c.staticTypeId(), IECore.StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 7 )
self.assertTrue( "R" in c )
self.assertTrue( "G" in c )
self.assertTrue( "B" in c )
self.assertTrue( "A" in c )
self.assertTrue( "diffuse.red" in c )
self.assertTrue( "diffuse.green" in c )
self.assertTrue( "diffuse.blue" in c )
r = IECoreImage.ImageReader( "thisFileDoesntExist.exr" )
self.assertRaises( Exception, r.channelNames )
def testReadHeader( self ):
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "manyChannels.exr" ) )
h = r.readHeader()
c = h['channelNames']
self.assertEqual( c.staticTypeId(), IECore.StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 7 )
self.assertTrue( "R" in c )
self.assertTrue( "G" in c )
self.assertTrue( "B" in c )
self.assertTrue( "A" in c )
self.assertTrue( "diffuse.red" in c )
self.assertTrue( "diffuse.green" in c )
self.assertTrue( "diffuse.blue" in c )
self.assertEqual( h['displayWindow'], IECore.Box2iData( imath.Box2i( imath.V2i(0,0), imath.V2i(255,255) ) ) )
self.assertEqual( h['dataWindow'], IECore.Box2iData( imath.Box2i( imath.V2i(0,0), imath.V2i(255,255) ) ) )
def testDataAndDisplayWindows( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "AllHalfValues.exr" ) )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255 ) ) )
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "uvMapWithDataWindow.100x100.exr" ) )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 25 ), imath.V2i( 49 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 99 ) ) )
r = IECoreImage.ImageReader( "thisFileDoesntExist.exr" )
self.assertRaises( Exception, r.dataWindow )
self.assertRaises( Exception, r.displayWindow )
def testReadImage( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "uvMap.256x256.exr" ) )
i = r.read()
self.assertEqual( i.typeId(), IECoreImage.ImagePrimitive.staticTypeId() )
self.assertEqual( i.dataWindow, imath.Box2i( imath.V2i( 0 ), imath.V2i( 255 ) ) )
self.assertEqual( i.displayWindow, imath.Box2i( imath.V2i( 0 ), imath.V2i( 255 ) ) )
self.assertTrue( i.channelsValid() )
self.assertEqual( len( i ), 3 )
for c in ["R", "G", "B"] :
self.assertEqual( i[c].typeId(), IECore.FloatVectorData.staticTypeId() )
r = i["R"]
self.assertEqual( r[0], 0 )
self.assertEqual( r[-1], 1 )
g = i["G"]
self.assertEqual( g[0], 0 )
self.assertEqual( g[-1], 1 )
for b in i["B"] :
self.assertEqual( b, 0 )
def testNonZeroDataWindowOrigin( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "uvMapWithDataWindow.100x100.exr" ) )
i = r.read()
self.assertEqual( i.dataWindow, imath.Box2i( imath.V2i( 25 ), imath.V2i( 49 ) ) )
self.assertEqual( i.displayWindow, imath.Box2i( imath.V2i( 0 ), imath.V2i( 99 ) ) )
self.assertTrue( i.channelsValid() )
def testOrientation( self ) :
img = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "uvMap.512x256.exr" ) ).read()
indexedColors = {
0 : imath.V3f( 0, 0, 0 ),
511 : imath.V3f( 1, 0, 0 ),
512 * 255 : imath.V3f( 0, 1, 0 ),
512 * 255 + 511 : imath.V3f( 1, 1, 0 ),
}
for index, expectedColor in indexedColors.items() :
color = imath.V3f( img["R"][index], img["G"][index], img["B"][index] )
self.assertTrue( ( color - expectedColor).length() < 1.e-6 )
def testIncompleteImage( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "incomplete.exr" ) )
six.assertRaisesRegex( self, Exception, "Error reading pixel data from image file", r.read )
def testHeaderToBlindData( self ) :
dictHeader = {
'channelNames': IECore.StringVectorData( [ "R", "G", "B" ] ),
'oiio:ColorSpace': IECore.StringData( "Linear" ),
'compression': IECore.StringData( "piz" ),
'screenWindowCenter': IECore.V2fData( imath.V2f(0,0) ),
'displayWindow': IECore.Box2iData( imath.Box2i( imath.V2i(0,0), imath.V2i(511,255) ) ),
'dataWindow': IECore.Box2iData( imath.Box2i( imath.V2i(0,0), imath.V2i(511,255) ) ),
'PixelAspectRatio': IECore.FloatData( 1 ),
'screenWindowWidth': IECore.FloatData( 1 ),
'deep': IECore.BoolData( False ),
}
if IECoreImage.OpenImageIOAlgo.version() >= 20206 :
dictHeader['oiio:subimages'] = IECore.IntData( 1 )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "uvMap.512x256.exr" ) )
header = r.readHeader()
self.assertEqual( header, IECore.CompoundObject(dictHeader) )
img = r.read()
del dictHeader['channelNames']
del dictHeader['deep']
self.assertEqual( img.blindData(), IECore.CompoundData(dictHeader) )
def testTimeCodeInHeader( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "uvMap.512x256.exr" ) )
header = r.readHeader()
self.assertTrue( "smpte:TimeCode" not in header )
img = r.read()
self.assertTrue( "smpte:TimeCode" not in img.blindData() )
td = IECore.TimeCodeData( IECore.TimeCode( 12, 5, 3, 15, dropFrame = True, bgf1 = True, binaryGroup6 = 12 ) )
img2 = img.copy()
img2.blindData()["smpte:TimeCode"] = td
w = IECore.Writer.create( img2, os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
w["formatSettings"]["openexr"]["compression"].setValue( img2.blindData()["compression"] )
w.write()
r2 = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
header = r2.readHeader()
self.assertTrue( "smpte:TimeCode" in header )
self.assertEqual( header["smpte:TimeCode"], td )
img3 = r2.read()
self.assertTrue( "smpte:TimeCode" in img3.blindData() )
self.assertEqual( img3.blindData()["smpte:TimeCode"], td )
del img3.blindData()["Software"]
del img3.blindData()["HostComputer"]
del img3.blindData()["DateTime"]
self.assertEqual( img2.blindData(), img3.blindData() )
del img3.blindData()["smpte:TimeCode"]
self.assertEqual( img.blindData(), img3.blindData() )
def testTilesWithLeftovers( self ) :
r = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "tiff", "tilesWithLeftovers.tif" ) )
i = r.read()
i2 = IECoreImage.ImageReader( os.path.join( "test", "IECoreImage", "data", "exr", "tiffTileTestExpectedResults.exr" ) ).read()
op = IECoreImage.ImageDiffOp()
res = op(
imageA = i,
imageB = i2,
maxError = 0.004,
skipMissingChannels = False
)
self.assertFalse( res.value )
def testTiff( self ) :
for ( fileName, rawType ) in (
( os.path.join( "test", "IECoreImage", "data", "tiff", "uvMap.200x100.rgba.8bit.tif" ), IECore.UCharVectorData ),
( os.path.join( "test", "IECoreImage", "data", "tiff", "uvMap.200x100.rgba.16bit.tif" ), IECore.UShortVectorData ),
( os.path.join( "test", "IECoreImage", "data", "tiff", "uvMap.200x100.rgba.32bit.tif" ), IECore.FloatVectorData ),
) :
r = IECore.Reader.create( fileName )
self.assertIsInstance( r, IECoreImage.ImageReader )
img = r.read()
r["rawChannels"] = IECore.BoolData( True )
imgRaw = r.read()
self.assertEqual( img.displayWindow, imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 199, 99 ) ) )
self.assertEqual( img.dataWindow, imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 199, 99 ) ) )
self.assertTrue( img.channelsValid() )
self.assertTrue( imgRaw.channelsValid() )
self.assertEqual( img.keys(), imgRaw.keys() )
for c in img.keys() :
self.assertEqual( type(img[c]), IECore.FloatVectorData )
self.assertEqual( type(imgRaw[c]), rawType )
def testRawDPX( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "dpx", "uvMap.512x256.dpx" ) )
self.assertIsInstance( r, IECoreImage.ImageReader )
r['rawChannels'] = True
img = r.read()
self.assertEqual( type(img), IECoreImage.ImagePrimitive )
self.assertEqual( type(img['R']), IECore.UShortVectorData )
# OIIO 2.2.15.1 is no longer flagging errors for the truncated file.
# Likely culprit is https://github.com/OpenImageIO/oiio/commit/0bd647e00a25951c39c54173578831f5f81780dc.
@unittest.expectedFailure
def testJPG( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "jpg", "uvMap.512x256.jpg" ) )
self.assertIsInstance( r, IECoreImage.ImageReader )
self.assertTrue( r.isComplete() )
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "jpg", "uvMap.512x256.truncated.jpg" ) )
self.assertIsInstance( r, IECoreImage.ImageReader )
self.assertFalse( r.isComplete() )
def testPNG( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "png", "uvMap.512x256.png" ) )
self.assertIsInstance( r, IECoreImage.ImageReader )
self.assertTrue( r.isComplete() )
self.assertTrue( r.read().channelsValid() )
def testFramesPerSecond( self ):
		# read an image that has the FramesPerSecond set and ensure the values are correctly identified
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "rationalFramesPerSecond.exr" ) )
h1 = r.readHeader()
self.assertTrue( "framesPerSecond" in h1 )
self.assertEqual( h1["framesPerSecond"].getInterpretation(), IECore.GeometricData.Interpretation.Rational )
img = r.read()
# write the image to filesystem and read it again to check that the value was set correctly
w = IECore.Writer.create( img, os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
w.write()
r2 = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
h2 = r2.readHeader()
self.assertTrue( "framesPerSecond" in h2 )
self.assertEqual( h2["framesPerSecond"].getInterpretation(), IECore.GeometricData.Interpretation.Rational )
self.assertEqual( h1["framesPerSecond"], h2["framesPerSecond"] )
def testMiplevel( self ) :
# Test miplevel access for mipped files
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "tx", "uvMap.512x256.tx" ) )
r["miplevel"] = IECore.IntData( 0 )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 511, 255 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 511, 255 ) ) )
r["miplevel"] = IECore.IntData( 1 )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255, 127 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255, 127 ) ) )
# Test miplevel access for files without mips (OIIO creates mips on the fly)
r = IECore.Reader.create( os.path.join( "test", "IECoreImage", "data", "jpg", "uvMap.512x256.jpg" ) )
r["miplevel"] = IECore.IntData( 0 )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 511, 255 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 511, 255 ) ) )
r["miplevel"] = IECore.IntData( 1 )
self.assertEqual( r.dataWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255, 127 ) ) )
self.assertEqual( r.displayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 255, 127 ) ) )
def setUp( self ) :
if os.path.isfile( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) ) :
os.remove( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
def tearDown( self ) :
if os.path.isfile( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" )) :
os.remove( os.path.join( "test", "IECoreImage", "data", "exr", "output.exr" ) )
if __name__ == "__main__":
unittest.main()
|
py | b40f2f39718407fa478c09786af93c759a7f28d8 | """
pyemby.constants
~~~~~~~~~~~~~~~~~~~~
Constants list
Copyright (c) 2017-2021 John Mihalic <https://github.com/mezz64>
Licensed under the MIT license.
"""
MAJOR_VERSION = 1
MINOR_VERSION = 8
__version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
DEFAULT_TIMEOUT = 10
DEFAULT_HEADERS = {
'Content-Type': "application/json",
'Accept': "application/json",
}
API_URL = 'api'
SOCKET_URL = 'socket'
STATE_PLAYING = 'Playing'
STATE_PAUSED = 'Paused'
STATE_IDLE = 'Idle'
STATE_OFF = 'Off'
|
py | b40f306bf020161dcf9f25d3b5ba996c795061fe | """Support for the Airzone sensors."""
from __future__ import annotations
from typing import Any, Final
from aioairzone.const import AZD_HUMIDITY, AZD_NAME, AZD_TEMP, AZD_TEMP_UNIT, AZD_ZONES
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import AirzoneEntity
from .const import DOMAIN, TEMP_UNIT_LIB_TO_HASS
from .coordinator import AirzoneUpdateCoordinator
SENSOR_TYPES: Final[tuple[SensorEntityDescription, ...]] = (
SensorEntityDescription(
device_class=SensorDeviceClass.TEMPERATURE,
key=AZD_TEMP,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
device_class=SensorDeviceClass.HUMIDITY,
key=AZD_HUMIDITY,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Add Airzone sensors from a config_entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
sensors = []
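    # Create one sensor entity per zone for every description whose key is
    # present in that zone's coordinator data.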
for system_zone_id, zone_data in coordinator.data[AZD_ZONES].items():
for description in SENSOR_TYPES:
if description.key in zone_data:
sensors.append(
AirzoneSensor(
coordinator,
description,
entry,
system_zone_id,
zone_data,
)
)
async_add_entities(sensors)
class AirzoneSensor(AirzoneEntity, SensorEntity):
"""Define an Airzone sensor."""
def __init__(
self,
coordinator: AirzoneUpdateCoordinator,
description: SensorEntityDescription,
entry: ConfigEntry,
system_zone_id: str,
zone_data: dict[str, Any],
) -> None:
"""Initialize."""
super().__init__(coordinator, entry, system_zone_id, zone_data)
self._attr_name = f"{zone_data[AZD_NAME]} {description.name}"
self._attr_unique_id = f"{entry.entry_id}_{system_zone_id}_{description.key}"
self.entity_description = description
if description.key == AZD_TEMP:
self._attr_native_unit_of_measurement = TEMP_UNIT_LIB_TO_HASS.get(
self.get_zone_value(AZD_TEMP_UNIT)
)
@property
def native_value(self):
"""Return the state."""
return self.get_zone_value(self.entity_description.key)
|
py | b40f313dcd439c9020478dc30754ff5f5e6cd215 | import pytest
import parsl
import time
from parsl import python_app, ThreadPoolExecutor
from parsl.config import Config
from parsl.data_provider.files import File
from parsl.data_provider.staging import Staging
@python_app
def observe_input_local_path(f):
"""Returns the local_path that is seen by the app,
so that the test suite can also see that path.
"""
return f.local_path
@python_app
def wait_and_create(outputs=[]):
# for test purposes, this doesn't actually need to create the output
# file as nothing ever touches file content - the test only deals with
# names and Futures.
time.sleep(10)
class SP2(Staging):
def can_stage_in(self, file):
return file.scheme == 'sp2'
def stage_in(self, dm, executor, file, parent_fut):
file.local_path = "./test1.tmp"
return None # specify no tasks inserted in graph so parsl will preserve stageout dependency
def can_stage_out(self, file):
return file.scheme == 'sp2'
def stage_out(self, dm, executor, file, app_fu):
file.local_path = "./test1.tmp"
return None # no tasks inserted so parsl should use app completion for DataFuture completion
@pytest.mark.local
def test_1316_local_path_on_execution_side_sp2():
"""This test demonstrates the ability of a StagingProvider to set the
local_path of a File on the execution side, but that the change does not
modify the local_path of the corresponding submit side File, even when
running in a single python process.
"""
config = Config(executors=[ThreadPoolExecutor(storage_access=[SP2()])])
file = File("sp2://test")
parsl.load(config)
p = observe_input_local_path(file).result()
assert p == "./test1.tmp", "File object on the execution side gets the local_path set by the staging provider"
assert not file.local_path, "The local_path on the submit side should not be set"
parsl.dfk().cleanup()
parsl.clear()
@pytest.mark.local
def test_1316_local_path_setting_preserves_dependency_sp2():
config = Config(executors=[ThreadPoolExecutor(storage_access=[SP2()])])
file = File("sp2://test")
parsl.load(config)
wc_app_future = wait_and_create(outputs=[file])
data_future = wc_app_future.outputs[0]
p = observe_input_local_path(data_future).result()
assert wc_app_future.done(), "wait_and_create should finish before observe_input_local_path finishes"
assert p == "./test1.tmp", "File object on the execution side gets the local_path set by the staging provider"
assert not file.local_path, "The local_path on the submit side should not be set"
parsl.dfk().cleanup()
parsl.clear()
|
py | b40f3214c3772499096df6ea1c553c125d53092f | import numpy as np
import env_utils as envu
class Reward(object):
"""
Minimizes Velocity Field Tracking Error
"""
def __init__(self, reward_scale=1.0, ts_coeff=-0.0, fuel_coeff=-0.05, term_fuel_coeff=-0.0,
landing_rlimit=5.0, landing_vlimit=2.0,
tracking_coeff=-0.01, tracking_bias=0.0, scaler=None):
self.reward_scale = reward_scale
self.ts_coeff = ts_coeff
self.fuel_coeff = fuel_coeff
self.landing_rlimit = landing_rlimit
self.landing_vlimit = landing_vlimit
self.tracking_coeff = tracking_coeff
self.tracking_bias = tracking_bias
self.scaler = scaler
print('dvec vc 3')
def get(self, lander, action, done, steps, shape_constraint, glideslope_constraint):
pos = lander.state['position']
vel = lander.state['velocity']
prev_pos = lander.prev_state['position']
prev_vel = lander.prev_state['velocity']
state = np.hstack((pos,vel))
prev_state = np.hstack((prev_pos,prev_vel))
r_gs = glideslope_constraint.get_reward()
r_sc, sc_margin = shape_constraint.get_reward(lander.state)
error, t_go = lander.track_func(pos,vel)
r_tracking = self.tracking_bias + self.tracking_coeff * np.linalg.norm(error)
r_fuel = self.fuel_coeff * np.linalg.norm(action) / lander.max_thrust
r_landing = 0.
landing_margin = 0.
gs_penalty = 0.0
sc_penalty = 0.0
if done:
gs_penalty = glideslope_constraint.get_term_reward()
sc_penalty = shape_constraint.get_term_reward(lander.state)
landing_margin = np.maximum(np.linalg.norm(pos) - self.landing_rlimit , np.linalg.norm(vel) - self.landing_vlimit)
reward_info = {}
reward_info['fuel'] = r_fuel
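        # Total reward: constraint rewards/penalties, landing and tracking
        # terms, fuel usage and a constant per-step cost, scaled together.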
reward = (sc_penalty + gs_penalty + r_gs + r_sc + r_landing + r_tracking + r_fuel + self.ts_coeff) * self.reward_scale
lander.trajectory['reward'].append(reward)
lander.trajectory['glideslope'].append(glideslope_constraint.get())
lander.trajectory['glideslope_reward'].append(r_gs)
lander.trajectory['glideslope_penalty'].append(gs_penalty)
lander.trajectory['sc_penalty'].append(sc_penalty)
lander.trajectory['sc_margin'].append(sc_margin)
lander.trajectory['sc_reward'].append(r_sc)
lander.trajectory['landing_reward'].append(0.0)
lander.trajectory['tracking_reward'].append(r_tracking)
lander.trajectory['landing_margin'].append(landing_margin)
lander.trajectory['range_reward'].append(0.0)
lander.trajectory['fuel_reward'].append(r_fuel)
return reward, reward_info
|
py | b40f32eb6a59d5d32b352eee2094eeeb3d47703d | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import sys
import json
import tempfile
import subprocess
import collections
import util
import conll
from bert import tokenization
class DocumentState(object):
def __init__(self, key):
self.doc_key = key
self.sentence_end = []
self.token_end = []
self.tokens = []
self.subtokens = []
self.info = []
self.segments = []
self.subtoken_map = []
self.segment_subtoken_map = []
self.sentence_map = []
self.pronouns = []
self.clusters = collections.defaultdict(list)
self.coref_stacks = collections.defaultdict(list)
self.speakers = []
self.segment_info = []
def finalize(self):
# finalized: segments, segment_subtoken_map
# populate speakers from info
subtoken_idx = 0
for segment in self.segment_info:
speakers = []
for i, tok_info in enumerate(segment):
if tok_info is None and (i == 0 or i == len(segment) - 1):
speakers.append('[SPL]')
elif tok_info is None:
speakers.append(speakers[-1])
else:
speakers.append(tok_info[9])
if tok_info[4] == 'PRP':
self.pronouns.append(subtoken_idx)
subtoken_idx += 1
self.speakers += [speakers]
# populate sentence map
# populate clusters
first_subtoken_index = -1
for seg_idx, segment in enumerate(self.segment_info):
speakers = []
for i, tok_info in enumerate(segment):
first_subtoken_index += 1
coref = tok_info[-2] if tok_info is not None else '-'
if coref != "-":
last_subtoken_index = first_subtoken_index + tok_info[-1] - 1
for part in coref.split("|"):
if part[0] == "(":
if part[-1] == ")":
cluster_id = int(part[1:-1])
self.clusters[cluster_id].append((first_subtoken_index, last_subtoken_index))
else:
cluster_id = int(part[1:])
self.coref_stacks[cluster_id].append(first_subtoken_index)
else:
cluster_id = int(part[:-1])
start = self.coref_stacks[cluster_id].pop()
self.clusters[cluster_id].append((start, last_subtoken_index))
# merge clusters
merged_clusters = []
for c1 in self.clusters.values():
existing = None
for m in c1:
for c2 in merged_clusters:
if m in c2:
existing = c2
break
if existing is not None:
break
if existing is not None:
print("Merging clusters (shouldn't happen very often.)")
existing.update(c1)
else:
merged_clusters.append(set(c1))
merged_clusters = [list(c) for c in merged_clusters]
all_mentions = util.flatten(merged_clusters)
sentence_map = get_sentence_map(self.segments, self.sentence_end)
subtoken_map = util.flatten(self.segment_subtoken_map)
assert len(all_mentions) == len(set(all_mentions))
num_words = len(util.flatten(self.segments))
assert num_words == len(util.flatten(self.speakers))
assert num_words == len(subtoken_map), (num_words, len(subtoken_map))
assert num_words == len(sentence_map), (num_words, len(sentence_map))
return {
"doc_key": self.doc_key,
"sentences": self.segments,
"speakers": self.speakers,
"constituents": [],
"ner": [],
"clusters": merged_clusters,
'sentence_map':sentence_map,
"subtoken_map": subtoken_map,
'pronouns': self.pronouns
}
def normalize_word(word, language):
if language == "arabic":
word = word[:word.find("#")]
if word == "/." or word == "/?":
return word[1:]
else:
return word
# first try to satisfy constraints1, and if not possible, constraints2.
def split_into_segments(document_state, max_segment_len, constraints1, constraints2):
current = 0
previous_token = 0
while current < len(document_state.subtokens):
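        # Each segment is wrapped in [CLS]/[SEP] below, so leave two subtoken
        # slots free when choosing where this segment ends.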
end = min(current + max_segment_len - 1 - 2, len(document_state.subtokens) - 1)
while end >= current and not constraints1[end]:
end -= 1
if end < current:
end = min(current + max_segment_len - 1 - 2, len(document_state.subtokens) - 1)
while end >= current and not constraints2[end]:
end -= 1
if end < current:
raise Exception("Can't find valid segment")
document_state.segments.append(['[CLS]'] + document_state.subtokens[current:end + 1] + ['[SEP]'])
subtoken_map = document_state.subtoken_map[current : end + 1]
document_state.segment_subtoken_map.append([previous_token] + subtoken_map + [subtoken_map[-1]])
info = document_state.info[current : end + 1]
document_state.segment_info.append([None] + info + [None])
current = end + 1
previous_token = subtoken_map[-1]
def get_sentence_map(segments, sentence_end):
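    # Maps every subtoken in the flattened segments (including the [CLS]/[SEP]
    # wrappers) to the index of the sentence it belongs to.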
current = 0
sent_map = []
sent_end_idx = 0
assert len(sentence_end) == sum([len(s) -2 for s in segments])
for segment in segments:
sent_map.append(current)
for i in range(len(segment) - 2):
sent_map.append(current)
current += int(sentence_end[sent_end_idx])
sent_end_idx += 1
sent_map.append(current)
return sent_map
def get_document(document_lines, tokenizer, language, segment_len):
document_state = DocumentState(document_lines[0])
word_idx = -1
for line in document_lines[1]:
row = line.split()
sentence_end = len(row) == 0
if not sentence_end:
assert len(row) >= 12
word_idx += 1
word = normalize_word(row[3], language)
subtokens = tokenizer.tokenize(word)
document_state.tokens.append(word)
document_state.token_end += ([False] * (len(subtokens) - 1)) + [True]
for sidx, subtoken in enumerate(subtokens):
document_state.subtokens.append(subtoken)
info = None if sidx != 0 else (row + [len(subtokens)])
document_state.info.append(info)
document_state.sentence_end.append(False)
document_state.subtoken_map.append(word_idx)
else:
document_state.sentence_end[-1] = True
# split_into_segments(document_state, segment_len, document_state.token_end)
# split_into_segments(document_state, segment_len, document_state.sentence_end)
constraints1 = document_state.sentence_end if language != 'arabic' else document_state.token_end
split_into_segments(document_state, segment_len, constraints1, document_state.token_end)
stats["max_sent_len_{}".format(language)] = max(max([len(s) for s in document_state.segments]), stats["max_sent_len_{}".format(language)])
document = document_state.finalize()
return document
def skip(doc_key):
# if doc_key in ['nw/xinhua/00/chtb_0078_0', 'wb/eng/00/eng_0004_1']: #, 'nw/xinhua/01/chtb_0194_0', 'nw/xinhua/01/chtb_0157_0']:
# return True
return False
def minimize_partition(name, language, extension, labels, stats, tokenizer, seg_len, input_dir, output_dir):
input_path = "{}/{}.{}.{}".format(input_dir, name, language, extension)
output_path = "{}/{}.{}.{}.jsonlines".format(output_dir, name, language, seg_len)
count = 0
print("Minimizing {}".format(input_path))
documents = []
with open(input_path, "r") as input_file:
for line in input_file.readlines():
begin_document_match = re.match(conll.BEGIN_DOCUMENT_REGEX, line)
if begin_document_match:
doc_key = conll.get_doc_key(begin_document_match.group(1), begin_document_match.group(2))
documents.append((doc_key, []))
elif line.startswith("#end document"):
continue
else:
documents[-1][1].append(line)
with open(output_path, "w") as output_file:
for document_lines in documents:
if skip(document_lines[0]):
continue
document = get_document(document_lines, tokenizer, language, seg_len)
output_file.write(json.dumps(document))
output_file.write("\n")
count += 1
print("Wrote {} documents to {}".format(count, output_path))
def minimize_language(language, labels, stats, vocab_file, seg_len, input_dir, output_dir, do_lower_case):
# do_lower_case = True if 'chinese' in vocab_file else False
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
minimize_partition("dev", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
minimize_partition("train", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
minimize_partition("test", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
if __name__ == "__main__":
vocab_file = sys.argv[1]
input_dir = sys.argv[2]
output_dir = sys.argv[3]
do_lower_case = sys.argv[4].lower() == 'true'
print(do_lower_case)
labels = collections.defaultdict(set)
stats = collections.defaultdict(int)
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# for seg_len in [128, 256, 384, 512, 1024]:
for seg_len in [768,]:
minimize_language("english", labels, stats, vocab_file, seg_len, input_dir, output_dir, do_lower_case)
# minimize_language("chinese", labels, stats, vocab_file, seg_len)
# minimize_language("es", labels, stats, vocab_file, seg_len)
# minimize_language("arabic", labels, stats, vocab_file, seg_len)
for k, v in labels.items():
print("{} = [{}]".format(k, ", ".join("\"{}\"".format(label) for label in v)))
for k, v in stats.items():
print("{} = {}".format(k, v))
|
py | b40f33b7f1a6cc3a8c74a992d57a881805f43522 | """ Python 3 compatibility tools. """
from __future__ import division, print_function
import itertools
import sys
import os
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
input = raw_input
range = xrange
filter = itertools.ifilter
map = itertools.imap
zip = itertools.izip
def is_it_local():
script_dir = str(os.getcwd()).split('/')
username = "dipta007"
return username in script_dir
def READ(fileName):
if is_it_local():
sys.stdin = open(f'./{fileName}', 'r')
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
if not is_it_local():
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
def input1(type=int):
return type(input())
def input2(type=int):
[a, b] = list(map(type, input().split()))
return a, b
def input3(type=int):
[a, b, c] = list(map(type, input().split()))
return a, b, c
def input_array(type=int):
return list(map(type, input().split()))
def input_string():
s = input()
return list(s)
##############################################################
class Node2:
def __init__(self):
self.val = 0
self.prop = 0
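# Lazy-propagation segment tree: update() adds a value to every position in a
# range, query() returns the range sum, and propagate() pushes a node's pending
# increment down to its children on demand.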
class SegTree:
def __init__(self):
self._MX = 100004
self._tree = [Node2() for i in range(self._MX * 3)]
def merge(self, n1, n2):
res = Node2()
res.val = n1.val + n2.val
res.prop = 0
return res
def init(self, node, b, e):
if b == e:
self._tree[node].val = 0
self._tree[node].prop = 0
return
left = node * 2
right = left + 1
mid = (b + e) // 2
self.init(left, b, mid)
self.init(right, mid+1, e)
self._tree[node] = self.merge(self._tree[left], self._tree[right])
def propagate(self, node, b, e):
if b == e:
self._tree[node].prop = 0
return
left = node << 1
right = left + 1
mid = (b+e) >> 1
        left_range = mid - b + 1
        right_range = e - mid
        # Apply the parent's pending increment to each child's sum, record it
        # as pending for the grandchildren, then clear it on the parent.
        self._tree[left].val += self._tree[node].prop * left_range
        self._tree[right].val += self._tree[node].prop * right_range
        self._tree[left].prop += self._tree[node].prop
        self._tree[right].prop += self._tree[node].prop
        self._tree[node].prop = 0
def query(self, node, b, e, i, j):
if i > e or j < b:
return 0
if self._tree[node].prop:
self.propagate(node, b, e)
if b >= i and e <= j:
return self._tree[node].val
left = node << 1
right = left + 1
mid = (b+e) >> 1
p1 = self.query(left, b, mid, i, j)
p2 = self.query(right, mid+1, e, i, j)
return p1 + p2
def update(self, node, b, e, i, j, new_val):
if self._tree[node].prop:
self.propagate(node, b, e)
if i > e or j < b:
return 0
if b >= i and e <= j:
curr_range = e - b + 1
self._tree[node].val += (new_val * curr_range)
self._tree[node].prop += new_val
return
left = node << 1
right = left + 1
mid = (b+e) >> 1
self.update(left, b, mid, i, j, new_val)
self.update(right, mid+1, e, i, j, new_val)
self._tree[node] = self.merge(self._tree[left], self._tree[right])
def main():
pass
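    # Usage sketch (an illustration only; the original main() was left empty):
    #   st = SegTree()
    #   st.init(1, 1, n)                 # build an all-zero tree over positions 1..n
    #   st.update(1, 1, n, l, r, v)      # add v to every position in [l, r]
    #   total = st.query(1, 1, n, l, r)  # sum over [l, r]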
if __name__ == '__main__':
READ('in.txt')
main()
https://codeforces.com/blog/entry/22616?locale=en |
py | b40f35507f4ef15c35fe46bb7cfad2f0d45d27bf | class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
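        # Enumerate all 2^n bitmasks: bit j of mask i decides whether nums[j]
        # belongs to the subset built for that mask.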
result = []
for i in range(1 << len(nums)):
path = []
for j in range(len(nums)):
if i & (1 << j):
path.append(nums[j])
result.append(path)
return result
|
py | b40f359be52995e15f2274cb32cd28f1d23fba86 | class PostcodeValidationError(Exception):
pass
class InvalidPostcodeFormatValidationError(PostcodeValidationError):
"""Given postcode format is invalid"""
pass
class InvalidSingleDigitDistrictValidationError(PostcodeValidationError):
"""Given postcode area requires single district digit"""
pass
class InvalidDoubleDigitDistrictValidationError(PostcodeValidationError):
"""Given postcode area requires double district digit"""
pass
class InvalidZeroDigitForDistrictAreaValidationError(PostcodeValidationError):
"""Given postcode area does not allows 0 as district code digit"""
pass
class InvalidTenDigitForDistrictAreaValidationError(PostcodeValidationError):
"""Given postcode area does not allows 10 as district code digit"""
pass
class InvalidFirstPositionLetterValidationError(PostcodeValidationError):
"""Given postcode first position letter is invalid"""
pass
class InvalidSecondPositionLetterValidationError(PostcodeValidationError):
"""Given postcode second position letter is invalid"""
pass
class InvalidThirdPositionLetterValidationError(PostcodeValidationError):
"""Given postcode third position letter is invalid"""
pass
class InvalidFourthPositionLetterValidationError(PostcodeValidationError):
"""Given postcode fourth position letter is invalid"""
pass
class InvalidFinalTwoLettersError(PostcodeValidationError):
"""Given postcode two final letters are invalid"""
pass
class InvalidCentralLondonSingleDigitDistrictValidationError(PostcodeValidationError):
"""Given Central London single digit district is invalid"""
pass
|
py | b40f36e892d37ea06a47c36cc02cfd20955a8934 | from __future__ import division
import numpy as np
from tigre.utilities.Ax import Ax
from tigre.utilities.Atb import Atb
from tigre.utilities.order_subsets import order_subsets
from tigre.utilities.init_multigrid import init_multigrid
from tigre.utilities.Measure_Quality import Measure_Quality as MQ
from tigre.utilities.im3Dnorm import im3DNORM
from tigre.algorithms.single_pass_algorithms import FDK
from _minTV import minTV
from _AwminTV import AwminTV
import time
import copy
"""
This module is where the umbrella class IterativeReconAlg is located
which is the umbrella class to all the other algorithms apart from
the single pass type algorithms.
"""
# coding: utf8
if hasattr(time, 'perf_counter'):
default_timer = time.perf_counter
else:
default_timer = time.clock
class IterativeReconAlg(object):
"""
Parameters
----------
:param proj: (np.ndarray, dtype=np.float32)
Input data, shape = (geo.nDector, nangles)
:param geo: (tigre.geometry)
Geometry of detector and image (see examples/Demo code)
:param angles: (np.ndarray , dtype=np.float32)
angles of projection, shape = (nangles,3)
:param niter: (int)
number of iterations for reconstruction algorithm
:param kwargs: (dict)
optional parameters
Keyword Arguments
-----------------
:keyword blocksize: (int)
number of angles to be included in each iteration
of proj and backproj for OS_SART
:keyword lmbda: (np.float64)
Sets the value of the hyperparameter.
:keyword lmbda_red: (np.float64)
Reduction of lambda every iteration
lambda=lambdared*lambda. Default is 0.99
:keyword init: (str)
Describes different initialization techniques.
None : Initializes the image to zeros (default)
"FDK" : intializes image to FDK reconstrucition
:keyword verbose: (Boolean)
Feedback print statements for algorithm progress
default=True
:keyword OrderStrategy : (str)
Chooses the subset ordering strategy. Options are:
"ordered" : uses them in the input order, but
divided
"random" : orders them randomply
:keyword tviter: (int)
For algorithms that make use of a tvdenoising step in their
iterations. This includes:
OS_SART_TV
ASD_POCS
AWASD_POCS
FISTA
:keyword tvlambda: (float)
For algorithms that make use of a tvdenoising step in their
iterations.
OS_SART_TV
FISTA
Usage
--------
>>> import numpy as np
>>> import tigre
>>> import tigre.algorithms as algs
>>> from tigre.demos.Test_data import data_loader
>>> geo = tigre.geometry(mode='cone',default_geo=True,
>>> nVoxel=np.array([64,64,64]))
>>> angles = np.linspace(0,2*np.pi,100)
>>> src_img = data_loader.load_head_phantom(geo.nVoxel)
>>> proj = tigre.Ax(src_img,geo,angles)
>>> output = algs.iterativereconalg(proj,geo,angles,niter=50
>>> blocksize=20)
tigre.demos.run() to launch ipython notebook file with examples.
--------------------------------------------------------------------
This file is part of the TIGRE Toolbox
Copyright (c) 2015, University of Bath and
CERN-European Organization for Nuclear Research
All rights reserved.
License: Open Source under BSD.
See the full license at
https://github.com/CERN/TIGRE/license.txt
Contact: [email protected]
Codes: https://github.com/CERN/TIGRE/
--------------------------------------------------------------------
Coded by: MATLAB (original code): Ander Biguri
PYTHON : Reuben Lindroos
"""
def __init__(self, proj, geo, angles, niter, **kwargs):
self.proj = proj
self.angles = angles
self.geo = geo
self.niter = niter
options = dict(blocksize=20, lmbda=1, lmbda_red=0.99,
OrderStrategy=None, Quameasopts=None,
init=None, verbose=True, noneg=True,
computel2=False, dataminimizing='art_data_minimizing',
name='Iterative Reconstruction', sup_kw_warning=False)
allowed_keywords = [
'V',
'W',
'log_parameters',
'angleblocks',
'angle_index',
'delta',
'regularisation',
'tviter',
'tvlambda',
'hyper']
self.__dict__.update(options)
self.__dict__.update(**kwargs)
for kw in kwargs.keys():
if kw not in options and (kw not in allowed_keywords):
if self.verbose:
if not kwargs.get('sup_kw_warning'):
# Note: might not want this warning (typo checking).
print(
"Warning: " +
kw +
" not recognised as default parameter for instance of IterativeReconAlg.")
if self.angles.ndim == 1:
a1 = self.angles
a2 = np.zeros(self.angles.shape[0], dtype=np.float32)
setattr(self, 'angles', np.vstack((a1, a2, a2)).T)
        if not all([hasattr(self, 'angle_index'),
hasattr(self, 'angleblocks')]):
self.set_angle_index()
if not hasattr(self, 'W'):
self.set_w()
if not hasattr(self, 'V'):
self.set_v()
if not hasattr(self, 'res'):
self.set_res()
setattr(self, 'lq', []) # quameasoptslist
setattr(self, 'l2l', []) # l2list
def set_w(self):
"""
Calculates value of W if this is not given.
:return: None
"""
geox = copy.deepcopy(self.geo)
geox.sVoxel[0:] = self.geo.DSD - self.geo.DSO
geox.sVoxel[2] = max(geox.sDetector[1], geox.sVoxel[2])
geox.nVoxel = np.array([2, 2, 2])
geox.dVoxel = geox.sVoxel / geox.nVoxel
W = Ax(
np.ones(
geox.nVoxel,
dtype=np.float32),
geox,
self.angles,
"ray-voxel")
W[W <= min(self.geo.dVoxel / 4)] = np.inf
W = 1. / W
setattr(self, 'W', W)
def set_v(self):
"""
Computes value of V parameter if this is not given.
:return: None
"""
geo = self.geo
if geo.mode != 'parallel':
if len(geo.offOrigin.shape)==1:
offY=geo.offOrigin[1]
offZ=geo.offOrigin[2]
else:
offY=geo.offOrigin[1,0]
offZ=geo.offOrigin[2,0]
start = geo.sVoxel[1] / 2 - geo.dVoxel[1] / 2 + offY
stop = -geo.sVoxel[1] / 2 + geo.dVoxel[1] / 2 + offY
step = -geo.dVoxel[1]
xv = np.arange(start, stop + step, step)
start = geo.sVoxel[2] / 2 - geo.dVoxel[2] / 2 + offZ
stop = -geo.sVoxel[2] / 2 + geo.dVoxel[2] / 2 + offZ
step = -geo.dVoxel[2]
yv = -1 * np.arange(start, stop + step, step)
(yy, xx) = np.meshgrid(yv, xv)
A = (self.angles[:, 0] + np.pi / 2)
V = np.empty((self.angles.shape[0], geo.nVoxel[1], geo.nVoxel[2]))
for i in range(self.angles.shape[0]):
if hasattr(geo.DSO, 'shape') and len(geo.DSO.shape) >= 1:
DSO = geo.DSO[i]
else:
DSO = geo.DSO
V[i] = (DSO / (DSO + (yy * np.sin(-A[i])) -
(xx * np.cos(-A[i])))) ** 2
else:
V = np.ones((self.angles.shape[0]), dtype=np.float32)
if self.blocksize > 1:
v_list = [np.sum(V[self.angle_index[i]], axis=0)
for i in range(len(self.angleblocks))]
V = np.stack(v_list, 0)
V = np.array(V, dtype=np.float32)
setattr(self, 'V', V)
def set_res(self):
"""
        Calculates the initial value for res if it is not given.
:return: None
"""
setattr(self, 'res', np.zeros(self.geo.nVoxel, dtype=np.float32))
init = self.init
verbose = self.verbose
if init == 'multigrid':
if verbose:
print('init multigrid in progress...')
print('default blocksize=1 for init_multigrid(OS_SART)')
self.res = init_multigrid(
self.proj, self.geo, self.angles, alg='SART')
if verbose:
print('init multigrid complete.')
if init == 'FDK':
self.res = FDK(self.proj, self.geo, self.angles)
if isinstance(init, np.ndarray):
if (self.geo.nVoxel == init.shape).all():
self.res = init
else:
raise ValueError('wrong dimension of array for initialisation')
def set_angle_index(self):
"""
        Sets angle_index and angleblocks if they are not already given.
:return: None
"""
angleblocks, angle_index = order_subsets(
self.angles, self.blocksize, self.OrderStrategy)
setattr(self, 'angleblocks', angleblocks)
setattr(self, 'angle_index', angle_index)
def run_main_iter(self):
"""
Goes through the main iteration for the given configuration.
:return: None
"""
Quameasopts = self.Quameasopts
for i in range(self.niter):
res_prev = None
if Quameasopts is not None:
res_prev = copy.deepcopy(self.res)
if self.verbose:
if i == 0:
print(str(self.name).upper() +
' ' + "algorithm in progress.")
toc = default_timer()
if i == 1:
tic = default_timer()
                    print('Estimated time until completion (s): ' +
str((self.niter - 1) * (tic - toc)))
getattr(self, self.dataminimizing)()
self.error_measurement(res_prev, i)
def art_data_minimizing(self):
geo = copy.deepcopy(self.geo)
for j in range(len(self.angleblocks)):
if self.blocksize == 1:
angle = np.array([self.angleblocks[j]], dtype=np.float32)
else:
angle = self.angleblocks[j]
if geo.offOrigin.shape[0] == self.angles.shape[0]:
geo.offOrigin = self.geo.offOrigin[j]
if geo.offDetector.shape[0] == self.angles.shape[0]:
                geo.offDetector = self.geo.offDetector[j]
if geo.rotDetector.shape[0] == self.angles.shape[0]:
geo.rotDetector = self.geo.rotDetector[j]
if hasattr(geo.DSD, 'shape') and len((geo.DSD.shape)):
if geo.DSD.shape[0] == self.angles.shape[0]:
geo.DSD = self.geo.DSD[j]
            if hasattr(geo.DSO, 'shape') and len(geo.DSO.shape):
if geo.DSO.shape[0] == self.angles.shape[0]:
geo.DSO = self.geo.DSO[j]
self.update_image(geo, angle, j)
if self.noneg:
self.res = self.res.clip(min=0)
def minimizeTV(self, res_prev, dtvg):
return minTV(res_prev, dtvg, self.numiter_tv)
def minimizeAwTV(self, res_prev, dtvg):
return AwminTV(res_prev, dtvg, self.numiter_tv, self.delta)
def error_measurement(self, res_prev, iter):
if self.Quameasopts is not None and iter > 0:
self.lq.append(MQ(self.res, res_prev, self.Quameasopts))
if self.computel2:
            # compute l2 norm for b-Ax
errornow = im3DNORM(
self.proj - Ax(self.res, self.geo, self.angles, 'ray-voxel'), 2)
self.l2l.append(errornow)
def update_image(self, geo, angle, iteration):
"""
VERBOSE:
for j in range(angleblocks):
angle = np.array([alpha[j]], dtype=np.float32)
proj_err = proj[angle_index[j]] - Ax(res, geo, angle, 'ray-voxel')
weighted_err = W[angle_index[j]] * proj_err
backprj = Atb(weighted_err, geo, angle, 'FDK')
weighted_backprj = 1 / V[angle_index[j]] * backprj
res += weighted_backprj
res[res<0]=0
:return: None
"""
self.res += self.lmbda * 1. / self.V[iteration] * Atb(self.W[self.angle_index[iteration]] * (
self.proj[self.angle_index[iteration]] - Ax(self.res, geo, angle, 'interpolated')), geo, angle, 'FDK')
def getres(self):
return self.res
def geterrors(self):
return self.l2l, self.lq
def __str__(self):
parameters = []
for item in self.__dict__:
if item == 'geo':
pass
elif hasattr(self.__dict__.get(item), 'shape'):
if self.__dict__.get(item).ravel().shape[0] > 100:
parameters.append(item + ' shape: ' +
str(self.__dict__.get(item).shape))
else:
parameters.append(item + ': ' + str(self.__dict__.get(item)))
return '\n'.join(parameters)
def decorator(IterativeReconAlg, name=None, docstring=None):
"""
Calls run_main_iter when parameters are given to it.
:param IterativeReconAlg: obj, class
instance of IterativeReconAlg
:param name: str
for name of func
:param docstring: str
other documentation that may need to be included from external source.
:return: func
Examples
--------
>>> import tigre
>>> from tigre.demos.Test_data.data_loader import load_head_phantom
    >>> geo = tigre.geometry_default(high_quality=False)
    >>> src = load_head_phantom(number_of_voxels=geo.nVoxel)
    >>> angles = np.linspace(0,2*np.pi,100)
    >>> proj = Ax(src,geo,angles)
>>> iterativereconalg = decorator(IterativeReconAlg)
>>> output = iterativereconalg(proj,geo,angles, niter=50)
"""
def iterativereconalg(proj, geo, angles, niter, **kwargs):
alg = IterativeReconAlg(proj, geo, angles, niter, **kwargs)
if name is not None:
alg.name = name
alg.run_main_iter()
if alg.computel2:
return alg.getres(), alg.geterrors()
else:
return alg.getres()
if docstring is not None:
setattr(
iterativereconalg,
'__doc__',
docstring +
IterativeReconAlg.__doc__)
else:
setattr(iterativereconalg, '__doc__', IterativeReconAlg.__doc__)
if name is not None:
setattr(iterativereconalg, '__name__', name)
return iterativereconalg
iterativereconalg = decorator(IterativeReconAlg)
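# Illustrative usage sketch: concrete algorithms are expected to subclass
# IterativeReconAlg and be exposed through decorator(); the class and variable
# names below are hypothetical, not part of this module.
#
#     class OSSART(IterativeReconAlg):
#         __doc__ = IterativeReconAlg.__doc__
#
#     ossart = decorator(OSSART, name='ossart')
#     # recon = ossart(proj, geo, angles, niter=50)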
|
py | b40f377229eaf9b4bf8c846e0177338f990cedf0 | from fastai.conv_learner import *
from fastai.dataset import *
from tensorboard_cb_old import *
import pandas as pd
import numpy as np
import os
from sklearn.metrics import f1_score
import scipy.optimize as opt
from random import shuffle
import pickle as pkl
import warnings
warnings.filterwarnings("ignore")
# =======================================================================================================================
# Run configuration and output folder setup
# =======================================================================================================================
fold = 0 # specify
archt = 'res18' # specify for output folder creation
SUB_DIR = 'subs/' + archt
if not os.path.exists(SUB_DIR):
os.makedirs(SUB_DIR)
#=======================================================================================================================
PATH = './'
TRAIN = '../input/train/' #train images
TEST = '../input/test/' # test images
LABELS_org = '../input/train.csv'
LABELS_ext = '../input/HPAv18RBGY_wodpl.csv'
LABELS_all = '../input/train_org_ext.csv'
SAMPLE = '../input/sample_submission.csv'
if fold == 0:
train_fld = '../input/train_fld0.csv'
valid_fld = '../input/val_fld0.csv'
elif fold == 1:
train_fld = '../input/train_fld1.csv'
valid_fld = '../input/val_fld1.csv'
elif fold == 2:
train_fld = '../input/train_fld2.csv'
valid_fld = '../input/val_fld2.csv'
elif fold == 3:
train_fld = '../input/train_fld3.csv'
valid_fld = '../input/val_fld3.csv'
else:
print('please specify fold..')
#=======================================================================================================================
name_label_dict = {
0: 'Nucleoplasm',
1: 'Nuclear membrane',
2: 'Nucleoli',
3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles',
5: 'Nuclear bodies',
6: 'Endoplasmic reticulum',
7: 'Golgi apparatus',
8: 'Peroxisomes',
9: 'Endosomes',
10: 'Lysosomes',
11: 'Intermediate filaments',
12: 'Actin filaments',
13: 'Focal adhesion sites',
14: 'Microtubules',
15: 'Microtubule ends',
16: 'Cytokinetic bridge',
17: 'Mitotic spindle',
18: 'Microtubule organizing center',
19: 'Centrosome',
20: 'Lipid droplets',
21: 'Plasma membrane',
22: 'Cell junctions',
23: 'Mitochondria',
24: 'Aggresome',
25: 'Cytosol',
26: 'Cytoplasmic bodies',
27: 'Rods & rings'}
nw = 6 # number of workers for data loader
arch = resnet18 # specify target architecture
# =======================================================================================================================
# Data
# =======================================================================================================================
df_train_fld = pd.read_csv(train_fld)
df_val_fld = pd.read_csv(valid_fld)
df_train_ext = pd.read_csv(LABELS_ext)
# remove broken images
df_train_fld = df_train_fld[(df_train_fld.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
(df_train_fld.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
(df_train_fld.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')
]
df_val_fld = df_val_fld[(df_val_fld.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
(df_val_fld.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
(df_val_fld.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')
]
df_train_ext = df_train_ext[df_train_ext.Id != '27751_219_G10_1']
print(df_train_fld.shape[0], 'training masks')
print(df_val_fld.shape[0], 'validation masks')
print(df_train_ext.shape[0], 'external masks')
# loading original data twice to balance external data
tr_n = df_train_fld['Id'].values.tolist() + df_train_ext['Id'].values.tolist() + df_train_fld['Id'].values.tolist()
shuffle(tr_n)
val_n = df_val_fld['Id'].values.tolist()
tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
test_names = list({f[:36] for f in os.listdir(TEST)})
#=======================================================================================================================
def open_rgby(path, id): # a function that reads RGBY image
# print(id)
colors = ['red','green','blue','yellow']
#colors = ['red', 'green', 'blue']
flags = cv2.IMREAD_GRAYSCALE
try:
img = [cv2.imread(os.path.join(path, id + '_' + color + '.png'), flags).astype(np.float32) / 255
for color in colors]
img = np.stack(img, axis=-1)
return img
except:
print('img broken:', id)
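# Note: open_rgby assumes the four stains of a sample are stored as separate
# grayscale PNGs named '<id>_red.png', '<id>_green.png', '<id>_blue.png' and
# '<id>_yellow.png' inside `path`, and returns an HxWx4 float32 array scaled to
# [0, 1]; if any channel is missing or unreadable it prints the id and returns None.
# Illustrative call (the id is hypothetical):
#     img = open_rgby(TRAIN, 'some-image-id')   # img.shape -> (512, 512, 4)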
class pdFilesDataset(FilesDataset):
def __init__(self, fnames, path, transform):
self.labels = pd.read_csv(LABELS_all).set_index('Id')
self.labels['Target'] = [[int(i) for i in s.split()] for s in self.labels['Target']]
super().__init__(fnames, transform, path)
def get_x(self, i):
img = open_rgby(self.path, self.fnames[i])
if self.sz == 512:
return img
else:
return cv2.resize(img, (self.sz, self.sz), cv2.INTER_AREA)
def get_y(self, i):
if (self.path == TEST):
return np.zeros(len(name_label_dict), dtype=np.int)
else:
labels = self.labels.loc[self.fnames[i]]['Target']
return np.eye(len(name_label_dict), dtype=np.float)[labels].sum(axis=0)
@property
def is_multi(self):
return True
@property
def is_reg(self):
return True
# this flag is set to remove the output sigmoid that allows log(sigmoid) optimization
# of the numerical stability of the loss function
def get_c(self):
return len(name_label_dict) # number of classes
def get_data(sz, bs):
# data augmentation
aug_tfms = [RandomRotate(30, tfm_y=TfmType.NO),
RandomDihedral(tfm_y=TfmType.NO),
RandomLighting(0.05, 0.05, tfm_y=TfmType.NO),
# RandomCrop(480, tfm_y=TfmType.NO),
# RandomRotateZoom(deg=30, zoom=1, stretch=0.5)
]
# mean and std in of each channel in the train set
# Original set: [0.0869 0.0599 0.06534 0.08923] [0.13047 0.09831 0.14875 0.1333 ]
# Extra data:[0.04445 0.0499 0.01867 0.10086] [0.05364 0.07222 0.02486 0.13921]
# combined: [0.05697 0.05282 0.03241 0.09748] [0.08622 0.08092 0.08611 0.13758]
stats = A([0.0869, 0.0599, 0.06534, 0.08923], [0.13047, 0.09831, 0.14875, 0.1333 ])
#stats = A([0.05697, 0.05282, 0.03241, 0.09748] , [0.08622, 0.08092, 0.08611, 0.13758])
#stats = A([0.05697, 0.05282, 0.03241], [0.08622, 0.08092, 0.08611])
tfms = tfms_from_stats(stats, sz, crop_type=CropType.NO, tfm_y=TfmType.NO,
aug_tfms=aug_tfms)
ds = ImageData.get_ds(pdFilesDataset, (tr_n[:-(len(tr_n) % bs)], TRAIN),
(val_n, TRAIN), tfms, test=(test_names, TEST))
md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
return md
# ======================================================================================================================
# Display
# =======================================================================================================================
# bs = 16
# sz = 256
# md = get_data(sz,bs)
#
# x,y = next(iter(md.trn_dl))
# print(x.shape, y.shape)
#
# def display_imgs(x):
# columns = 4
# bs = x.shape[0]
# rows = min((bs + 3) // 4, 4)
# fig = plt.figure(figsize=(columns * 4, rows * 4))
# for i in range(rows):
# for j in range(columns):
# idx = i + j * columns
# fig.add_subplot(rows, columns, idx + 1)
# plt.axis('off')
# plt.imshow((x[idx, :, :, :3] * 255).astype(np.int))
# plt.show()
#
#
# display_imgs(np.asarray(md.trn_ds.denorm(x)))
# =======================================================================================================================
# Stats
# =======================================================================================================================
#
# x_tot = np.zeros(4)
# x2_tot = np.zeros(4)
# for x,y in iter(md.trn_dl):
# tmp = md.trn_ds.denorm(x).reshape(16,-1)
# x = md.trn_ds.denorm(x).reshape(-1,4)
# x_tot += x.mean(axis=0)
# x2_tot += (x**2).mean(axis=0)
#
# channel_avr = x_tot/len(md.trn_dl)
# channel_std = np.sqrt(x2_tot/len(md.trn_dl) - channel_avr**2)
# print(channel_avr,channel_std)
# =======================================================================================================================
# Functions and metrics
# =======================================================================================================================
class FocalLoss(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + \
((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.sum(dim=1).mean()
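# Note: FocalLoss expects raw logits rather than probabilities; the max_val/exp/log
# terms form the numerically stable binary cross-entropy with logits, and the
# (invprobs * gamma).exp() factor down-weights well-classified examples.
# Minimal sketch of a call (shapes are illustrative):
#     crit = FocalLoss(gamma=2)
#     logits = torch.randn(8, 28)
#     targets = torch.randint(0, 2, (8, 28)).float()
#     loss = crit(logits, targets)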
def fbeta_torch(y_true, y_pred, beta=1, threshold=0.5, eps=1e-9):
y_pred = (y_pred.float() > threshold).float()
y_true = y_true.float()
tp = (y_pred * y_true).sum(dim=1)
precision = tp / (y_pred.sum(dim=1) + eps)
recall = tp / (y_true.sum(dim=1) + eps)
return torch.mean(
precision * recall / (precision * (beta ** 2) + recall + eps) * (1 + beta ** 2))
def acc(preds, targs, th=0.5):
preds = (preds > th).int()
targs = targs.int()
return (preds == targs).float().mean()
def recall(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
tr = targs.sum().item()
return float(tp + 0.000001) / float(tr + 0.000001)
def precision(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
pp = pred_pos.sum().item()
return float(tp + 0.000001) / float(pp + 0.000001)
def fbeta(preds, targs, beta, thresh=0.5):
"""Calculates the F-beta score (the weighted harmonic mean of precision and recall).
This is the micro averaged version where the true positives, false negatives and
false positives are calculated globally (as opposed to on a per label basis).
beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and
beta > 1 favors recall.
"""
assert beta > 0, 'beta needs to be greater than 0'
beta2 = beta ** 2
rec = recall(preds, targs, thresh)
prec = precision(preds, targs, thresh)
return float((1 + beta2) * prec * rec) / float(beta2 * prec + rec + 0.00000001)
def f1(preds, targs, thresh=0.5): return float(fbeta(preds, targs, 1, thresh))
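# Worked example (numbers are illustrative): with 8 true positives, 2 false positives
# and 2 false negatives, precision = recall = 0.8, so f1 = 2 * 0.8 * 0.8 / (0.8 + 0.8)
# = 0.8; choosing beta > 1 in fbeta() would weight recall more heavily than precision.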
# =======================================================================================================================
# Model building
# =======================================================================================================================
class ConvnetBuilder_custom():
def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0,
custom_head=None, pretrained=True):
self.f, self.c, self.is_multi, self.is_reg, self.xtra_cut = f, c, is_multi, is_reg, xtra_cut
if xtra_fc is None: xtra_fc = [512]
if ps is None: ps = [0.25] * len(xtra_fc) + [0.5]
self.ps, self.xtra_fc = ps, xtra_fc
if f in model_meta:
cut, self.lr_cut = model_meta[f]
else:
cut, self.lr_cut = 0, 0
cut -= xtra_cut
layers = cut_model(f(pretrained), cut)
# replace first convolutional layer by 4->64 while keeping corresponding weights
# and initializing new weights with zeros
w = layers[0].weight
layers[0] = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
layers[0].weight = torch.nn.Parameter(torch.cat((w, torch.zeros(64, 1, 7, 7)), dim=1))
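        # Shape note: the pretrained weight w is (64, 3, 7, 7); concatenating a
        # zero-initialized (64, 1, 7, 7) slice along dim=1 gives a first conv that
        # accepts 4-channel RGBY input while reusing the RGB filters unchanged.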
self.nf = model_features[f] if f in model_features else (num_features(layers) * 2)
if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
self.top_model = nn.Sequential(*layers)
n_fc = len(self.xtra_fc) + 1
if not isinstance(self.ps, list): self.ps = [self.ps] * n_fc
if custom_head:
fc_layers = [custom_head]
else:
fc_layers = self.get_fc_layers()
self.n_fc = len(fc_layers)
self.fc_model = to_gpu(nn.Sequential(*fc_layers))
if not custom_head: apply_init(self.fc_model, kaiming_normal)
self.model = to_gpu(nn.Sequential(*(layers + fc_layers)))
@property
def name(self):
return f'{self.f.__name__}_{self.xtra_cut}'
def create_fc_layer(self, ni, nf, p, actn=None):
res = [nn.BatchNorm1d(num_features=ni)]
if p: res.append(nn.Dropout(p=p))
res.append(nn.Linear(in_features=ni, out_features=nf))
if actn: res.append(actn)
return res
def get_fc_layers(self):
res = []
ni = self.nf
for i, nf in enumerate(self.xtra_fc):
res += self.create_fc_layer(ni, nf, p=self.ps[i], actn=nn.ReLU())
ni = nf
final_actn = nn.Sigmoid() if self.is_multi else nn.LogSoftmax()
if self.is_reg: final_actn = None
res += self.create_fc_layer(ni, self.c, p=self.ps[-1], actn=final_actn)
return res
def get_layer_groups(self, do_fc=False):
if do_fc:
return [self.fc_model]
idxs = [self.lr_cut]
c = children(self.top_model)
if len(c) == 3: c = children(c[0]) + c[1:]
lgs = list(split_by_idxs(c, idxs))
return lgs + [self.fc_model]
class ConvLearner(Learner):
def __init__(self, data, models, precompute=False, **kwargs):
self.precompute = False
super().__init__(data, models, **kwargs)
if hasattr(data, 'is_multi') and not data.is_reg and self.metrics is None:
self.metrics = [accuracy_thresh(0.5)] if self.data.is_multi else [accuracy]
if precompute: self.save_fc1()
self.freeze()
self.precompute = precompute
def _get_crit(self, data):
if not hasattr(data, 'is_multi'): return super()._get_crit(data)
return F.l1_loss if data.is_reg else F.binary_cross_entropy if data.is_multi else F.nll_loss
@classmethod
def pretrained(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
pretrained=True, **kwargs):
models = ConvnetBuilder_custom(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head,
pretrained=pretrained)
return cls(data, models, precompute, **kwargs)
@classmethod
def lsuv_learner(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
needed_std=1.0, std_tol=0.1, max_attempts=10, do_orthonorm=False, **kwargs):
models = ConvnetBuilder(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head, pretrained=False)
convlearn = cls(data, models, precompute, **kwargs)
convlearn.lsuv_init()
return convlearn
@property
def model(self):
return self.models.fc_model if self.precompute else self.models.model
def half(self):
if self.fp16: return
self.fp16 = True
if type(self.model) != FP16: self.models.model = FP16(self.model)
if not isinstance(self.models.fc_model, FP16): self.models.fc_model = FP16(self.models.fc_model)
def float(self):
if not self.fp16: return
self.fp16 = False
if type(self.models.model) == FP16: self.models.model = self.model.module.float()
if type(self.models.fc_model) == FP16: self.models.fc_model = self.models.fc_model.module.float()
@property
def data(self):
return self.fc_data if self.precompute else self.data_
def create_empty_bcolz(self, n, name):
return bcolz.carray(np.zeros((0, n), np.float32), chunklen=1, mode='w', rootdir=name)
def set_data(self, data, precompute=False):
super().set_data(data)
if precompute:
self.unfreeze()
self.save_fc1()
self.freeze()
self.precompute = True
else:
self.freeze()
def get_layer_groups(self):
return self.models.get_layer_groups(self.precompute)
def summary(self):
precompute = self.precompute
self.precompute = False
res = super().summary()
self.precompute = precompute
return res
def get_activations(self, force=False):
tmpl = f'_{self.models.name}_{self.data.sz}.bc'
# TODO: Somehow check that directory names haven't changed (e.g. added test set)
names = [os.path.join(self.tmp_path, p + tmpl) for p in ('x_act', 'x_act_val', 'x_act_test')]
if os.path.exists(names[0]) and not force:
self.activations = [bcolz.open(p) for p in names]
else:
self.activations = [self.create_empty_bcolz(self.models.nf, n) for n in names]
def save_fc1(self):
self.get_activations()
act, val_act, test_act = self.activations
m = self.models.top_model
if len(self.activations[0]) != len(self.data.trn_ds):
predict_to_bcolz(m, self.data.fix_dl, act)
if len(self.activations[1]) != len(self.data.val_ds):
predict_to_bcolz(m, self.data.val_dl, val_act)
if self.data.test_dl and (len(self.activations[2]) != len(self.data.test_ds)):
if self.data.test_dl: predict_to_bcolz(m, self.data.test_dl, test_act)
self.fc_data = ImageClassifierData.from_arrays(self.data.path,
(act, self.data.trn_y), (val_act, self.data.val_y), self.data.bs,
classes=self.data.classes,
test=test_act if self.data.test_dl else None, num_workers=8)
def freeze(self):
self.freeze_to(-1)
def unfreeze(self):
self.freeze_to(0)
self.precompute = False
def predict_array(self, arr):
precompute = self.precompute
self.precompute = False
pred = super().predict_array(arr)
self.precompute = precompute
return pred
# =======================================================================================================================
# Training
# =======================================================================================================================
sz = 512  # image size
bs = 8 # batch size
md = get_data(sz, bs)
learner = ConvLearner.pretrained(arch, md, ps=0.5)
learner.opt_fn = optim.Adam
learner.clip = 1.0
learner.crit = FocalLoss()
learner.metrics = [precision, recall, f1]
print(learner.summary())
tb_logger = TensorboardLogger(learner.model, md, "res18_512_fold{}".format(fold), metrics_names=["precision", 'recall', 'f1'])
lr = 2e-2
learner.fit(lr,1)
lrs = np.array([lr / 10, lr / 3, lr])
learner.unfreeze()
learner.fit(lrs / 4, 4, cycle_len=2, use_clr=(10, 20), cycle_save_name='res18_512_ext_4chn_5050_1', callbacks=[tb_logger])
learner.fit(lrs / 4, 2, cycle_len=4, use_clr=(10, 20), cycle_save_name='res18_512_ext_4chn_5050_2', callbacks=[tb_logger])
learner.fit(lrs / 16, 1, cycle_len=8, use_clr=(5, 20), cycle_save_name='res18_512_ext_4chn_5050_3', callbacks=[tb_logger])
learner.save('res18_512_fld{}'.format(fold))
#=======================================================================================================================
# predict on validation
#=======================================================================================================================
def sigmoid_np(x):
return 1.0 / (1.0 + np.exp(-x))
def F1_soft(preds, targs, th=0.5, d=50.0):
preds = sigmoid_np(d * (preds - th))
targs = targs.astype(np.float)
score = 2.0 * (preds * targs).sum(axis=0) / ((preds + targs).sum(axis=0) + 1e-6)
return score
def fit_val(x, y):
params = 0.5 * np.ones(len(name_label_dict))
wd = 1e-5
error = lambda p: np.concatenate((F1_soft(x, y, p) - 1.0,
wd * (p - 0.5)), axis=None)
p, success = opt.leastsq(error, params)
return p
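# Note: fit_val fits one threshold per class by least squares on the sigmoid-smoothed
# F1 surrogate above, plus a tiny penalty pulling thresholds toward 0.5 for classes
# with too few validation samples to fit reliably.
# Sketch of the intended call (array shapes are illustrative):
#     th = fit_val(val_preds, val_targets)   # both of shape (N, 28)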
print('valid predicting...')
preds, y = learner.TTA(n_aug=16)
preds = np.stack(preds, axis=-1)
preds = sigmoid_np(preds)
pred = preds.max(axis=-1)
# save valid predictions
val_names = np.array(learner.data.val_ds.fnames)
val_save_pkl = np.column_stack((val_names, pred))
val_fileName = SUB_DIR + '/' + 'oof_{}.pkl'.format(fold)
val_fileObject = open(val_fileName, 'wb')
pkl.dump(val_save_pkl, val_fileObject)
val_fileObject.close()
th = fit_val(pred, y)
th[th < 0.1] = 0.1
print('Thresholds: ', th)
print('F1 macro: ', f1_score(y, pred > th, average='macro'))
print('F1 macro (th = 0.5): ', f1_score(y, pred > 0.5, average='macro'))
print('F1 micro: ', f1_score(y, pred > th, average='micro'))
print('Fractions: ', (pred > th).mean(axis=0))
print('Fractions (true): ', (y > th).mean(axis=0))
# =======================================================================================================================
# predict on test
# =======================================================================================================================
print('test predicting...')
preds_t, y_t = learner.TTA(n_aug=16, is_test=True)
preds_t = np.stack(preds_t, axis=-1)
preds_t = sigmoid_np(preds_t)
pred_t = preds_t.max(axis=-1) # max works better for F1 macro score
names = np.array(learner.data.test_ds.fnames)
save_pkl = np.column_stack((names, pred_t))
fileName = SUB_DIR + '/' + 'preds_{}.pkl'.format(fold)
fileObject = open(fileName, 'wb')
pkl.dump(save_pkl, fileObject)
fileObject.close()
# with open(fileName, 'rb') as f:
# a = pickle.load(f)
print('TTA done')
#=======================================================================================================================
# produce submission files
#=======================================================================================================================
def save_pred(pred, th=0.5, fname='protein_classification.csv'):
pred_list = []
for line in pred:
s = ' '.join(list([str(i) for i in np.nonzero(line > th)[0]]))
pred_list.append(s)
sample_df = pd.read_csv(SAMPLE)
sample_list = list(sample_df.Id)
pred_dic = dict((key, value) for (key, value)
in zip(learner.data.test_ds.fnames, pred_list))
pred_list_cor = [pred_dic[id] for id in sample_list]
df = pd.DataFrame({'Id': sample_list, 'Predicted': pred_list_cor})
df.to_csv(SUB_DIR + '/' + fname, header=True, index=False)
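# Note: save_pred writes the Kaggle-style submission with a header 'Id,Predicted' and,
# per test id, a space-separated list of predicted class indices, e.g. a row like
#     <test_id>,0 25
# (the id placeholder is illustrative).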
# Manual thresholds
th_t = np.array([0.565, 0.39, 0.55, 0.345, 0.33, 0.39, 0.33, 0.45, 0.38, 0.39,
0.34, 0.42, 0.31, 0.38, 0.49, 0.50, 0.38, 0.43, 0.46, 0.40,
0.39, 0.505, 0.37, 0.47, 0.41, 0.545, 0.32, 0.1])
print('Fractions: ', (pred_t > th_t).mean(axis=0))
save_pred(pred_t, th_t, 'protein_classification_{}.csv'.format(fold)) # From manual threshold
# Automatic fitting the thresholds based on the public LB statistics.
lb_prob = [
0.362397820, 0.043841336, 0.075268817, 0.059322034, 0.075268817,
0.075268817, 0.043841336, 0.075268817, 0.010000000, 0.010000000,
0.010000000, 0.043841336, 0.043841336, 0.014198783, 0.043841336,
0.010000000, 0.028806584, 0.014198783, 0.028806584, 0.059322034,
0.010000000, 0.126126126, 0.028806584, 0.075268817, 0.010000000,
0.222493880, 0.028806584, 0.010000000]
# I replaced 0 by 0.01 since there may be a rounding error leading to 0
def Count_soft(preds, th=0.5, d=50.0):
preds = sigmoid_np(d * (preds - th))
return preds.mean(axis=0)
def fit_test(x, y):
params = 0.5 * np.ones(len(name_label_dict))
wd = 1e-5
error = lambda p: np.concatenate((Count_soft(x, p) - y,
wd * (p - 0.5)), axis=None)
p, success = opt.leastsq(error, params)
return p
th_t = fit_test(pred_t, lb_prob)
th_t[th_t < 0.1] = 0.1
print('Thresholds: ', th_t)
print('Fractions: ', (pred_t > th_t).mean(axis=0))
print('Fractions (th = 0.5): ', (pred_t > 0.5).mean(axis=0))
save_pred(pred_t, th_t, 'protein_classification_f_{}.csv'.format(fold)) # based on public lb stats
save_pred(pred_t, th, 'protein_classification_v_{}.csv'.format(fold)) # based on validation
save_pred(pred_t, 0.5, 'protein_classification_05_{}.csv'.format(fold)) # based on fixed threshold 0.5
# using the threshold from validation set for classes not present in the public LB:
class_list = [8, 9, 10, 15, 20, 24, 27]
for i in class_list:
th_t[i] = th[i]
save_pred(pred_t, th_t, 'protein_classification_c_{}.csv'.format(fold))
# fitting thresholds based on the frequency of classes in the train dataset:
labels = pd.read_csv(LABELS_org).set_index('Id')
label_count = np.zeros(len(name_label_dict))
for label in labels['Target']:
l = [int(i) for i in label.split()]
label_count += np.eye(len(name_label_dict))[l].sum(axis=0)
label_fraction = label_count.astype(np.float) / len(labels)
print(label_count, label_fraction)
th_t = fit_test(pred_t, label_fraction)
th_t[th_t < 0.05] = 0.05
print('Thresholds: ', th_t)
print('Fractions: ', (pred_t > th_t).mean(axis=0))
save_pred(pred_t, th_t, 'protein_classification_t_{}.csv'.format(fold)) # based on frequency of classes in train
#======================================================================================================================= |
py | b40f37a99a7b95697bc6d891f705d1477ef094c2 | import mock
import pytest
from task_processing.runners.promise import Promise
@pytest.fixture
def fake_executor():
return mock.Mock()
@pytest.fixture
def fake_runner(fake_executor):
return Promise(
executor=fake_executor,
futures_executor=mock.Mock(),
)
def test_reconcile(fake_runner, fake_executor):
fake_runner.reconcile(mock.Mock())
assert fake_executor.reconcile.call_count == 1
def test_kill(fake_runner, fake_executor):
result = fake_runner.kill('some_id')
assert result == fake_executor.kill.return_value
assert fake_executor.kill.call_count == 1
def test_stop(fake_runner, fake_executor):
fake_runner.stop()
assert fake_executor.stop.call_count == 1
|
py | b40f38be5598846b3618a77567d32f03f975f3cb | """
__author__ = "Xingjian Du"
Main
-Capture the config file
-Process the json config passed
-Create an agent instance
-Run the agent
"""
import sys
import os
try:
sys.path.append(sys.argv[2])
fn_path = sys.argv[3]
except:
sys.path.append(sys.argv[1])
fn_path = sys.argv[2]
from config_file import get_config
from agents import *
def main():
os.system("touch /tmp/debug")
config = get_config()
# Create the Agent and pass all the configuration to it then run it..
agent_class = globals()[config.agent.name]
agent = agent_class(config)
agent.test_one_file((fn_path, None))
note_fn_list = agent.data_source.test_loader.dataset.note_fn_list
audio_fn_list = agent.data_source.test_loader.dataset.audio_fn_list
for pair in zip(audio_fn_list, note_fn_list):
acc = agent.test_one_file(pair)
print(pair, acc)
'''
with open("log.csv", "a+") as f:
print(f"{pair[0]},{pair[1]},{acc}", file=f)
'''
if __name__ == '__main__':
main()
|
py | b40f395ea64564a97039dd3bddc0dda10cf43a7e | # Generated by Django 2.1.10 on 2019-07-29 00:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | b40f3b3ee8b21f812f7075578d78a4170c6d756a | # examples/increment_example.py
#from weave import ext_tools
# use the following so that development version is used.
import sys
sys.path.insert(0,'..')
import ext_tools
def build_increment_ext():
""" Build a simple extension with functions that increment numbers.
The extension will be built in the local directory.
"""
mod = ext_tools.ext_module('increment_ext')
a = 1 # effectively a type declaration for 'a' in the
# following functions.
ext_code = "return_val = PyInt_FromLong(a+1);"
func = ext_tools.ext_function('increment',ext_code,['a'])
mod.add_function(func)
ext_code = "return_val = PyInt_FromLong(a+2);"
func = ext_tools.ext_function('increment_by_2',ext_code,['a'])
mod.add_function(func)
mod.compile()
if __name__ == "__main__":
try:
import increment_ext
except ImportError:
build_increment_ext()
import increment_ext
a = 1
print 'a, a+1:', a, increment_ext.increment(a)
print 'a, a+2:', a, increment_ext.increment_by_2(a)
|
py | b40f3b7aa67d41d368f7e411ad7a9337e30b13de | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation for the Special Resolution filing."""
from http import HTTPStatus
from typing import Dict
from flask_babel import _
from legal_api.errors import Error
from legal_api.models import Business
from ..utils import get_str
def validate(business: Business, con: Dict) -> Error:
"""Validate the Special Resolution filing."""
if not business or not con:
return Error(HTTPStatus.BAD_REQUEST, [{'error': _('A valid business and filing are required.')}])
msg = []
resolution_path = '/filing/specialResolution/resolution'
resolution_name = get_str(con, resolution_path)
if not resolution_name:
msg.append({'error': _('Resolution must be provided.'),
'path': resolution_path})
if msg:
return Error(HTTPStatus.BAD_REQUEST, msg)
return None
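# Illustrative payload shape (based on the resolution_path used above; values are
# hypothetical):
#     con = {'filing': {'specialResolution': {'resolution': 'Resolved that ...'}}}
#     err = validate(business, con)   # returns None when a resolution is provided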
|
py | b40f3c9f56ead1528f935a083dfa121a0d69dc18 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import tir
from tvm.ir.base import structural_equal
class IntSetChecker:
def __init__(self):
self.analyzer = tvm.arith.Analyzer()
def verify(self, data, dmap, expected):
res = self.analyzer.int_set(data, dmap)
def err_msg():
return "\ndata={}\ndmap={}\nres={}\nexpected={}".format(data, dmap, res, expected)
def equal(x, y):
res = self.analyzer.canonical_simplify(x - y)
return tvm.tir.analysis.expr_deep_equal(res, 0)
assert equal(res.min_value, expected[0]), err_msg()
assert equal(res.max_value, expected[1]), err_msg()
def test_basic():
s = tvm.arith.IntervalSet(2, 3)
assert s.min_value.value == 2
assert s.max_value.value == 3
s = tvm.arith.IntSet.single_point(2)
assert s.min_value.value == 2
assert s.max_value.value == 2
def test_vector():
base = 10
stride = 3
lanes = 2
s = tvm.arith.IntSet.vector(tvm.tir.Ramp(base, stride, lanes))
assert s.min_value.value == base
assert s.max_value.value == base + stride * lanes - 1
def test_add_sub():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(x + y, {x: tvm.arith.IntervalSet(0, 10)}, (y, 10 + y))
ck.verify(x + y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (1, 21))
ck.verify(x - y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (-11, 9))
def test_mul_div():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
tdiv = tvm.tir.truncdiv
ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
ck.verify(x * y, {x: tvm.arith.IntervalSet(0, 10)}, (0, 10 * y))
ck.verify(x * 2, {x: tvm.arith.IntervalSet(1, 10)}, (2, 20))
ck.verify(x * -2, {x: tvm.arith.IntervalSet(1, 10)}, (-20, -2))
ck.verify(tdiv(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, tdiv(10, y)))
ck.verify(tdiv(x, 2), {x: tvm.arith.IntervalSet(1, 10)}, (0, 5))
fld = tvm.te.floordiv
ck.verify(fld(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, fld(10, y)))
ck.verify(fld(x, 2), {x: tvm.arith.IntervalSet(-1, 10)}, (-1, 5))
def test_mod():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
tmod = tvm.tir.truncmod
ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
ck.verify(tmod(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, y - 1))
ck.verify(tmod(x, 10), {x: tvm.arith.IntervalSet(1, 10)}, (0, 9))
flm = tvm.te.floormod
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(-10, 10)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 5)}, (3, 5))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(13, 15)}, (3, 5))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 15)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 11)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(1, 21)}, (0, 9))
floordiv = tvm.te.floordiv
z = te.var("z")
ck.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 3))
ck.verify(flm(y, 8), {y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)}, (0, 7))
ck1 = IntSetChecker()
ck1.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 2))
ck1.verify(
flm(y, 8), {y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)}, (x * 4, x * 4 + 3)
)
def test_max_min():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(tvm.te.max(x, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (1, 11))
ck.verify(tvm.te.min(x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 9))
ck.verify(tvm.te.min(x, y), {}, (tvm.te.min(x, y), tvm.te.min(x, y)))
ck.verify(tvm.te.max(x, y), {}, (tvm.te.max(x, y), tvm.te.max(x, y)))
def test_select():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(tvm.tir.Select(x > 0, x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 11))
def test_region_lower_bound_not_independent():
i = tvm.tir.Var("i", "int32")
result = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range(begin=i, end=i + 2),
tvm.ir.Range(begin=i + 1, end=i + 4),
],
var_dom={
i: tvm.ir.Range(begin=0, end=64),
},
predicate=tvm.tir.IntImm("bool", 1),
)
assert result is None
def test_region_lower_bound_stride_too_wide():
i = tvm.tir.Var("i", "int32")
result = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range(begin=i * 4, end=i * 4 + 2),
],
var_dom={
i: tvm.ir.Range(begin=0, end=64),
},
predicate=tvm.tir.IntImm("bool", 1),
)
assert result is None
def test_region_lower_bound_small_stride():
i = tvm.tir.Var("i", "int32")
(result,) = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range.from_min_extent(min_value=i * 4, extent=8),
],
var_dom={
i: tvm.ir.Range(begin=0, end=64),
},
predicate=tvm.tir.IntImm("bool", 1),
)
assert result.min_value.value == 0
assert result.max_value.value == 259
def test_region_lower_bound_split_predicate():
x_o = tvm.tir.Var("xo", "int32")
x_i = tvm.tir.Var("xi", "int32")
x = x_o * 4 + x_i
(result,) = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range.from_min_extent(min_value=x * 4, extent=8),
],
var_dom={
x_o: tvm.ir.Range(begin=0, end=16),
x_i: tvm.ir.Range(begin=0, end=4),
},
predicate=x < 63,
)
assert result.min_value.value == 0
assert result.max_value.value == 255
def test_region_lower_bound_multiple_variables():
div = tvm.tir.floordiv
mod = tvm.tir.floormod
x = tvm.tir.Var("x", "int32")
wid = tvm.tir.Var("wid", "int32")
i = div(x, 16)
j = div(mod(x, 16), 4) * 8 + mod(x, 4) + div(wid, 32) * 4
k = wid % 32
(i_int_set, j_int_set, k_int_set) = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range.from_min_extent(min_value=i, extent=1),
tvm.ir.Range.from_min_extent(min_value=j, extent=1),
tvm.ir.Range.from_min_extent(min_value=k, extent=1),
],
var_dom={
x: tvm.ir.Range(begin=0, end=32),
wid: tvm.ir.Range(begin=0, end=64),
},
predicate=tvm.tir.IntImm("bool", 1),
)
assert i_int_set.min_value.value == 0
assert i_int_set.max_value.value == 1
assert j_int_set.min_value.value == 0
assert j_int_set.max_value.value == 31
assert k_int_set.min_value.value == 0
assert k_int_set.max_value.value == 31
def test_region_lower_bound_negative_scale():
i = tvm.tir.Var("i", "int32")
j = tvm.tir.Var("j", "int32")
int_set_0, int_set_1 = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range.from_min_extent(min_value=1 - i, extent=4),
tvm.ir.Range.from_min_extent(min_value=20 - j * 4, extent=16),
],
var_dom={
i: tvm.ir.Range(begin=0, end=4),
j: tvm.ir.Range(begin=0, end=4),
},
predicate=tvm.tir.IntImm("bool", 1),
)
assert int_set_0.min_value.value == -2
assert int_set_0.max_value.value == 4
assert int_set_1.min_value.value == 8
assert int_set_1.max_value.value == 35
def test_region_lower_bound_for_non_perfect_tile():
h1 = tvm.tir.Var("h1", "int32")
h2 = tvm.tir.Var("h2", "int32")
h3 = tvm.tir.Var("h3", "int32")
analyzer = tvm.arith.Analyzer()
def do_test_point_access(point, predicates, var_dom, expect):
regions = tvm.arith.estimate_region_lower_bound(
region=[
tvm.ir.Range.from_min_extent(min_value=point, extent=1),
],
var_dom=var_dom,
predicate=tvm.tir.all(*predicates),
)
if expect is None: # expect a failure
assert regions is None
else:
assert len(regions) == 1
for binding, expect_min, expect_max in expect:
min_diff = expect_min - regions[0].min_value
assert analyzer.simplify(tir.stmt_functor.substitute(min_diff, binding), 3) == 0
max_diff = expect_max - regions[0].max_value
assert analyzer.simplify(tir.stmt_functor.substitute(max_diff, binding), 3) == 0
# non-uniform tiling, single inner variable
# h3 == 0: region is [1, 9]
# 0 < h3 <= 26: region is [h3 * 8, h3 * 8 + 9]
# h3 > 26: region is [h3 * 8, 223]
do_test_point_access(
point=h3 * 8 + h2,
predicates=[1 <= h3 * 8 + h2, h3 * 8 + h2 < 224],
var_dom={
h2: tvm.ir.Range(begin=0, end=10),
},
expect=[
(
{},
tvm.tir.max(h3 * 8, 1),
tvm.tir.max(h3 * 8, 1)
- tvm.tir.max(h3 * 8, 214)
- tvm.tir.max(1 - h3 * 8, 0)
+ 223,
),
({h3: 0}, 1, 9),
({h3: 10}, h3 * 8, h3 * 8 + 9),
({h3: 27}, h3 * 8, 223),
],
)
# non-uniform tiling, two inner variables
do_test_point_access(
point=h3 * 8 + h2 * 5 + h1,
predicates=[1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h2 * 5 + h1 < 224],
var_dom={
h2: tvm.ir.Range(begin=0, end=2),
h1: tvm.ir.Range(begin=0, end=5),
},
expect=[
(
{},
tvm.tir.max(h3 * 8, 1),
tvm.tir.max(h3 * 8, 1)
- tvm.tir.max(h3 * 8, 214)
- tvm.tir.max(1 - h3 * 8, 0)
+ 223,
),
({h3: 0}, 1, 9),
({h3: 10}, h3 * 8, h3 * 8 + 9),
({h3: 27}, h3 * 8, 223),
],
)
# should fail on incompatible predicates
do_test_point_access(
point=h3 * 8 + h2 * 5 + h1,
predicates=[1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h1 * 2 + h2 < 224],
var_dom={
h2: tvm.ir.Range(begin=0, end=2),
h1: tvm.ir.Range(begin=0, end=5),
},
expect=None,
)
def test_union_lower_bound():
neg_inf = tvm.arith.int_set.neg_inf()
pos_inf = tvm.arith.int_set.pos_inf()
set_0 = tvm.arith.IntervalSet(min_value=neg_inf, max_value=0)
set_1 = tvm.arith.IntervalSet(min_value=1, max_value=pos_inf)
result = tvm.arith.int_set.union_lower_bound([set_0, set_1])
assert result.min_value.same_as(neg_inf)
assert result.max_value.same_as(pos_inf)
if __name__ == "__main__":
test_basic()
test_vector()
test_add_sub()
test_mul_div()
test_max_min()
test_select()
test_mod()
test_region_lower_bound_not_independent()
test_region_lower_bound_stride_too_wide()
test_region_lower_bound_small_stride()
test_region_lower_bound_split_predicate()
test_region_lower_bound_multiple_variables()
test_region_lower_bound_negative_scale()
test_region_lower_bound_for_non_perfect_tile()
test_union_lower_bound()
|
py | b40f3cb1ee42e1bb7e3a128e14023a582a59e1d5 | # -*- coding: utf-8 -*-
"""
ulmo.wof.core
~~~~~~~~~~~~~
This module provides direct access to `CUAHSI WaterOneFlow`_ web services.
.. _CUAHSI WaterOneFlow: http://his.cuahsi.org/wofws.html
"""
from future import standard_library
standard_library.install_aliases()
import io
import os
from builtins import str
import isodate
import suds.client
from suds.cache import ObjectCache
from tsgettoolbox.ulmo import util, waterml
_suds_client = None
def get_sites(wsdl_url, suds_cache=("default",), timeout=None, user_cache=False):
"""
Retrieves information on the sites that are available from a WaterOneFlow
service using a GetSites request. For more detailed information including
which variables and time periods are available for a given site, use
``get_site_info()``.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
suds_cache : `None` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
sites_dict : dict
a python dict with site codes mapped to site information
"""
suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == "1.0":
response = suds_client.service.GetSitesXml("")
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_site_infos(response_buffer)
elif waterml_version == "1.1":
response = suds_client.service.GetSites("")
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_site_infos(response_buffer)
return {site["network"] + ":" + site["code"]: site for site in list(sites.values())}
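# Illustrative call (the WSDL URL below only shows the typical CUAHSI-hosted endpoint
# pattern and may not resolve):
#     sites = get_sites('http://hydroportal.cuahsi.org/ipswich/cuahsi_1_1.asmx?WSDL')
#     # -> {'<network>:<site_code>': {'code': ..., 'name': ..., 'network': ..., ...}, ...}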
def get_site_info(
wsdl_url, site_code, suds_cache=("default",), timeout=None, user_cache=False
):
"""
Retrieves detailed site information from a WaterOneFlow service using a
GetSiteInfo request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get more information for. Site codes
MUST contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
suds_cache : ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
site_info : dict
a python dict containing site information
"""
suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == "1.0":
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_sites(response_buffer)
elif waterml_version == "1.1":
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_sites(response_buffer)
if len(sites) == 0:
return {}
site_info = list(sites.values())[0]
series_dict = {
series["variable"]["vocabulary"] + ":" + series["variable"]["code"]: series
for series in site_info["series"]
}
site_info["series"] = series_dict
return site_info
def get_values(
wsdl_url,
site_code,
variable_code,
start=None,
end=None,
suds_cache=("default",),
timeout=None,
user_cache=False,
):
"""
Retrieves site values from a WaterOneFlow service using a GetValues request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get values for. Site codes MUST
contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
variable_code : str
Variable code of the variable you'd like to get values for. Variable
codes MUST contain the network and be of the form
<vocabulary>:<variable_code>, as is required by WaterOneFlow.
start : ``None`` or datetime (see :ref:`dates-and-times`)
Start of the query datetime range. If omitted, data from the start of
the time series to the ``end`` timestamp will be returned (but see caveat,
in note below).
end : ``None`` or datetime (see :ref:`dates-and-times`)
End of the query datetime range. If omitted, data from the ``start``
timestamp to end of the time series will be returned (but see caveat,
in note below).
suds_cache : ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
site_values : dict
a python dict containing values
Notes
-----
If both ``start`` and ``end`` parameters are omitted, the entire time series
available will typically be returned. However, some service providers will return
an error if either start or end are omitted; this is specially true for services
hosted or redirected by CUAHSI via the CUAHSI HydroPortal, which have a 'WSDL' url
using the domain http://hydroportal.cuahsi.org. For HydroPortal, a start datetime
of '1753-01-01' has been known to return valid results while catching the oldest
start times, though the response may be broken up into chunks ('paged').
"""
suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)
# Note from Emilio:
# Not clear if WOF servers really do handle time zones (time offsets or
# "Z" in the iso8601 datetime strings. In the past, I (Emilio) have
# passed naive strings to GetValues(). if a datetime object is passed to
# this ulmo function, the isodate code above will include it in the
# resulting iso8601 string; if not, no. Test effect of dt_isostr having
# a timezone code or offset, vs not having it (the latter, naive dt
# strings, is what I've been using all along)
# the interpretation of start and end time zone is server-dependent
start_dt_isostr = None
end_dt_isostr = None
if start is not None:
start_datetime = util.convert_datetime(start)
start_dt_isostr = isodate.datetime_isoformat(start_datetime)
if end is not None:
end_datetime = util.convert_datetime(end)
end_dt_isostr = isodate.datetime_isoformat(end_datetime)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetValues(
site_code, variable_code, startDate=start_dt_isostr, endDate=end_dt_isostr
)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == "1.0":
values = waterml.v1_0.parse_site_values(response_buffer)
elif waterml_version == "1.1":
values = waterml.v1_1.parse_site_values(response_buffer)
    if variable_code is not None:
return list(values.values())[0]
else:
return values
def get_variable_info(
wsdl_url,
variable_code=None,
suds_cache=("default",),
timeout=None,
user_cache=False,
):
"""
Retrieves site values from a WaterOneFlow service using a GetVariableInfo
request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
variable_code : `None` or str
If `None` (default) then information on all variables will be returned,
otherwise, this should be set to the variable code of the variable you'd
like to get more information on. Variable codes MUST contain the
network and be of the form <vocabulary>:<variable_code>, as is required
by WaterOneFlow.
suds_cache : ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
variable_info : dict
a python dict containing variable information. If no variable code is
`None` (default) then this will be a nested set of dicts keyed by
<vocabulary>:<variable_code>
"""
suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetVariableInfo(variable_code)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == "1.0":
variable_info = waterml.v1_0.parse_variables(response_buffer)
elif waterml_version == "1.1":
variable_info = waterml.v1_1.parse_variables(response_buffer)
    if variable_code is not None and len(variable_info) == 1:
return list(variable_info.values())[0]
else:
return {
"{}:{}".format(var["vocabulary"], var["code"]): var
for var in list(variable_info.values())
}
def _waterml_version(suds_client):
tns_str = str(suds_client.wsdl.tns[1])
if tns_str == "http://www.cuahsi.org/his/1.0/ws/":
return "1.0"
elif tns_str == "http://www.cuahsi.org/his/1.1/ws/":
return "1.1"
else:
raise NotImplementedError(
"only WaterOneFlow 1.0 and 1.1 are currently supported"
)
def _get_client(wsdl_url, suds_cache=("default",), suds_timeout=None, user_cache=False):
"""
Open and re-use (persist) a suds.client.Client instance _suds_client throughout
the session, to minimize WOF server impact and improve performance. _suds_client
is global in scope.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
suds_cache : ``None`` or tuple
suds client local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the suds default (1 day) will be used.
Use ``None`` to turn off caching.
suds_timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
_suds_client : suds Client
Newly or previously instantiated (reused) suds Client object.
"""
global _suds_client
# Handle new or changed client request (create new client)
if (
_suds_client is None
or _suds_client.wsdl.url != wsdl_url
        or suds_timeout is not None
):
if user_cache:
cache_dir = os.path.join(util.get_ulmo_dir(), "suds")
util.mkdir_if_doesnt_exist(cache_dir)
_suds_client = suds.client.Client(
wsdl_url, cache=ObjectCache(location=cache_dir)
)
else:
_suds_client = suds.client.Client(wsdl_url)
if suds_cache is None:
_suds_client.set_options(cache=None)
else:
cache = _suds_client.options.cache
# could add some error catching ...
if suds_cache[0] == "default":
cache.setduration(days=1)
else:
cache.setduration(**dict([suds_cache]))
    if suds_timeout is not None:
_suds_client.set_options(timeout=suds_timeout)
return _suds_client
|
py | b40f3d3663b94b1659e4a7e22e8187a54a14fa24 | import asyncio
import traceback
import urllib.parse
import asgiref.compatibility
import asgiref.wsgi
from mitmproxy import ctx, http
from mitmproxy.controller import DummyReply
class ASGIApp:
"""
An addon that hosts an ASGI/WSGI HTTP app within mitmproxy, at a specified hostname and port.
Some important caveats:
- This implementation will block and wait until the entire HTTP response is completed before sending out data.
- It currently only implements the HTTP protocol (Lifespan and WebSocket are unimplemented).
"""
def __init__(self, asgi_app, host: str, port: int):
asgi_app = asgiref.compatibility.guarantee_single_callable(asgi_app)
self.asgi_app, self.host, self.port = asgi_app, host, port
@property
def name(self) -> str:
return f"asgiapp:{self.host}:{self.port}"
def should_serve(self, flow: http.HTTPFlow) -> bool:
assert flow.reply
return bool(
(flow.request.pretty_host, flow.request.port) == (self.host, self.port)
and flow.reply.state == "start" and not flow.error and not flow.response
and not isinstance(flow.reply, DummyReply) # ignore the HTTP flows of this app loaded from somewhere
)
def request(self, flow: http.HTTPFlow) -> None:
assert flow.reply
if self.should_serve(flow):
flow.reply.take() # pause hook completion
asyncio.ensure_future(serve(self.asgi_app, flow))
class WSGIApp(ASGIApp):
def __init__(self, wsgi_app, host: str, port: int):
asgi_app = asgiref.wsgi.WsgiToAsgi(wsgi_app)
super().__init__(asgi_app, host, port)
HTTP_VERSION_MAP = {
"HTTP/1.0": "1.0",
"HTTP/1.1": "1.1",
"HTTP/2.0": "2",
}
def make_scope(flow: http.HTTPFlow) -> dict:
# %3F is a quoted question mark
quoted_path = urllib.parse.quote_from_bytes(flow.request.data.path).split("%3F", maxsplit=1)
# (Unicode string) – HTTP request target excluding any query string, with percent-encoded
# sequences and UTF-8 byte sequences decoded into characters.
path = quoted_path[0]
# (byte string) – URL portion after the ?, percent-encoded.
query_string: bytes
if len(quoted_path) > 1:
query_string = urllib.parse.unquote(quoted_path[1]).encode()
else:
query_string = b""
return {
"type": "http",
"asgi": {
"version": "3.0",
"spec_version": "2.1",
},
"http_version": HTTP_VERSION_MAP.get(flow.request.http_version, "1.1"),
"method": flow.request.method,
"scheme": flow.request.scheme,
"path": path,
"raw_path": flow.request.path,
"query_string": query_string,
"headers": list(list(x) for x in flow.request.headers.fields),
"client": flow.client_conn.peername,
"extensions": {
"mitmproxy.master": ctx.master,
}
}
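# Illustrative result: for a request like "GET http://example.internal/hello?x=1",
# make_scope yields an ASGI HTTP scope roughly of the form (entries abbreviated):
#     {"type": "http", "http_version": "1.1", "method": "GET", "scheme": "http",
#      "path": "/hello", "raw_path": "/hello?x=1", "query_string": b"x=1", ...}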
async def serve(app, flow: http.HTTPFlow):
"""
Serves app on flow.
"""
assert flow.reply
scope = make_scope(flow)
done = asyncio.Event()
received_body = False
sent_response = False
async def receive():
nonlocal received_body
if not received_body:
received_body = True
return {
"type": "http.request",
"body": flow.request.raw_content,
}
else: # pragma: no cover
# We really don't expect this to be called a second time, but what to do?
# We just wait until the request is done before we continue here with sending a disconnect.
await done.wait()
return {
"type": "http.disconnect"
}
async def send(event):
if event["type"] == "http.response.start":
flow.response = http.Response.make(event["status"], b"", event.get("headers", []))
flow.response.decode()
elif event["type"] == "http.response.body":
flow.response.content += event.get("body", b"")
if not event.get("more_body", False):
nonlocal sent_response
sent_response = True
else:
raise AssertionError(f"Unexpected event: {event['type']}")
try:
await app(scope, receive, send)
if not sent_response:
            raise RuntimeError("no response sent.")
except Exception:
ctx.log.error(f"Error in asgi app:\n{traceback.format_exc(limit=-5)}")
flow.response = http.Response.make(500, b"ASGI Error.")
finally:
flow.reply.commit()
done.set()
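# Illustrative addon script usage (assumes a Flask application object named
# `flask_app` defined elsewhere):
#     addons = [
#         WSGIApp(flask_app, "example.internal", 80),
#     ]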
|
py | b40f3f74da2352b9ca48adbf167255bf55329602 | #
# Copyright 2018 Picovoice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
# Add Porcupine's binding file to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), 'porcupine/binding/python'))
# Add Snowboy's binding file to the system path.
if sys.version_info[0] < 3:
sys.path.append(os.path.join(os.path.dirname(__file__), 'snowboy/swig/Python'))
else:
sys.path.append(os.path.join(os.path.dirname(__file__), 'snowboy/swig/Python3'))
from porcupine import Porcupine
import snowboydetect
|
py | b40f4025b0584c6597cbdd9fcb156d28e919516b | """
Ethereum Specification
^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Entry point for the Ethereum specification.
"""
from dataclasses import dataclass
from typing import List, Optional, Set, Tuple
from ethereum.base_types import Bytes0
from ethereum.crypto import SECP256K1N
from ethereum.ethash import dataset_size, generate_cache, hashimoto_light
from ethereum.tangerine_whistle.eth_types import TX_CREATE_COST
from ethereum.utils.ensure import ensure
from .. import crypto, rlp
from ..base_types import U256, U256_CEIL_VALUE, Bytes, Uint
from . import vm
from .bloom import logs_bloom
from .eth_types import (
TX_BASE_COST,
TX_DATA_COST_PER_NON_ZERO,
TX_DATA_COST_PER_ZERO,
Address,
Block,
Bloom,
Hash32,
Header,
Log,
Receipt,
Root,
Transaction,
)
from .state import (
State,
create_ether,
destroy_account,
get_account,
increment_nonce,
set_account_balance,
state_root,
)
from .trie import Trie, root, trie_set
from .utils.message import prepare_message
from .vm.interpreter import process_message_call
BLOCK_REWARD = U256(5 * 10 ** 18)
GAS_LIMIT_ADJUSTMENT_FACTOR = 1024
GAS_LIMIT_MINIMUM = 5000
GENESIS_DIFFICULTY = Uint(131072)
MAX_OMMER_DEPTH = 6
@dataclass
class BlockChain:
"""
History and current state of the block chain.
"""
blocks: List[Block]
state: State
def apply_fork(old: BlockChain) -> BlockChain:
"""
Transforms the state from the previous hard fork (`old`) into the block
chain object for this hard fork and returns it.
Parameters
----------
old :
Previous block chain object.
Returns
-------
new : `BlockChain`
Upgraded block chain object for this hard fork.
"""
return old
def get_last_256_block_hashes(chain: BlockChain) -> List[Hash32]:
"""
Obtain the list of hashes of the previous 256 blocks in order of increasing
block number.
    This function will return fewer hashes for the first 256 blocks.
Parameters
----------
chain :
History and current state.
Returns
-------
recent_block_hashes : `List[Hash32]`
Hashes of the recent 256 blocks in order of increasing block number.
"""
recent_blocks = chain.blocks[-255:]
# TODO: This function has not been tested rigorously
if len(recent_blocks) == 0:
return []
recent_block_hashes = []
for block in recent_blocks:
prev_block_hash = block.header.parent_hash
recent_block_hashes.append(prev_block_hash)
# We are computing the hash only for the most recent block and not for
# the rest of the blocks as they have successors which have the hash of
# the current block as parent hash.
most_recent_block_hash = crypto.keccak256(
rlp.encode(recent_blocks[-1].header)
)
recent_block_hashes.append(most_recent_block_hash)
return recent_block_hashes
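# Illustrative note (not part of the original spec code): for a chain whose
# retained blocks are [b1, b2, b3], the list built above is
# [b1.parent_hash, b2.parent_hash, b3.parent_hash, keccak256(rlp(b3.header))],
# i.e. the parent hashes of the retained blocks plus the hash of the newest
# block itself, in order of increasing block number.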
def state_transition(chain: BlockChain, block: Block) -> None:
"""
Attempts to apply a block to an existing block chain.
Parameters
----------
chain :
History and current state.
block :
Block to apply to `chain`.
"""
parent_header = chain.blocks[-1].header
validate_header(block.header, parent_header)
validate_ommers(block.ommers, block.header, chain)
(
gas_used,
transactions_root,
receipt_root,
block_logs_bloom,
state,
) = apply_body(
chain.state,
get_last_256_block_hashes(chain),
block.header.coinbase,
block.header.number,
block.header.gas_limit,
block.header.timestamp,
block.header.difficulty,
block.transactions,
block.ommers,
)
ensure(gas_used == block.header.gas_used)
ensure(transactions_root == block.header.transactions_root)
ensure(state_root(state) == block.header.state_root)
ensure(receipt_root == block.header.receipt_root)
ensure(block_logs_bloom == block.header.bloom)
chain.blocks.append(block)
if len(chain.blocks) > 255:
# Real clients have to store more blocks to deal with reorgs, but the
# protocol only requires the last 255
chain.blocks = chain.blocks[-255:]
def validate_header(header: Header, parent_header: Header) -> None:
"""
Verifies a block header.
Parameters
----------
header :
Header to check for correctness.
parent_header :
Parent Header of the header to check for correctness
"""
block_difficulty = calculate_block_difficulty(
parent_header.number,
header.timestamp,
parent_header.timestamp,
parent_header.difficulty,
)
block_parent_hash = crypto.keccak256(rlp.encode(parent_header))
ensure(header.parent_hash == block_parent_hash)
ensure(header.difficulty == block_difficulty)
ensure(header.number == parent_header.number + 1)
ensure(check_gas_limit(header.gas_limit, parent_header.gas_limit))
ensure(header.timestamp > parent_header.timestamp)
ensure(len(header.extra_data) <= 32)
validate_proof_of_work(header)
def generate_header_hash_for_pow(header: Header) -> Hash32:
"""
Generate rlp hash of the header which is to be used for Proof-of-Work
verification. This hash is generated with the following header fields:
* `parent_hash`
* `ommers_hash`
* `coinbase`
* `state_root`
* `transactions_root`
* `receipt_root`
* `bloom`
* `difficulty`
* `number`
* `gas_limit`
* `gas_used`
* `timestamp`
* `extra_data`
In other words, the PoW artefacts `mix_digest` and `nonce` are ignored
while calculating this hash.
Parameters
----------
header :
The header object for which the hash is to be generated.
Returns
-------
hash : `Hash32`
The PoW valid rlp hash of the passed in header.
"""
header_data_without_pow_artefacts = [
header.parent_hash,
header.ommers_hash,
header.coinbase,
header.state_root,
header.transactions_root,
header.receipt_root,
header.bloom,
header.difficulty,
header.number,
header.gas_limit,
header.gas_used,
header.timestamp,
header.extra_data,
]
return rlp.rlp_hash(header_data_without_pow_artefacts)
def validate_proof_of_work(header: Header) -> None:
"""
Validates the Proof of Work constraints.
Parameters
----------
header :
Header of interest.
"""
header_hash = generate_header_hash_for_pow(header)
# TODO: Memoize this somewhere and read from that data instead of
# calculating cache for every block validation.
cache = generate_cache(header.number)
mix_digest, result = hashimoto_light(
header_hash, header.nonce, cache, dataset_size(header.number)
)
ensure(mix_digest == header.mix_digest)
ensure(
Uint.from_be_bytes(result) <= (U256_CEIL_VALUE // header.difficulty)
)
def apply_body(
state: State,
block_hashes: List[Hash32],
coinbase: Address,
block_number: Uint,
block_gas_limit: Uint,
block_time: U256,
block_difficulty: Uint,
transactions: Tuple[Transaction, ...],
ommers: Tuple[Header, ...],
) -> Tuple[Uint, Root, Root, Bloom, State]:
"""
Executes a block.
Parameters
----------
state :
Current account state.
block_hashes :
List of hashes of the previous 256 blocks in the order of
increasing block number.
coinbase :
Address of account which receives block reward and transaction fees.
block_number :
Position of the block within the chain.
block_gas_limit :
Initial amount of gas available for execution in this block.
block_time :
Time the block was produced, measured in seconds since the epoch.
block_difficulty :
Difficulty of the block.
transactions :
Transactions included in the block.
ommers :
Headers of ancestor blocks which are not direct parents (formerly
uncles.)
Returns
-------
    gas_used : `eth1spec.base_types.Uint`
        Gas consumed by all transactions in the block (the block gas limit
        minus the gas still available after execution).
transactions_root : `eth1spec.eth_types.Root`
Trie root of all the transactions in the block.
receipt_root : `eth1spec.eth_types.Root`
Trie root of all the receipts in the block.
block_logs_bloom : `Bloom`
Logs bloom of all the logs included in all the transactions of the
block.
state : `eth1spec.eth_types.State`
State after all transactions have been executed.
"""
gas_available = block_gas_limit
transactions_trie: Trie[Bytes, Optional[Transaction]] = Trie(
secured=False, default=None
)
receipts_trie: Trie[Bytes, Optional[Receipt]] = Trie(
secured=False, default=None
)
block_logs: Tuple[Log, ...] = ()
for i, tx in enumerate(transactions):
trie_set(transactions_trie, rlp.encode(Uint(i)), tx)
ensure(tx.gas <= gas_available)
sender_address = recover_sender(tx)
env = vm.Environment(
caller=sender_address,
origin=sender_address,
block_hashes=block_hashes,
coinbase=coinbase,
number=block_number,
gas_limit=block_gas_limit,
gas_price=tx.gas_price,
time=block_time,
difficulty=block_difficulty,
state=state,
)
gas_used, logs = process_transaction(env, tx)
gas_available -= gas_used
trie_set(
receipts_trie,
rlp.encode(Uint(i)),
Receipt(
post_state=state_root(state),
cumulative_gas_used=(block_gas_limit - gas_available),
bloom=logs_bloom(logs),
logs=logs,
),
)
block_logs += logs
pay_rewards(state, block_number, coinbase, ommers)
    block_gas_used = block_gas_limit - gas_available
block_logs_bloom = logs_bloom(block_logs)
return (
        block_gas_used,
root(transactions_trie),
root(receipts_trie),
block_logs_bloom,
state,
)
def validate_ommers(
ommers: Tuple[Header, ...], block_header: Header, chain: BlockChain
) -> None:
"""
Validates the ommers mentioned in the block.
Parameters
----------
ommers :
List of ommers mentioned in the current block.
block_header:
The header of current block.
chain :
History and current state.
"""
block_hash = rlp.rlp_hash(block_header)
ensure(rlp.rlp_hash(ommers) == block_header.ommers_hash)
if len(ommers) == 0:
# Nothing to validate
return
# Check that each ommer satisfies the constraints of a header
for ommer in ommers:
ensure(1 <= ommer.number < block_header.number)
ommer_parent_header = chain.blocks[
-(block_header.number - ommer.number) - 1
].header
validate_header(ommer, ommer_parent_header)
# Check that there can be only at most 2 ommers for a block.
ensure(len(ommers) <= 2)
ommers_hashes = [rlp.rlp_hash(ommer) for ommer in ommers]
# Check that there are no duplicates in the ommers of current block
ensure(len(ommers_hashes) == len(set(ommers_hashes)))
recent_canonical_blocks = chain.blocks[-(MAX_OMMER_DEPTH + 1) :]
recent_canonical_block_hashes = {
rlp.rlp_hash(block.header) for block in recent_canonical_blocks
}
recent_ommers_hashes: Set[Hash32] = set()
for block in recent_canonical_blocks:
recent_ommers_hashes = recent_ommers_hashes.union(
{rlp.rlp_hash(ommer) for ommer in block.ommers}
)
for ommer_index, ommer in enumerate(ommers):
ommer_hash = ommers_hashes[ommer_index]
# The current block shouldn't be the ommer
ensure(ommer_hash != block_hash)
# Ommer shouldn't be one of the recent canonical blocks
ensure(ommer_hash not in recent_canonical_block_hashes)
# Ommer shouldn't be one of the uncles mentioned in the recent
# canonical blocks
ensure(ommer_hash not in recent_ommers_hashes)
# Ommer age with respect to the current block. For example, an age of
        # 1 indicates that the ommer is a sibling of the previous block.
ommer_age = block_header.number - ommer.number
ensure(1 <= ommer_age <= MAX_OMMER_DEPTH)
ensure(ommer.parent_hash in recent_canonical_block_hashes)
ensure(ommer.parent_hash != block_header.parent_hash)
def pay_rewards(
state: State,
block_number: Uint,
coinbase: Address,
ommers: Tuple[Header, ...],
) -> None:
"""
Pay rewards to the block miner as well as the ommers miners.
Parameters
----------
state :
Current account state.
block_number :
Position of the block within the chain.
coinbase :
Address of account which receives block reward and transaction fees.
ommers :
List of ommers mentioned in the current block.
"""
miner_reward = BLOCK_REWARD + (len(ommers) * (BLOCK_REWARD // 32))
create_ether(state, coinbase, miner_reward)
for ommer in ommers:
# Ommer age with respect to the current block.
ommer_age = U256(block_number - ommer.number)
ommer_miner_reward = ((8 - ommer_age) * BLOCK_REWARD) // 8
create_ether(state, ommer.coinbase, ommer_miner_reward)
def process_transaction(
env: vm.Environment, tx: Transaction
) -> Tuple[U256, Tuple[Log, ...]]:
"""
Execute a transaction against the provided environment.
Parameters
----------
env :
Environment for the Ethereum Virtual Machine.
tx :
Transaction to execute.
Returns
-------
gas_left : `eth1spec.base_types.U256`
Remaining gas after execution.
logs : `Tuple[eth1spec.eth_types.Log, ...]`
Logs generated during execution.
"""
ensure(validate_transaction(tx))
sender = env.origin
sender_account = get_account(env.state, sender)
gas_fee = tx.gas * tx.gas_price
ensure(sender_account.nonce == tx.nonce)
ensure(sender_account.balance >= gas_fee)
gas = tx.gas - calculate_intrinsic_cost(tx)
increment_nonce(env.state, sender)
sender_balance_after_gas_fee = sender_account.balance - gas_fee
set_account_balance(env.state, sender, sender_balance_after_gas_fee)
message = prepare_message(
sender,
tx.to,
tx.value,
tx.data,
gas,
env,
)
(
gas_left,
refund_counter,
logs,
accounts_to_delete,
has_erred,
) = process_message_call(message, env)
gas_used = tx.gas - gas_left
gas_refund = min(gas_used // 2, refund_counter)
gas_refund_amount = (gas_left + gas_refund) * tx.gas_price
transaction_fee = (tx.gas - gas_left - gas_refund) * tx.gas_price
total_gas_used = gas_used - gas_refund
# refund gas
sender_balance_after_refund = (
get_account(env.state, sender).balance + gas_refund_amount
)
set_account_balance(env.state, sender, sender_balance_after_refund)
# transfer miner fees
coinbase_balance_after_mining_fee = (
get_account(env.state, env.coinbase).balance + transaction_fee
)
set_account_balance(
env.state, env.coinbase, coinbase_balance_after_mining_fee
)
for address in accounts_to_delete:
destroy_account(env.state, address)
return total_gas_used, logs
def validate_transaction(tx: Transaction) -> bool:
"""
Verifies a transaction.
Parameters
----------
tx :
Transaction to validate.
Returns
-------
verified : `bool`
True if the transaction can be executed, or False otherwise.
"""
return calculate_intrinsic_cost(tx) <= tx.gas
def calculate_intrinsic_cost(tx: Transaction) -> Uint:
"""
Calculates the intrinsic cost of the transaction that is charged before
execution is instantiated.
Parameters
----------
tx :
Transaction to compute the intrinsic cost of.
Returns
-------
    cost : `eth1spec.base_types.Uint`
        The intrinsic cost of the transaction.
"""
data_cost = 0
for byte in tx.data:
if byte == 0:
data_cost += TX_DATA_COST_PER_ZERO
else:
data_cost += TX_DATA_COST_PER_NON_ZERO
if tx.to == Bytes0(b""):
create_cost = TX_CREATE_COST
else:
create_cost = 0
return Uint(TX_BASE_COST + data_cost + create_cost)
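# Illustrative worked example (not part of the original spec code), assuming
# the usual constants TX_BASE_COST = 21000, TX_DATA_COST_PER_ZERO = 4 and
# TX_DATA_COST_PER_NON_ZERO = 68: a message-call transaction whose data is
# b"\x00\x01" has an intrinsic cost of 21000 + 4 + 68 = 21072, while the same
# payload in a contract-creation transaction additionally pays TX_CREATE_COST.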
def recover_sender(tx: Transaction) -> Address:
"""
Extracts the sender address from a transaction.
Parameters
----------
tx :
Transaction of interest.
Returns
-------
sender : `eth1spec.eth_types.Address`
The address of the account that signed the transaction.
"""
v, r, s = tx.v, tx.r, tx.s
# if v > 28:
# v = v - (chain_id*2+8)
ensure(v == 27 or v == 28)
ensure(0 < r and r < SECP256K1N)
ensure(0 < s and s <= SECP256K1N // 2)
public_key = crypto.secp256k1_recover(r, s, v - 27, signing_hash(tx))
return Address(crypto.keccak256(public_key)[12:32])
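# Illustrative note (not part of the original spec code): the recovered
# `public_key` is the 64-byte uncompressed secp256k1 point, and the sender
# address is the last 20 bytes of its keccak256 hash, which is why the
# slice [12:32] is taken above.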
def signing_hash(tx: Transaction) -> Hash32:
"""
Compute the hash of a transaction used in the signature.
Parameters
----------
tx :
Transaction of interest.
Returns
-------
hash : `eth1spec.eth_types.Hash32`
Hash of the transaction.
"""
return crypto.keccak256(
rlp.encode(
(
tx.nonce,
tx.gas_price,
tx.gas,
tx.to,
tx.value,
tx.data,
)
)
)
def compute_header_hash(header: Header) -> Hash32:
"""
Computes the hash of a block header.
Parameters
----------
header :
Header of interest.
Returns
-------
hash : `ethereum.eth_types.Hash32`
Hash of the header.
"""
return crypto.keccak256(rlp.encode(header))
def get_block_header_by_hash(hash: Hash32, chain: BlockChain) -> Header:
"""
Fetches the block header with the corresponding hash.
Parameters
----------
hash :
Hash of the header of interest.
chain :
History and current state.
Returns
-------
Header : `ethereum.eth_types.Header`
Block header found by its hash.
"""
for block in chain.blocks:
if compute_header_hash(block.header) == hash:
return block.header
else:
raise ValueError(f"Could not find header with hash={hash.hex()}")
def check_gas_limit(gas_limit: Uint, parent_gas_limit: Uint) -> bool:
"""
Validates the gas limit for a block.
Parameters
----------
gas_limit :
Gas limit to validate.
parent_gas_limit :
Gas limit of the parent block.
Returns
-------
check : `bool`
True if gas limit constraints are satisfied, False otherwise.
"""
max_adjustment_delta = parent_gas_limit // GAS_LIMIT_ADJUSTMENT_FACTOR
if gas_limit >= parent_gas_limit + max_adjustment_delta:
return False
if gas_limit <= parent_gas_limit - max_adjustment_delta:
return False
if gas_limit < GAS_LIMIT_MINIMUM:
return False
return True
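# Illustrative worked example (not part of the original spec code): with a
# parent gas limit of 1_000_000 the adjustment delta is 1_000_000 // 1024 =
# 976, so any new gas limit strictly between 999_024 and 1_000_976 that is
# also at least GAS_LIMIT_MINIMUM passes the check.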
def calculate_block_difficulty(
parent_block_number: Uint,
timestamp: U256,
parent_timestamp: U256,
parent_difficulty: Uint,
) -> Uint:
"""
Computes difficulty of a block using its header and parent header.
Parameters
----------
parent_block_number :
Block number of the parent block.
timestamp :
Timestamp of the block.
parent_timestamp :
Timestamp of the parent block.
parent_difficulty :
difficulty of the parent block.
Returns
-------
difficulty : `ethereum.base_types.Uint`
Computed difficulty for a block.
"""
offset = (
int(parent_difficulty)
// 2048
* max(1 - int(timestamp - parent_timestamp) // 10, -99)
)
difficulty = int(parent_difficulty) + offset
# Historical Note: The difficulty bomb was not present in Ethereum at the
# start of Frontier, but was added shortly after launch. However since the
# bomb has no effect prior to block 200000 we pretend it existed from
# genesis.
# See https://github.com/ethereum/go-ethereum/pull/1588
num_bomb_periods = ((int(parent_block_number) + 1) // 100000) - 2
if num_bomb_periods >= 0:
return Uint(
max(difficulty + 2 ** num_bomb_periods, GENESIS_DIFFICULTY)
)
else:
return Uint(max(difficulty, GENESIS_DIFFICULTY))
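# Illustrative worked example (not part of the original spec code): for a
# parent with difficulty 131_072 mined 5 seconds before its child, the offset
# is 131_072 // 2048 * max(1 - 5 // 10, -99) = 64, giving 131_136; early in
# the chain num_bomb_periods is still negative, so the result is
# max(131_136, GENESIS_DIFFICULTY) = 131_136.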
|
py | b40f40af9d7e1d8a0b56338e44fc7a05014687e0 | """Provide the RPC."""
import asyncio
import inspect
import io
import logging
import os
import sys
import threading
import traceback
import uuid
import weakref
from collections import OrderedDict
from functools import reduce
from .utils import (
FuturePromise,
MessageEmitter,
ReferenceStore,
dotdict,
format_traceback,
)
API_VERSION = "0.2.3"
ALLOWED_MAGIC_METHODS = ["__enter__", "__exit__"]
IO_METHODS = [
"fileno",
"seek",
"truncate",
"detach",
"write",
"read",
"read1",
"readall",
"close",
"closed",
"__enter__",
"__exit__",
"flush",
"isatty",
"__iter__",
"__next__",
"readable",
"readline",
"readlines",
"seekable",
"tell",
"writable",
"writelines",
]
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger("RPC")
def index_object(obj, ids):
"""Index an object."""
if isinstance(ids, str):
return index_object(obj, ids.split("."))
elif len(ids) == 0:
return obj
else:
if isinstance(obj, dict):
_obj = obj[ids[0]]
elif isinstance(obj, (list, tuple)):
_obj = obj[int(ids[0])]
else:
_obj = getattr(obj, ids[0])
return index_object(_obj, ids[1:])
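# Illustrative usage (not part of the original module): index_object resolves
# a dotted path against nested dicts, sequences and attributes, e.g.
# index_object({"math": {"add": [10, 20]}}, "math.add.1") returns 20, and
# index_object(obj, "api.run") is equivalent to obj["api"].run when obj is a
# dict holding an object with a `run` attribute.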
class RPC(MessageEmitter):
"""Represent the RPC."""
def __init__(self, connection, rpc_context, config=None, codecs=None):
"""Set up instance."""
self.manager_api = {}
self.services = {}
self._object_store = {}
self._method_weakmap = weakref.WeakKeyDictionary()
self._object_weakmap = weakref.WeakKeyDictionary()
self._local_api = None
self._remote_set = False
self._store = ReferenceStore()
self._remote_interface = None
self._codecs = codecs or {}
self.work_dir = os.getcwd()
self.abort = threading.Event()
self.id = None
self.rpc_context = rpc_context
if config is None:
config = {}
self.set_config(config)
self._remote_logger = dotdict({"info": self._log, "error": self._error})
super().__init__(self._remote_logger)
try:
# FIXME: What exception do we expect?
self.loop = asyncio.get_event_loop()
except RuntimeError:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if connection is not None:
self._connection = connection
self._setup_handlers(connection)
self.check_modules()
def init(self):
"""Initialize the RPC."""
logger.info("%s initialized", self.config.name)
self._connection.emit(
{
"type": "initialized",
"config": dict(self.config),
"peer_id": self._connection.peer_id,
}
)
def reset(self):
"""Reset."""
self._event_handlers = {}
self.services = {}
self._object_store = {}
self._method_weakmap = weakref.WeakKeyDictionary()
self._object_weakmap = weakref.WeakKeyDictionary()
self._local_api = None
self._remote_set = False
self._store = ReferenceStore()
self._remote_interface = None
def disconnect(self):
"""Disconnect."""
self._connection.emit({"type": "disconnect"})
self.reset()
self._connection.disconnect()
def default_exit(self):
"""Exit default."""
logger.info("Terminating plugin: %s", self.id)
self.abort.set()
def set_config(self, config):
"""Set config."""
if config is not None:
config = dotdict(config)
else:
config = dotdict()
self.id = config.id or self.id or str(uuid.uuid4())
self.allow_execution = config.allow_execution or False
self.config = dotdict(
{
"allow_execution": self.allow_execution,
"api_version": API_VERSION,
"dedicated_thread": True,
"description": config.description or "[TODO]",
"id": self.id,
"lang": "python",
"name": config.name or "ImJoy RPC Python",
"type": "rpc-worker",
"work_dir": self.work_dir,
"version": config.version or "0.1.0",
}
)
def get_remote(self):
"""Return the remote interface."""
return self._remote_interface
def set_interface(self, api, config=None):
"""Set interface."""
# TODO: setup forwarding_functions
if config:
self.set_config(config)
        # store it in a dotdict so that the methods are hashable
self._local_api = dotdict(api) if isinstance(api, dict) else api
if not self._remote_set:
self._fire("interfaceAvailable")
else:
self.send_interface()
        # we might have installed modules while solving requirements,
        # so check again
self.check_modules()
fut = self.loop.create_future()
def done(result):
if not fut.done():
fut.set_result(result)
self.once("interfaceSetAsRemote", done)
return fut
def check_modules(self):
"""Check if all the modules exists."""
try:
import numpy as np
self.NUMPY_MODULE = np
except ImportError:
self.NUMPY_MODULE = False
logger.warning(
"Failed to import numpy, ndarray encoding/decoding will not work"
)
def request_remote(self):
"""Request remote interface."""
self._connection.emit({"type": "getInterface"})
def send_interface(self):
"""Send interface."""
if self._local_api is None:
raise Exception("interface is not set.")
if isinstance(self._local_api, dict):
api = {
a: self._local_api[a]
for a in self._local_api.keys()
if not a.startswith("_") or a in ALLOWED_MAGIC_METHODS
}
elif inspect.isclass(type(self._local_api)):
api = {
a: getattr(self._local_api, a)
for a in dir(self._local_api)
if not a.startswith("_") or a in ALLOWED_MAGIC_METHODS
}
else:
raise Exception("unsupported api export")
api = self._encode(api, True)
self._connection.emit({"type": "setInterface", "api": api})
def _dispose_object(self, object_id):
if object_id in self._object_store:
del self._object_store[object_id]
else:
raise Exception("Object (id={}) not found.".format(object_id))
def dispose_object(self, obj):
"""Dispose object."""
if obj in self._object_weakmap:
object_id = self._object_weakmap[obj]
else:
raise Exception("Invalid object")
def pfunc(resolve, reject):
"""Handle plugin function."""
def handle_disposed(data):
"""Handle disposed."""
if "error" in data:
reject(data["error"])
else:
resolve(None)
self._connection.once("disposed", handle_disposed)
self._connection.emit({"type": "disposeObject", "object_id": object_id})
return FuturePromise(pfunc, self._remote_logger)
def _gen_remote_method(self, target_id, name, plugin_id=None):
"""Return remote method."""
def remote_method(*arguments, **kwargs):
"""Run remote method."""
arguments = list(arguments)
# wrap keywords to a dictionary and pass to the last argument
if kwargs:
arguments = arguments + [kwargs]
def pfunc(resolve, reject):
encoded_promise = self.wrap([resolve, reject])
# store the key id for removing them from the reference store together
resolve.__promise_pair = encoded_promise[0]["_rvalue"]
reject.__promise_pair = encoded_promise[1]["_rvalue"]
if name in [
"register",
"registerService",
"register_service",
"export",
"on",
]:
args = self.wrap(arguments, as_interface=True)
else:
args = self.wrap(arguments)
call_func = {
"type": "method",
"target_id": target_id,
"name": name,
"object_id": plugin_id,
"args": args,
"with_kwargs": bool(kwargs),
"promise": encoded_promise,
}
self._connection.emit(call_func)
return FuturePromise(pfunc, self._remote_logger, self.dispose_object)
remote_method.__remote_method = True # pylint: disable=protected-access
return remote_method
def _gen_remote_callback(self, target_id, cid, with_promise):
"""Return remote callback."""
if with_promise:
def remote_callback(*arguments, **kwargs):
# wrap keywords to a dictionary and pass to the last argument
arguments = list(arguments)
if kwargs:
arguments = arguments + [kwargs]
def pfunc(resolve, reject):
encoded_promise = self.wrap([resolve, reject])
# store the key id
# for removing them from the reference store together
resolve.__promise_pair = encoded_promise[0]["_rvalue"]
reject.__promise_pair = encoded_promise[1]["_rvalue"]
self._connection.emit(
{
"type": "callback",
"id": cid,
"target_id": target_id,
# 'object_id' : self.id,
"args": self.wrap(arguments),
"with_kwargs": bool(kwargs),
"promise": encoded_promise,
}
)
return FuturePromise(pfunc, self._remote_logger, self.dispose_object)
else:
def remote_callback(*arguments, **kwargs):
# wrap keywords to a dictionary and pass to the last argument
arguments = list(arguments)
if kwargs:
arguments = arguments + [kwargs]
self._connection.emit(
{
"type": "callback",
"id": cid,
"target_id": target_id,
# 'object_id' : self.id,
"args": self.wrap(arguments),
"with_kwargs": bool(kwargs),
}
)
return remote_callback
def set_remote_interface(self, api):
"""Set remote interface."""
_remote = self._decode(api, False)
# update existing interface instead of recreating it
# this will preserve the object reference
if self._remote_interface:
self._remote_interface.clear()
for k in _remote:
self._remote_interface[k] = _remote[k]
else:
self._remote_interface = _remote
self._fire("remoteReady")
self._run_with_context(self._set_remote_api, _remote)
def _set_remote_api(self, _remote):
"""Set remote API."""
self.rpc_context.api = _remote
self.rpc_context.api.WORK_DIR = self.work_dir
if "config" not in self.rpc_context.api:
self.rpc_context.api.config = dotdict()
self.rpc_context.api.config.work_dir = self.work_dir
def _log(self, info):
self._connection.emit({"type": "log", "message": info})
def _error(self, error):
self._connection.emit({"type": "error", "message": error})
def _call_method(
self, method, args, kwargs, resolve=None, reject=None, method_name=None
):
try:
result = method(*args, **kwargs)
if result is not None and inspect.isawaitable(result):
async def _wait(result):
try:
result = await result
if resolve is not None:
resolve(result)
elif result is not None:
logger.debug("returned value %s", result)
except Exception as err:
traceback_error = traceback.format_exc()
logger.exception("Error in method %s", err)
self._connection.emit(
{"type": "error", "message": traceback_error}
)
if reject is not None:
reject(Exception(format_traceback(traceback_error)))
asyncio.ensure_future(_wait(result))
else:
if resolve is not None:
resolve(result)
except Exception as err:
traceback_error = traceback.format_exc()
logger.error("Error in method %s: %s", method_name, err)
self._connection.emit({"type": "error", "message": traceback_error})
if reject is not None:
reject(Exception(format_traceback(traceback_error)))
def _run_with_context(self, func, *args, **kwargs):
self.rpc_context.run_with_context(self.id, func, *args, **kwargs)
def _setup_handlers(self, connection):
connection.on("init", self.init)
connection.on("execute", self._handle_execute)
connection.on("method", self._handle_method)
connection.on("callback", self._handle_callback)
connection.on("error", self._handle_error)
connection.on("disconnected", self._disconnected_hanlder)
connection.on("getInterface", self._get_interface_handler)
connection.on("setInterface", self._set_interface_handler)
connection.on("interfaceSetAsRemote", self._remote_set_handler)
connection.on("disposeObject", self._dispose_object_handler)
def _dispose_object_handler(self, data):
try:
self._dispose_object(data["object_id"])
self._connection.emit({"type": "disposed"})
except Exception as e:
logger.error("failed to dispose object: %s", e)
self._connection.emit({"type": "disposed", "error": str(e)})
    def _disconnected_handler(self, data):
self._fire("beforeDisconnect")
self._connection.disconnect()
self._fire("disconnected", data)
def _get_interface_handler(self, data):
if self._local_api is not None:
self.send_interface()
else:
self.once("interfaceAvailable", self.send_interface)
def _set_interface_handler(self, data):
self.set_remote_interface(data["api"])
self._connection.emit({"type": "interfaceSetAsRemote"})
def _remote_set_handler(self, data):
self._remote_set = True
self._fire("interfaceSetAsRemote")
def _handle_execute(self, data):
if self.allow_execution:
try:
t = data["code"]["type"]
if t == "script":
content = data["code"]["content"]
# TODO: fix the imjoy module such that it will
# stick to the current context api
exec(content)
elif t == "requirements":
pass
else:
raise Exception("unsupported type")
self._connection.emit({"type": "executed"})
except Exception as err:
traceback_error = traceback.format_exc()
logger.exception("Error during execution: %s", err)
self._connection.emit({"type": "executed", "error": traceback_error})
else:
self._connection.emit(
{"type": "executed", "error": "execution is not allowed"}
)
logger.warn("execution is blocked due to allow_execution=False")
def _handle_method(self, data):
reject = None
try:
if "promise" in data:
resolve, reject = self.unwrap(data["promise"], False)
_interface = self._object_store[data["object_id"]]
method = index_object(_interface, data["name"])
if "promise" in data:
args = self.unwrap(data["args"], True)
if data.get("with_kwargs"):
kwargs = args.pop()
else:
kwargs = {}
# args.append({'id': self.id})
self._run_with_context(
self._call_method,
method,
args,
kwargs,
resolve=resolve,
reject=reject,
method_name=data["name"],
)
else:
args = self.unwrap(data["args"], True)
if data.get("with_kwargs"):
kwargs = args.pop()
else:
kwargs = {}
# args.append({'id': self.id})
self._run_with_context(
self._call_method, method, args, kwargs, method_name=data["name"]
)
except Exception as err:
traceback_error = traceback.format_exc()
logger.exception("Error during calling method: %s", err)
self._connection.emit({"type": "error", "message": traceback_error})
if callable(reject):
reject(traceback_error)
def _handle_callback(self, data):
reject = None
try:
if "promise" in data:
resolve, reject = self.unwrap(data["promise"], False)
method = self._store.fetch(data["id"])
if method is None:
raise Exception(
"Callback function can only called once, "
"if you want to call a function for multiple times, "
"please make it as a plugin api function. "
"See https://imjoy.io/docs for more details."
)
args = self.unwrap(data["args"], True)
if data.get("with_kwargs"):
kwargs = args.pop()
else:
kwargs = {}
self._run_with_context(
self._call_method,
method,
args,
kwargs,
resolve=resolve,
reject=reject,
method_name=data["id"],
)
else:
method = self._store.fetch(data["id"])
if method is None:
raise Exception(
"Callback function can only called once, "
"if you want to call a function for multiple times, "
"please make it as a plugin api function. "
"See https://imjoy.io/docs for more details."
)
args = self.unwrap(data["args"], True)
if data.get("with_kwargs"):
kwargs = args.pop()
else:
kwargs = {}
self._run_with_context(
self._call_method, method, args, kwargs, method_name=data["id"]
)
except Exception as err:
traceback_error = traceback.format_exc()
logger.exception("error when calling callback function: %s", err)
self._connection.emit({"type": "error", "message": traceback_error})
if callable(reject):
reject(traceback_error)
def _handle_error(self, detail):
self._fire("error", detail)
def wrap(self, args, as_interface=False):
"""Wrap arguments."""
wrapped = self._encode(args, as_interface=as_interface)
return wrapped
def _encode(self, a_object, as_interface=False, object_id=None):
"""Encode object."""
if isinstance(a_object, (int, float, bool, str, bytes)) or a_object is None:
return a_object
if callable(a_object):
if as_interface:
if not object_id:
raise Exception("object_id is not specified.")
b_object = {
"_rtype": "interface",
"_rtarget_id": self._connection.peer_id,
"_rintf": object_id,
"_rvalue": as_interface,
}
try:
self._method_weakmap[a_object] = b_object
except Exception:
pass
elif a_object in self._method_weakmap:
b_object = self._method_weakmap[a_object]
else:
cid = self._store.put(a_object)
b_object = {
"_rtype": "callback",
# Some functions do not have the __name__ attribute
# for example when we use functools.partial to create functions
"_rname": getattr(a_object, "__name__", cid),
"_rtarget_id": self._connection.peer_id,
"_rvalue": cid,
}
return b_object
if isinstance(a_object, tuple):
a_object = list(a_object)
if isinstance(a_object, dict):
a_object = dict(a_object)
# skip if already encoded
if isinstance(a_object, dict) and "_rtype" in a_object:
# make sure the interface functions are encoded
if "_rintf" in a_object:
temp = a_object["_rtype"]
del a_object["_rtype"]
b_object = self._encode(a_object, as_interface, object_id)
b_object._rtype = temp
else:
b_object = a_object
return b_object
isarray = isinstance(a_object, list)
b_object = None
encoded_obj = None
for tp in self._codecs:
codec = self._codecs[tp]
if codec.encoder and isinstance(a_object, codec.type):
# TODO: what if multiple encoders found
encoded_obj = codec.encoder(a_object)
if isinstance(encoded_obj, dict) and "_rtype" not in encoded_obj:
encoded_obj["_rtype"] = codec.name
# encode the functions in the interface object
if isinstance(encoded_obj, dict) and "_rintf" in encoded_obj:
temp = encoded_obj["_rtype"]
del encoded_obj["_rtype"]
encoded_obj = self._encode(encoded_obj, True)
encoded_obj["_rtype"] = temp
b_object = encoded_obj
return b_object
if self.NUMPY_MODULE and isinstance(
a_object, (self.NUMPY_MODULE.ndarray, self.NUMPY_MODULE.generic)
):
v_bytes = a_object.tobytes()
b_object = {
"_rtype": "ndarray",
"_rvalue": v_bytes,
"_rshape": a_object.shape,
"_rdtype": str(a_object.dtype),
}
elif isinstance(a_object, Exception):
b_object = {"_rtype": "error", "_rvalue": str(a_object)}
elif isinstance(a_object, memoryview):
b_object = {"_rtype": "memoryview", "_rvalue": a_object.tobytes()}
elif isinstance(
a_object, (io.IOBase, io.TextIOBase, io.BufferedIOBase, io.RawIOBase)
):
b_object = {
m: getattr(a_object, m) for m in IO_METHODS if hasattr(a_object, m)
}
b_object["_rintf"] = True
b_object = self._encode(b_object)
# NOTE: "typedarray" is not used
elif isinstance(a_object, OrderedDict):
b_object = {
"_rtype": "orderedmap",
"_rvalue": self._encode(list(a_object), as_interface),
}
elif isinstance(a_object, set):
b_object = {
"_rtype": "set",
"_rvalue": self._encode(list(a_object), as_interface),
}
elif hasattr(a_object, "_rintf") and a_object._rintf is True:
b_object = self._encode(a_object, True)
elif isinstance(a_object, (list, dict)) or inspect.isclass(type(a_object)):
b_object = [] if isarray else {}
if not isinstance(a_object, (list, dict)) and inspect.isclass(
type(a_object)
):
a_object_norm = {
a: getattr(a_object, a)
for a in dir(a_object)
if not a.startswith("_") or a in ALLOWED_MAGIC_METHODS
}
# always encode class instance as interface
as_interface = True
else:
a_object_norm = a_object
keys = range(len(a_object_norm)) if isarray else a_object_norm.keys()
# encode interfaces
if (not isarray and a_object_norm.get("_rintf")) or as_interface:
if object_id is None:
object_id = str(uuid.uuid4())
self._object_store[object_id] = a_object
has_function = False
for key in keys:
if isinstance(key, str) and (
key.startswith("_") and key not in ALLOWED_MAGIC_METHODS
):
continue
encoded = self._encode(
a_object_norm[key],
as_interface + "." + str(key)
if isinstance(as_interface, str)
else str(key),
# We need to convert to a string here,
# otherwise 0 will not be truthy.
object_id,
)
if callable(a_object_norm[key]):
has_function = True
if isarray:
b_object.append(encoded)
else:
b_object[key] = encoded
                # TODO: how to dispose list objects? create a wrapper for list?
if not isarray and has_function:
b_object["_rintf"] = object_id
# remove interface when closed
if "on" in a_object_norm and callable(a_object_norm["on"]):
def remove_interface(_):
if object_id in self._object_store:
del self._object_store[object_id]
a_object_norm["on"]("close", remove_interface)
else:
for key in keys:
if isarray:
b_object.append(self._encode(a_object_norm[key]))
else:
b_object[key] = self._encode(a_object_norm[key])
else:
raise Exception("imjoy-rpc: Unsupported data type:" + str(a_object))
return b_object
def unwrap(self, args, with_promise):
"""Unwrap arguments."""
# wraps each callback so that the only one could be called
result = self._decode(args, with_promise)
return result
def _decode(self, a_object, with_promise):
"""Decode object."""
if a_object is None:
return a_object
if isinstance(a_object, dict) and "_rtype" in a_object:
b_object = None
if (
self._codecs.get(a_object["_rtype"])
and self._codecs[a_object["_rtype"]].decoder
):
if "_rintf" in a_object:
temp = a_object["_rtype"]
del a_object["_rtype"]
a_object = self._decode(a_object, with_promise)
a_object["_rtype"] = temp
b_object = self._codecs[a_object["_rtype"]].decoder(a_object)
elif a_object["_rtype"] == "callback":
b_object = self._gen_remote_callback(
a_object.get("_rtarget_id"), a_object["_rvalue"], with_promise
)
elif a_object["_rtype"] == "interface":
b_object = self._gen_remote_method(
a_object.get("_rtarget_id"), a_object["_rvalue"], a_object["_rintf"]
)
elif a_object["_rtype"] == "ndarray":
                # rebuild the ndarray (numpy array/tensor) on this side if possible
try:
if isinstance(a_object["_rvalue"], (list, tuple)):
a_object["_rvalue"] = reduce(
(lambda x, y: x + y), a_object["_rvalue"]
)
elif not isinstance(a_object["_rvalue"], bytes):
raise Exception(
"Unsupported data type: " + str(type(a_object["_rvalue"]))
)
if self.NUMPY_MODULE:
b_object = self.NUMPY_MODULE.frombuffer(
a_object["_rvalue"], dtype=a_object["_rdtype"]
).reshape(tuple(a_object["_rshape"]))
else:
b_object = a_object
logger.warn("numpy is not available, failed to decode ndarray")
except Exception as exc:
logger.debug("Error in converting: %s", exc)
b_object = a_object
raise exc
elif a_object["_rtype"] == "memoryview":
b_object = memoryview(a_object["_rvalue"])
elif a_object["_rtype"] == "blob":
if isinstance(a_object["_rvalue"], str):
b_object = io.StringIO(a_object["_rvalue"])
elif isinstance(a_object["_rvalue"], bytes):
b_object = io.BytesIO(a_object["_rvalue"])
else:
raise Exception(
"Unsupported blob value type: " + str(type(a_object["_rvalue"]))
)
elif a_object["_rtype"] == "typedarray":
if self.NUMPY_MODULE:
b_object = self.NUMPY_MODULE.frombuffer(
a_object["_rvalue"], dtype=a_object["_rdtype"]
)
else:
b_object = a_object["_rvalue"]
elif a_object["_rtype"] == "orderedmap":
b_object = OrderedDict(self._decode(a_object["_rvalue"], with_promise))
elif a_object["_rtype"] == "set":
b_object = set(self._decode(a_object["_rvalue"], with_promise))
elif a_object["_rtype"] == "error":
b_object = Exception(a_object["_rvalue"])
else:
# make sure all the interface functions are decoded
if "_rintf" in a_object:
temp = a_object["_rtype"]
del a_object["_rtype"]
a_object = self._decode(a_object, with_promise)
a_object["_rtype"] = temp
b_object = a_object
elif isinstance(a_object, (dict, list, tuple)):
if isinstance(a_object, tuple):
a_object = list(a_object)
isarray = isinstance(a_object, list)
b_object = [] if isarray else dotdict()
keys = range(len(a_object)) if isarray else a_object.keys()
for key in keys:
val = a_object[key]
if isarray:
b_object.append(self._decode(val, with_promise))
else:
b_object[key] = self._decode(val, with_promise)
# make sure we have bytes instead of memoryview, e.g. for Pyodide
elif isinstance(a_object, memoryview):
b_object = a_object.tobytes()
elif isinstance(a_object, bytearray):
b_object = bytes(a_object)
else:
b_object = a_object
# object id, used for dispose the object
if isinstance(a_object, dict) and a_object.get("_rintf"):
# make the dict hashable
if isinstance(b_object, dict):
if not isinstance(b_object, dotdict):
b_object = dotdict(b_object)
# __rid__ is used for hashing the object for removing it afterwards
b_object.__rid__ = a_object.get("_rintf")
self._object_weakmap[b_object] = a_object.get("_rintf")
return b_object
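# Illustrative note (not part of the original module): the codec objects
# consulted in _encode/_decode above are assumed to expose `name`, `type`,
# `encoder` and `decoder` attributes. For example, a hypothetical codec
# registered as
#   codecs={"my-array": SimpleNamespace(name="my-array", type=MyArray,
#                                       encoder=to_dict, decoder=from_dict)}
# would have its encoder applied to MyArray instances and its decoder applied
# to payloads tagged with _rtype == "my-array".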
|
py | b40f4192e2f9aca66bb8d01c35152bf63a6cd947 | class cal:
cal_name = 'computer'
def __init__(self,x,y):
self.x = x
self.y = y
    # @property before cal_add lets it be read like an attribute (no call parentheses), encapsulating the computation
    @property
    def cal_add(self):
        return self.x + self.y
    # @classmethod before cal_info turns it into a class method; it can only access class attributes, not instance attributes
    @classmethod
    def cal_info(cls): # Python automatically passes cls, which is the class itself
        print(cls.cal_name) # cls.cal_name reads the class's own attribute
    @staticmethod # static method: callable on either the class or an instance
    def cal_test(a, b, c): # this static method takes neither self nor cls
        print(a, b, c)
c1 = cal(10,11)
c1.cal_test(1,2,3)
c1.cal_info()
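# Illustrative additions (not in the original snippet): the class method and
# the static method can also be called on the class itself, without an
# instance, whereas the property below is read without parentheses.
cal.cal_info()        # prints the class attribute 'computer'
cal.cal_test(4, 5, 6) # the static method works on the class too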
print(c1.cal_add) |
py | b40f421e5d1496df43dde2813611d35fae7659c5 | """ Docker manager
The docker manager is responsible for communicating with the docker
daemon and is a wrapper around the docker module. It has methods
for creating docker networks and docker volumes, starting containers
and retrieving results from finished containers.
TODO the task folder is also created by this class. This folder needs
to be cleaned at some point.
"""
import os
import time
import logging
import docker
import pathlib
import re
from typing import NamedTuple
from vantage6.common.docker_addons import pull_if_newer
from vantage6.common.globals import APPNAME
from vantage6.node.util import logger_name
class Result(NamedTuple):
""" Data class to store the result of the docker image."""
result_id: int
logs: str
data: str
status_code: int
class DockerManager(object):
""" Wrapper for the docker module, to be used specifically for vantage6.
    It maps docker image runs (`run(image)`) to results. It manages
    docker images and files (input, output, token, logs). Docker images run
    in detached mode, which allows multiple docker containers to run at
    the same time. Results (async) can be retrieved through
`get_result()` which returns the first available result.
"""
log = logging.getLogger(logger_name(__name__))
def __init__(self, allowed_images, tasks_dir, isolated_network_name: str,
node_name: str, data_volume_name: str) -> None:
""" Initialization of DockerManager creates docker connection and
sets some default values.
        :param allowed_images: list of regular expressions for allowed
            docker images. An empty list implies that all images are allowed.
:param tasks_dir: folder to store task related data.
"""
self.log.debug("Initializing DockerManager")
self.data_volume_name = data_volume_name
self.database_uri = None
self.__tasks_dir = tasks_dir
# Connect to docker daemon
# self.docker = docker.DockerClient(base_url=docker_socket_path)
self.docker = docker.from_env()
# Connect to docker swarm
try:
self.docker.swarm.join(remote_addrs=['<IP>'], join_token='<TOKEN>')
        except Exception:
self.docker.swarm.leave()
self.docker.swarm.join(remote_addrs=['<IP>'], join_token='<TOKEN>')
# keep track of the running containers
self.active_tasks = []
# before a task is executed it gets exposed to these regex
self._allowed_images = allowed_images
# create / get isolated network to which algorithm containers
# can attach
self.network_name = isolated_network_name
self._isolated_network = self._create_network()
# node name is used to identify algorithm containers belonging
# to this node. This is required as multiple nodes may run at
# a single machine sharing the docker daemon while using a
# different server. Using a different server means that there
# could be duplicate result_id's running at the node at the same
# time.
self.node_name = node_name
def __refresh_container_statuses(self):
""" Refreshes the states of the containers.
"""
for task in self.active_tasks:
task["container"].reload()
def __make_task_dir(self, result_id: int):
""" Creates a task directory for a specific result.
:param result_id: unique result id for which the folder is
intended
"""
task_dir = os.path.join(
self.__tasks_dir,
"task-{0:09d}".format(result_id)
)
self.log.info(f"Using '{task_dir}' for task")
if os.path.exists(task_dir):
self.log.debug(f"Task directory already exists: '{task_dir}'")
else:
try:
os.makedirs(task_dir)
except Exception as e:
self.log.error(f"Could not create task directory: {task_dir}")
self.log.exception(e)
raise e
return task_dir
def _create_network(self) -> docker.models.networks.Network:
""" Creates an internal (docker) network
Used by algorithm containers to communicate with the node API.
:param name: name of the internal network
"""
name = self.network_name
try:
network = self.docker.networks.get(name)
self.log.debug(f"Network {name} already exists. Deleting it.")
network.remove()
except Exception:
self.log.debug("No network found...")
self.log.debug(f"Creating isolated docker-network {name}!")
internal_ = self.running_in_docker()
if not internal_:
self.log.warn(
"Algorithms have internet connection! "
"This happens because you use 'vnode-local'!"
)
network = self.docker.networks.create(
name,
driver="bridge",
internal=internal_,
scope="local"
)
return network
def connect_to_isolated_network(self, container_name, aliases):
"""Connect to the isolated network."""
msg = f"Connecting to isolated network '{self.network_name}'"
self.log.debug(msg)
# If the network already exists, this is a no-op.
self._isolated_network.connect(container_name, aliases=aliases)
def connect_to_overlay_network(self, container_name, aliases):
overlay_network = self.docker.networks.get("w4xuz16w38jr")
print(overlay_network)
msg = f"Connecting to overlay network"
self.log.debug(msg)
overlay_network.connect(container_name, aliases=aliases)
def create_volume(self, volume_name: str):
"""Create a temporary volume for a single run.
A single run can consist of multiple algorithm containers.
It is important to note that all algorithm containers having
        the same run_id have access to this volume.
        :param volume_name: name of the docker volume to create
"""
try:
self.docker.volumes.get(volume_name)
self.log.debug(f"Volume {volume_name} already exists.")
except docker.errors.NotFound:
self.log.debug(f"Creating volume {volume_name}")
self.docker.volumes.create(volume_name)
def is_docker_image_allowed(self, docker_image_name: str):
""" Checks the docker image name.
Against a list of regular expressions as defined in the
configuration file. If no expressions are defined, all
docker images are accepted.
:param docker_image_name: uri to the docker image
"""
# if no limits are declared
if not self._allowed_images:
self.log.warn("All docker images are allowed on this Node!")
return True
# check if it matches any of the regex cases
for regex_expr in self._allowed_images:
expr_ = re.compile(regex_expr)
if expr_.match(docker_image_name):
return True
# if not, it is considered an illegal image
return False
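    # Illustrative note (not part of the original class): with
    # allowed_images=[r"^harbor2\.vantage6\.ai/"] only images from that
    # registry pass, e.g. "harbor2.vantage6.ai/demo/average" is accepted while
    # "docker.io/someuser/average" is rejected; an empty list accepts every
    # image. The registry name here is only an example.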
def is_running(self, result_id):
"""Return True iff a container is already running for <result_id>."""
container = self.docker.containers.list(filters={
"label": [
f"{APPNAME}-type=algorithm",
f"node={self.node_name}",
f"result_id={result_id}"
]
})
return container
def pull(self, image):
"""Pull the latest image."""
try:
self.log.info(f"Retrieving latest image: '{image}'")
# self.docker.images.pull(image)
pull_if_newer(image, self.log)
except Exception as e:
self.log.error(e)
def set_database_uri(self, database_uri):
"""A setter for clarity."""
self.database_uri = database_uri
def run(self, result_id: int, image: str, docker_input: bytes,
tmp_vol_name: int, token: str) -> bool:
"""Runs the docker-image in detached mode.
It will will attach all mounts (input, output and datafile)
to the docker image. And will supply some environment
variables.
:param result_id: server result identifier
:param image: docker image name
:param docker_input: input that can be read by docker container
        :param tmp_vol_name: name of the temporary docker volume for this run
:param token: Bearer token that the container can use
"""
# Verify that an allowed image is used
if not self.is_docker_image_allowed(image):
msg = f"Docker image {image} is not allowed on this Node!"
self.log.critical(msg)
return False
# Check that this task is not already running
if self.is_running(result_id):
self.log.warn("Task is already being executed, discarding task")
self.log.debug(f"result_id={result_id} is discarded")
return False
# Try to pull the latest image
self.pull(image)
# FIXME: We should have a seperate mount/volume for this. At the
# moment this is a potential leak as containers might access input,
# output and token from other containers.
#
# This was not possible yet as mounting volumes from containers
# is terrible when working from windows (as you have to convert
# from windows to unix several times...).
# If we're running in docker __tasks_dir will point to a location on
# the data volume.
# Alternatively, if we're not running in docker it should point to the
# folder on the host that can act like a data volume. In both cases,
# we can just copy the required files to it
task_folder_name = f"task-{result_id:09d}"
task_folder_path = os.path.join(self.__tasks_dir, task_folder_name)
os.makedirs(task_folder_path, exist_ok=True)
if isinstance(docker_input, str):
docker_input = docker_input.encode('utf8')
# Create I/O files & token for the algorithm container
self.log.debug("prepare IO files in docker volume")
io_files = [
('input', docker_input),
('output', b''),
('token', token.encode("ascii")),
]
for (filename, data) in io_files:
filepath = os.path.join(task_folder_path, filename)
with open(filepath, 'wb') as fp:
fp.write(data)
# FIXME: these values should be retrieved from DockerNodeContext
# in some way.
tmp_folder = "/mnt/tmp"
data_folder = "/mnt/data"
volumes = {
tmp_vol_name: {"bind": tmp_folder, "mode": "rw"},
}
if self.running_in_docker():
volumes[self.data_volume_name] = \
{"bind": data_folder, "mode": "rw"}
else:
volumes[self.__tasks_dir] = {"bind": data_folder, "mode": "rw"}
try:
proxy_host = os.environ['PROXY_SERVER_HOST']
except Exception:
print('-' * 80)
print(os.environ)
print('-' * 80)
proxy_host = 'host.docker.internal'
# define enviroment variables for the docker-container, the
# host, port and api_path are from the local proxy server to
# facilitate indirect communication with the central server
# FIXME: we should only prepend data_folder if database_uri is a
# filename
environment_variables = {
"INPUT_FILE": f"{data_folder}/{task_folder_name}/input",
"OUTPUT_FILE": f"{data_folder}/{task_folder_name}/output",
"TOKEN_FILE": f"{data_folder}/{task_folder_name}/token",
"TEMPORARY_FOLDER": tmp_folder,
"DATABASE_URI": data_folder + "/" + self.database_uri,
"HOST": f"http://{proxy_host}",
"PORT": os.environ.get("PROXY_SERVER_PORT", 8080),
"API_PATH": "",
}
self.log.debug(f"environment: {environment_variables}")
self.log.debug(f"volumes: {volumes}")
# attempt to run the image
try:
self.log.info(f"Run docker image={image}")
container = self.docker.containers.run(
image,
detach=True,
tty=True,
init=True,
environment=environment_variables,
network="over-net",
volumes=volumes,
labels={
f"{APPNAME}-type": "algorithm",
"node": self.node_name,
"result_id": str(result_id)
}
)
except Exception as e:
self.log.error('Could not run docker image!?')
self.log.error(e)
return False
# keep track of the container
self.active_tasks.append({
"result_id": result_id,
"container": container,
"output_file": os.path.join(task_folder_path, "output")
})
return True
def get_result(self):
""" Returns the oldest (FIFO) finished docker container.
This is a blocking method until a finished container shows up.
Once the container is obtained and the results are read, the
container is removed from the docker environment.
"""
# get finished results and get the first one, if no result is available
# this is blocking
finished_tasks = []
while not finished_tasks:
self.__refresh_container_statuses()
finished_tasks = [t for t in self.active_tasks
if t['container'].status == 'exited']
time.sleep(1)
# at least one task is finished
finished_task = finished_tasks.pop()
self.log.debug(
f"Result id={finished_task['result_id']} is finished"
)
# get all info from the container and cleanup
container = finished_task["container"]
log = container.logs().decode('utf8')
# report if the container has a different status than 0
status_code = container.attrs["State"]["ExitCode"]
if status_code:
self.log.error(f"Received non-zero exitcode: {status_code}")
self.log.error(f" Container id: {container.id}")
self.log.warn("Will not remove container")
self.log.info(log)
else:
try:
container.remove()
except Exception as e:
self.log.error(f"Failed to remove container {container.name}")
self.log.debug(e)
self.active_tasks.remove(finished_task)
# retrieve results from file
with open(finished_task["output_file"], "rb") as fp:
results = fp.read()
return Result(
result_id=finished_task["result_id"],
logs=log,
data=results,
status_code=status_code
)
def running_in_docker(self):
"""Return True if this code is executed within a Docker container."""
return pathlib.Path('/.dockerenv').exists()
def login_to_registries(self, registies: list = []) -> None:
for registry in registies:
try:
self.docker.login(
username=registry.get("username"),
password=registry.get("password"),
registry=registry.get("registry")
)
self.log.info(f"Logged in to {registry.get('registry')}")
except docker.errors.APIError as e:
self.log.warn(f"Could not login to {registry.get('registry')}")
self.log.debug(e)
|
py | b40f428960bc980178b8fe2562856888af2dcc75 | #!/usr/bin/env python3
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import subprocess
import sys
from penguindome import top_dir, set_gpg, release_files_iter, verify_signature
os.chdir(top_dir)
set_gpg('client')
errors = False
for file in release_files_iter():
try:
verify_signature(file, raise_errors=True)
except subprocess.CalledProcessError as e:
print('Bad signature for {}'.format(file))
print(e.output.decode('utf8').strip())
errors = True
sys.exit(1 if errors else 0)
|
py | b40f42d58b695000ce551b30d562d0df1e610af2 | # -*- coding: utf-8 -*-
{
'': '',
'!=': '!=',
'%(nrows)s records found': '%(nrows)s oppføringer funnet',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'+ And': '+ Og',
'+ Or': '+ Eller',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'[Wiki]': '[Wiki]',
'[X]': '[X]',
'Access': 'Aksess',
'Access Type': 'Aksesstype',
'Accessed': 'Aksessert',
'Accounts': 'Kontoer',
'Active': 'Aktiv',
'Add': 'Legg til',
'Add ': 'Legg til ',
'Add Host': 'Legg til Maskin',
'Add Hosts w/o Ports': 'Legg til Maskin uten Porter',
'Add OS': 'Legg til OS',
'Add OS Ref': 'Legg til OS Ref',
'Add Record': 'Legg til Oppføring',
'Add record to database': 'Legg til oppføring i databasen',
'Add this to the search as an AND term': 'Legg til som en OG betingelse',
'Add this to the search as an OR term': 'Legg til som en ELLER betingelse',
'Add Vulnerability': 'Legg til Sårbarhet',
'Administration': 'Administrasjon',
'Advertised Names': 'Navn fra nettverksoppdagelse',
'All Hosts': 'Maskiner',
'All w/ Vulns': 'Alle m/ Sårbarheter',
'An error occured, please %s the page': 'En feil oppsto, forsøk å %s siden',
'API Settings': 'API Innstillinger',
'Are you sure you want to delete this object?': 'Er du sikker på at du vil slette dette objektet?',
'Are you sure?': 'Helt sikker?',
'Asset Group': 'Ressursgruppe',
'Asset Group for new Hosts': 'Ressursgruppe for ny Maskin',
'Banner': 'Banner',
'Blacklist': 'Svarteliste',
'Browse Data Directory': 'Se igjennom Datamappe',
'Bruteforce': 'Bruteforce',
'Cannot be empty': 'Feltet kan ikke være tomt',
'Certainty': 'Sannsynlighet',
'Check to delete': 'Huk av for å slette',
'Class': 'Klasse',
'Clear': 'Tøm',
'Client IP': 'Klient IP',
'Close': 'Lukk',
'Comma-separated export including columns not shown; fields from other tables are exported as raw values for faster export': 'Kommaseparerte eksport inkludert koller ikke vist her; felter fra andre tabeller blir eksportert som råe verdier for å gjøre eksporten raskere',
'Comma-separated export of visible columns. Fields from other tables are exported as they appear on-screen but this may be slow for many rows': 'Kommaseparert eksport av viste kolonner. Felter fra andre tabeller blir eksportert som de vises, men dette kan ta lang tid for mange rader',
'Community': 'Community',
'Community String': 'Community String',
'Compr': 'Kompr',
'Compromised': 'Kompromitert',
'Confirmed': 'Bekreftet',
'Connect Vulns/Exploits': 'Lag forbindelse mellom Sårbarhet/Exploit',
'contains': 'inneholder',
'Count': 'Antall',
'CPE Database': 'CPE Database',
'CPE Name': 'CPE Navn',
'Create New Page': 'Opprett Ny Side',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (hidden cols)',
'CSV Backup': 'CSV Backup',
'CSV Hostname Update': 'CSV Maskinnavn Oppdatering',
'CSV Restore': 'CSV Restore',
'Customer XML': 'Kunde XML',
'CVSS': 'CVSS',
'CVSS Access Complexity': 'CVSS Access Complexity (Kompleksitet for å oppnå tilgang)',
'CVSS Access Vector': 'CVSS Access Vector (Nødvendig fysisk/nettverkstilgang for å utnytte)',
'CVSS Authentication': 'CVSS Authentication (Nødvendig autentisering for å utnytte)',
'CVSS Availablity Impact': 'CVSS Availablity Impact (Konsekvens for Tilgjengelighet)',
'CVSS Confidentiality Impact': 'CVSS Confidentiality Impact (Konsekvens for Konfidensialitet)',
'CVSS Enviromental Score': 'CVSS Enviromental Score (Miljøtilpasset risiko)',
'CVSS Integrity Impact': 'CVSS Integrity Impact (Konsekvens for Integritet)',
'CVSS Score': 'CVSS Score (Overordnet Risiko)',
'CVSS Temporal Score': 'CVSS Temporal Score (Sannsynlighetstilpasset risiko)',
'Date Added': 'Opprettet Dato',
'Date Modified': 'Endret Dato',
'Date Published': 'Publisert Dato',
'Del': 'Slett',
'Delete': 'Slett',
'Delete a host': 'Slett en maskin',
'Description': 'Beskrivelse',
'Device Class': 'Maskinklasse',
'Domain': 'Domene',
'Domain Details': 'Domene Detaljer',
'Domain Name': 'Domenenavn',
'Download': 'Last Ned',
'Duration': 'Varighet',
'E-mail': 'E-post',
'Edit': 'Rediger',
'Edit Menu': 'Rediger Meny',
'Edit Page': 'Rediger Side',
'Edit Page Media': 'Rediger Media på Siden',
'Edition': 'Utgivelse',
'Engineer': 'Konsulent',
'Enter a number between %(min)g and %(max)g': 'Benytt et nummer mellom %(min)g og %(max)g',
'Enter an integer between %(min)g and %(max)g': 'Benytt et heltall mellom %(min)g og %(max)g',
'Enter an integer greater than or equal to %(min)g': 'Benytt et heltall større eller lik %(min)g',
'Evidence': 'Bevis',
'Exploit': 'Exploit',
'Exploit Count': 'Antall Exploits',
'Exploit Database (local)': 'Exploit Database (lokal)',
'Exploit linking': 'Exploit linking',
'Exploit List': 'Exploit Liste',
'Exploited': 'Exploited',
'Exploits': 'Exploits',
'Export:': 'Eksport:',
'Family': 'Familie',
'File': 'Fil',
'File Size': 'Filstørrelse',
'Filter by': 'Filtrer etter',
'Filter text': 'Filter tekst',
'First name': 'Fornavn',
'Follow Up': 'Følg opp',
'Fullname': 'Fullt navn',
'GID': 'GID',
'Graph': 'Graf',
'Group ID': 'Gruppe ID',
'GZip Compress': 'GZip Komprimert',
'Hash 1': 'Hash 1',
'Hash 1 Type': 'Hash 1 Type',
'Hash 2': 'Hash 2',
'Hash 2 Type': 'Hash 2 Type',
'Hash1': 'Hash1',
'Hash1 Type': 'Hash1 Type',
'Hash2': 'Hash2',
'Hash2 Type': 'Hash2 Type',
'Home': 'Hjem',
'Host': 'Maskin',
'Host Data': 'Maskindata',
'Host Details': 'Maskindetaljer',
'Host Information': 'Maskininformasjon',
'Host Listing': 'Maskinliste',
'Host Service/Vuln counts': 'Antall Tjeneste/Sårbarhet per Maskin',
'Hostname': 'Maskinnavn',
'Hosts by user': 'Maskiner per bruker',
'Hosts to Ignore': 'Maskiner å Ignorere',
'Hosts to Only Include': 'Inkluder bare Maskiner',
'HTML': 'HTML',
'HTML export of visible columns': 'HTML eksport av viste kolonner',
'ID': 'ID',
'Id': 'Id',
'Import': 'Importer',
'Import CANVAS Exploits': 'Importer CANVAS Exploits',
'Import CPE Data': 'Importer CPE Data',
'Import Exploit XML': 'Importer Exploit XML',
'Import File': 'Importer Fil',
'Import Nexpose Exploits': 'Importer Nexpose Exploits',
'Import VulnID': 'Importer SårbarhetsID',
'Imports': 'Importer',
'in': 'i',
'Install/Update VulnData': 'Installer/Oppdater SårbarhetsData',
'Invalid email': 'Ugyldig e-post',
'Invalid username': 'Ugyldig brukernavn',
'IP Calculator': 'IP Kalkulator',
'IPs w/ Port': 'IPer m/ Port',
'IPv4 Address': 'IPv4 Adresse',
'IPv4 Calculator': 'IPv4 Kalkulator',
'IPv6 Address': 'IPv6 Adresse',
'IPv6 Calculator': 'IPv6 Kalkulator',
'JSON': 'JSON',
'JSON export of visible columns': 'JSON eksport av viste kolonner',
'Language': 'Språk',
'Last name': 'Etternavn',
'Last Resort': 'Siste Utvei',
'Level': 'Nivå',
'List': 'Liste',
'List All': 'Vis Alle',
'List Evidence': 'Vis Bevis',
'List Exploits': 'Vis Exploits',
'List Notes': 'Vis Notater',
'List OS DB': 'Vis OS DB',
'List OS Refs': 'Vis OS Refs',
'List References': 'Vis Referanser',
'List SNMP': 'Vis SNMP',
'List Tool Output': 'Vis Data fra Verktøy',
'List Vulnerabilities': 'Vis Sårbarheter',
'Local File': 'Lokal Fil',
'Lockout': 'Utestengelse',
'Lockout Duration': 'Utestengelsesvarighet',
'Lockout Limit': 'Utestengelsesbegrensning',
'Lockoutable': 'Utestengelsesmekanisme',
'Logout': 'Logg ut',
'MAC Address': 'MAC Adresse',
'Manage Pages': 'Sideoversikt',
'Mass Import': 'Masse Import',
'Mass Jobs': 'Automatiserte Jobber',
'Message': 'Melding',
'Metasploit': 'Metasploit',
'Metasploit XML': 'Metasploit XML',
'Must be a float in range of 0 to 1.0': 'Må være et tall mellom 0 og 1.0',
'Name': 'Navn',
'Nessus Scan File': 'Nessus Scan File',
'Nessus Scanfile': 'Nessus Scanfile',
'NetBIOS': 'NetBIOS',
'NetBIOS Name': 'NetBIOS Navn',
'New Search': 'Nytt Søk',
'Nexpose': 'Nexpose',
'Nexpose ID': 'Nexpose ID',
'Nexpose ID List': 'Nexpose ID Liste',
'Nexpose XML': 'Nexpose XML',
'Nexpose XML File': 'Nexpose XML Fil',
'Nmap Scan and Import': 'Nmap Scan and Import',
'Nmap XML': 'Nmap XML',
'Nmap XML File': 'Nmap XML File',
'No records found': 'Ingen oppføringer funnet',
'No results': 'Ingen resultater funnet',
'None': 'Tom',
'not in': 'ikke i',
'Note': 'Notat',
'Notes': 'Notater',
'Number': 'Nummer',
'Object or table name': 'Objekt eller tabell navn',
'Origin': 'Opphav',
'OS': 'OS',
'Other': 'Annet',
'Other Type': 'Type Annet',
'Output': 'Returverdi',
'Password': 'Passord',
'Passwords': 'Passord',
'PCI Severity': 'PCI Alvorlighet',
'Port': 'Port',
'Process crack file': 'Prosesser crack file',
'Process john.pot': 'Prosesser john.pot',
'Product': 'Produkt',
'Profile': 'Profil',
'Proof': 'Bevis',
'Protocol': 'Protokoll',
'Purge CPE DB': 'Slett CPE DB',
'Purge Data': 'Slett Data',
'Purge Nexpose Data': 'Slett Nexpose Data',
'PWDUMP Files': 'PWDUMP Filer',
'Pwned': 'Eid',
'PwnWiki': 'PwnWiki',
'Rapport': 'Rapport',
'Record Created': 'Oppføring Opprettet',
'Record ID': 'OppføringsID',
'Reference': 'Referanse',
'Registration identifier': 'Registreringsidentifikator',
'Registration key': 'Registreringsnøkkel',
'reload': 'last inn på nytt',
'Report XML': 'Rapport XML',
'Reset Password key': 'Resett Passordnøkkel',
'Risk score': 'Risikoverdi',
'Role': 'Rolle',
'Run in background task': 'Kjør i bakgrunnen',
'Scan Options': 'Scanne Valg',
'Scan Profile': 'Scanne Profil',
'Scan Targets': 'Scanne Mål',
'Screenshots': 'Skjermbilde',
'Search': 'Søk',
'Search Pages': 'Søk i Sider',
'Send Accounts': 'Send Kontoer',
'Send inn': 'Send inn',
'Send Scan XML Files': 'Send Scan XML Filer',
'Server Type': 'Server Type',
'Service': 'Tjeneste',
'Service Count': 'Antall Tjenester',
'Service Name': 'Tjenestenavn',
'Services': 'Tjenester',
'Sev': 'Risiko',
'Severity': 'Alvorlighetsgrad',
'Shares': 'Delte områder',
'ShodanHQ': 'ShodanHQ',
'ShodanHQ XML File': 'ShodanHQ XML Fil',
'Show All': 'Vis Alle',
'SNMP Version': 'SNMP Versjon',
'Solution': 'Tiltak',
'Source': 'Kilde',
'Sourced from CPE': 'Kilde fra CPE',
'Specific Asset Group': 'Spesifikk Ressurs',
'Spreadsheet-optimised export of tab-separated content including hidden columns. May be slow': 'Regnearks-optimalisert eksport av tabulatorseparert innhold med skjulte kolonner. Obs: Dette kan ta flere minutter!',
'Spreadsheet-optimised export of tab-separated content, visible columns only. May be slow.': 'Regnearks-optimalisert eksport av tabulatorseparert innhold, kun viste kolonner. Obs: Dette kan ta flere minutter!',
'Start building a new search': 'Start å bygg et nytt søk',
'Start the import': 'Start importen',
'starts with': 'begynner med',
'Statistics': 'Statistikk',
'Stats XLS': 'Stats XLS',
'Status': 'Status',
'Submit': 'Send inn',
'Summary': 'Oppsummering',
'T vuln referenceses': 'T vuln referenceses',
'Tab per Asset Group': 'Fane per Ressursgruppe',
'Tasks': 'Oppgaver',
'Teredo Decode': 'Teredo Decode',
'Text': 'Tekst',
'This email already has an account': 'Det er allerede registrert en konto med denne e-posten',
'Timeout (in seconds)': 'Tidsavbrudd (i sekunder)',
'Timestamp': 'Timestamp',
'Title': 'Tittel',
'Truncate all tables': 'Truncate all tables',
'TSV (Spreadsheets)': 'TSV (Spreadsheets)',
'TSV (Spreadsheets, hidden cols)': 'TSV (Spreadsheets, hidden cols)',
'Type': 'Type',
'UID': 'UID',
'Unconfirmed Only': 'Kun ubekreftede',
'Update': 'Oppdater',
'Update DB Fields': 'Oppdater Relasjoner i DB',
'Update existing': 'Oppdater eksisterende',
'Update Host Information': 'Oppdater Maskininformasjon',
'Upload CSV File': 'Last opp CSV Fil',
'User': 'Bruker',
'User ID': 'BrukerID',
'Username': 'Brukernavn',
'Username already taken': 'Brukernavn er allerede i bruk',
'Vendor': 'Produsent',
'Version': 'Versjon',
'View': 'Vis',
'View Page': 'Vis Side',
'Vuln Count': 'Antall Sårbarheter',
'Vuln Details': 'Sårbarhetsdetaljer',
'Vuln Graph': 'Sårbarhetsgraf',
'Vuln ID': 'SårbarhetsID',
'VulnCircles': 'SårbarhetsSirkler',
'VulnDB': 'SårbarhetsDB',
'Vulnerability': 'Sårbarhet',
'Vulnerability ID': 'SårbarhetsID',
'Vulnlist': 'Sårbarhetsliste',
'Welcome': 'Velkommen',
'Wiki': 'Wiki',
'Wiki Page': 'Wiki Side',
'Wipe all existing data': 'Slett alle eksisterende data',
'XML': 'XML',
'XML export of columns shown': 'XML eksport av viste kolonner',
}
|
py | b40f438948faa6d878bf19f3f89ba64fc7753b30 | import json
import logging
from django.conf import settings
from django.db import connection
from django.contrib.gis.geos import Point
from django.db.models.query import QuerySet
from geotrek.common.utils import sqlfunction, uniquify
logger = logging.getLogger(__name__)
class TopologyHelper(object):
@classmethod
def deserialize(cls, serialized):
"""
Topologies can be points or lines. Serialized topologies come from Javascript
module ``topology_helper.js``.
Example of point topology (snapped with path 1245):
{"lat":5.0, "lng":10.2, "snap":1245}
Example of linear serialized topology:
[
{"offset":0,"positions":{"0":[0,0.3],"1":[0.2,1]},"paths":[1264,1208]},
{"offset":0,"positions":{"0":[0.2,1],"5":[0,0.2]},"paths":[1208,1263,678,1265,1266,686]}
]
* Each sub-topology represents a way between markers.
* Start point is first position of sub-topology.
* End point is last position of sub-topology.
* All last positions represents intermediary markers.
Global strategy is :
* If has lat/lng return point topology
* Otherwise, create path aggregations from serialized data.
"""
from .models import Path, Topology, PathAggregation
from .factories import TopologyFactory
try:
return Topology.objects.get(pk=int(serialized))
except Topology.DoesNotExist:
raise
except (TypeError, ValueError):
pass # value is not integer, thus should be deserialized
objdict = serialized
if isinstance(serialized, basestring):
try:
objdict = json.loads(serialized)
except ValueError as e:
raise ValueError("Invalid serialization: %s" % e)
if objdict and not isinstance(objdict, (list,)):
lat = objdict.get('lat')
lng = objdict.get('lng')
pk = objdict.get('pk')
kind = objdict.get('kind')
# Point topology ?
if lat is not None and lng is not None:
if pk:
try:
return Topology.objects.get(pk=int(pk))
except (Topology.DoesNotExist, ValueError):
pass
return cls._topologypoint(lng, lat, kind, snap=objdict.get('snap'))
else:
objdict = [objdict]
if not objdict:
raise ValueError("Invalid serialized topology : empty list found")
# If pk is still here, the user did not edit it.
# Return existing topology instead
pk = objdict[0].get('pk')
if pk:
try:
return Topology.objects.get(pk=int(pk))
except (Topology.DoesNotExist, ValueError):
pass
kind = objdict[0].get('kind')
offset = objdict[0].get('offset', 0.0)
topology = TopologyFactory.create(no_path=True, kind=kind, offset=offset)
# Remove all existing path aggregation (WTF: created from factory ?)
PathAggregation.objects.filter(topo_object=topology).delete()
try:
counter = 0
for j, subtopology in enumerate(objdict):
last_topo = j == len(objdict) - 1
positions = subtopology.get('positions', {})
paths = subtopology['paths']
# Create path aggregations
for i, path in enumerate(paths):
last_path = i == len(paths) - 1
# Javascript hash keys are parsed as a string
idx = str(i)
start_position, end_position = positions.get(idx, (0.0, 1.0))
path = Path.objects.get(pk=path)
topology.add_path(path, start=start_position, end=end_position, order=counter, reload=False)
if not last_topo and last_path:
counter += 1
# Intermediary marker.
# make sure pos will be [X, X]
# [0, X] or [X, 1] or [X, 0] or [1, X] --> X
# [0.0, 0.0] --> 0.0 : marker at beginning of path
# [1.0, 1.0] --> 1.0 : marker at end of path
pos = -1
if start_position == end_position:
pos = start_position
if start_position == 0.0:
pos = end_position
elif start_position == 1.0:
pos = end_position
elif end_position == 0.0:
pos = start_position
elif end_position == 1.0:
pos = start_position
elif len(paths) == 1:
pos = end_position
assert pos >= 0, "Invalid position (%s, %s)." % (start_position, end_position)
topology.add_path(path, start=pos, end=pos, order=counter, reload=False)
counter += 1
except (AssertionError, ValueError, KeyError, Path.DoesNotExist) as e:
raise ValueError("Invalid serialized topology : %s" % e)
topology.save()
return topology
@classmethod
def _topologypoint(cls, lng, lat, kind=None, snap=None):
"""
Receives a point (lng, lat) with API_SRID, and returns
a topology objects with a computed path aggregation.
"""
from .models import Path, PathAggregation
from .factories import TopologyFactory
# Find closest path
point = Point(lng, lat, srid=settings.API_SRID)
point.transform(settings.SRID)
if snap is None:
closest = Path.closest(point)
position, offset = closest.interpolate(point)
else:
closest = Path.objects.get(pk=snap)
position, offset = closest.interpolate(point)
offset = 0
# We can now instantiate a Topology object
topology = TopologyFactory.create(no_path=True, kind=kind, offset=offset)
aggrobj = PathAggregation(topo_object=topology,
start_position=position,
end_position=position,
path=closest)
aggrobj.save()
point = Point(point.x, point.y, srid=settings.SRID)
topology.geom = point
topology.save()
return topology
@classmethod
def serialize(cls, topology, with_pk=True):
# Point topology
if topology.ispoint():
point = topology.geom.transform(settings.API_SRID, clone=True)
objdict = dict(kind=topology.kind, lng=point.x, lat=point.y)
if with_pk:
objdict['pk'] = topology.pk
if topology.offset == 0:
objdict['snap'] = topology.aggregations.all()[0].path.pk
else:
# Line topology
# Fetch properly ordered aggregations
aggregations = topology.aggregations.select_related('path').all()
objdict = []
current = {}
ipath = 0
for i, aggr in enumerate(aggregations):
last = i == len(aggregations) - 1
intermediary = aggr.start_position == aggr.end_position
if with_pk:
current.setdefault('pk', topology.pk)
current.setdefault('kind', topology.kind)
current.setdefault('offset', topology.offset)
if not intermediary:
current.setdefault('paths', []).append(aggr.path.pk)
current.setdefault('positions', {})[ipath] = (aggr.start_position, aggr.end_position)
ipath = ipath + 1
subtopology_done = 'paths' in current and (intermediary or last)
if subtopology_done:
objdict.append(current)
current = {}
ipath = 0
return json.dumps(objdict)
@classmethod
def overlapping(cls, klass, queryset):
from .models import Path, Topology, PathAggregation
all_objects = klass.objects.existing()
is_generic = klass.KIND == Topology.KIND
single_input = isinstance(queryset, QuerySet)
if single_input:
topology_pks = [str(pk) for pk in queryset.values_list('pk', flat=True)]
else:
topology_pks = [str(queryset.pk)]
if len(topology_pks) == 0:
return all_objects.filter(pk__in=[])
sql = """
WITH topologies AS (SELECT id FROM %(topology_table)s WHERE id IN (%(topology_list)s)),
-- Concerned aggregations
aggregations AS (SELECT * FROM %(aggregations_table)s a, topologies t
WHERE a.evenement = t.id),
-- Concerned paths along with (start, end)
paths_aggr AS (SELECT a.pk_debut AS start, a.pk_fin AS end, p.id, a.ordre AS order
FROM %(paths_table)s p, aggregations a
WHERE a.troncon = p.id
ORDER BY a.ordre)
-- Retrieve primary keys
SELECT t.id
FROM %(topology_table)s t, %(aggregations_table)s a, paths_aggr pa
WHERE a.troncon = pa.id AND a.evenement = t.id
AND least(a.pk_debut, a.pk_fin) <= greatest(pa.start, pa.end)
AND greatest(a.pk_debut, a.pk_fin) >= least(pa.start, pa.end)
AND %(extra_condition)s
ORDER BY (pa.order + CASE WHEN pa.start > pa.end THEN (1 - a.pk_debut) ELSE a.pk_debut END);
""" % {
'topology_table': Topology._meta.db_table,
'aggregations_table': PathAggregation._meta.db_table,
'paths_table': Path._meta.db_table,
'topology_list': ','.join(topology_pks),
'extra_condition': 'true' if is_generic else "kind = '%s'" % klass.KIND
}
cursor = connection.cursor()
cursor.execute(sql)
result = cursor.fetchall()
pk_list = uniquify([row[0] for row in result])
# Return a QuerySet and preserve pk list order
# http://stackoverflow.com/a/1310188/141895
ordering = 'CASE %s END' % ' '.join(['WHEN %s.id=%s THEN %s' % (Topology._meta.db_table, id_, i)
for i, id_ in enumerate(pk_list)])
queryset = all_objects.filter(pk__in=pk_list).extra(
select={'ordering': ordering}, order_by=('ordering',))
return queryset
class PathHelper(object):
@classmethod
def snap(cls, path, point):
if not path.pk:
raise ValueError("Cannot compute snap on unsaved path")
if point.srid != path.geom.srid:
point.transform(path.geom.srid)
cursor = connection.cursor()
sql = """
WITH p AS (SELECT ST_ClosestPoint(geom, '%(ewkt)s'::geometry) AS geom
FROM %(table)s
WHERE id = '%(pk)s')
SELECT ST_X(p.geom), ST_Y(p.geom) FROM p
""" % {'ewkt': point.ewkt, 'table': path._meta.db_table, 'pk': path.pk}
cursor.execute(sql)
result = cursor.fetchall()
return Point(*result[0], srid=path.geom.srid)
@classmethod
def interpolate(cls, path, point):
if not path.pk:
raise ValueError("Cannot compute interpolation on unsaved path")
if point.srid != path.geom.srid:
point.transform(path.geom.srid)
cursor = connection.cursor()
sql = """
SELECT position, distance
FROM ft_troncon_interpolate(%(pk)s, ST_GeomFromText('POINT(%(x)s %(y)s)',%(srid)s))
AS (position FLOAT, distance FLOAT)
""" % {'pk': path.pk,
'x': point.x,
'y': point.y,
'srid': path.geom.srid}
cursor.execute(sql)
result = cursor.fetchall()
return result[0]
@classmethod
def disjoint(cls, geom, pk):
"""
Returns True if this path does not overlap another.
TODO: this could be a constraint at DB-level. But this would mean that
paths never ever overlap, even during trigger computation, like path splitting...
"""
wkt = "ST_GeomFromText('%s', %s)" % (geom, settings.SRID)
disjoint = sqlfunction('SELECT * FROM check_path_not_overlap', str(pk), wkt)
return disjoint[0]
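# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Illustrates the two serialized forms accepted by TopologyHelper.deserialize()
# (see its docstring above). It assumes a fully configured Django/Geotrek
# environment with existing Path records; the primary keys used below (1245,
# 1264, 1208, 678) are placeholders, not real data.
def _example_deserialize():  # hypothetical helper, for illustration only
    # Point topology snapped onto path 1245:
    point_topo = TopologyHelper.deserialize({"lat": 5.0, "lng": 10.2, "snap": 1245})
    # Linear topology made of two sub-topologies separated by an intermediary marker:
    line_topo = TopologyHelper.deserialize(json.dumps([
        {"offset": 0, "positions": {"0": [0, 0.3], "1": [0.2, 1]}, "paths": [1264, 1208]},
        {"offset": 0, "positions": {"0": [0.2, 1], "1": [0, 0.2]}, "paths": [1208, 678]},
    ]))
    return point_topo, line_topo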
|
py | b40f43e6588c6b1013a9fceb0bf173b0dff04fee | from neurodesk import neurodesk
neurodesk.main() |
py | b40f447a2f0092b56a034fd22eb089df420de1f9 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gensim.models import Word2Vec
from mleap.bundle.serialize import MLeapSerializer
import uuid
import numpy as np
def serialize_to_bundle(self, path, model_name):
serializer = SimpleSparkSerializer()
return serializer.serialize_to_bundle(self, path, model_name)
def sent2vec(self, words):
serializer = SimpleSparkSerializer()
return serializer.sent2vec(words, self)
def mleap_init(self, input_features, prediction_column):
self.input_features = input_features
self.prediction_column = prediction_column
self.name = "{}_{}".format(self.op, uuid.uuid4())
setattr(Word2Vec, 'op', 'word2vec')
setattr(Word2Vec, 'mlinit', mleap_init)
setattr(Word2Vec, 'serialize_to_bundle', serialize_to_bundle)
setattr(Word2Vec, 'serializable', True)
setattr(Word2Vec, 'sent2vec', sent2vec)
class SimpleSparkSerializer(MLeapSerializer):
def __init__(self):
super(SimpleSparkSerializer, self).__init__()
@staticmethod
def set_prediction_column(transformer, prediction_column):
transformer.prediction_column = prediction_column
@staticmethod
def set_input_features(transformer, input_features):
transformer.input_features = input_features
def serialize_to_bundle(self, transformer, path, model_name):
# compile tuples of model attributes to serialize
attributes = list()
attributes.append(('words', transformer.wv.index2word))
# indices = [np.float64(x) for x in list(range(len(transformer.wv.index2word)))]
word_vectors = np.array([float(y) for x in [transformer.wv.word_vec(w) for w in transformer.wv.index2word] for y in x])
# attributes.append(('indices', indices))
# Excluding indices because they are 0 - N
attributes.append(('word_vectors', word_vectors))
attributes.append(('kernel', 'sqrt'))
# define node inputs and outputs
inputs = [{
"name": transformer.input_features,
"port": "input"
}]
outputs = [{
"name": transformer.prediction_column,
"port": "output"
}]
self.serialize(transformer, path, model_name, attributes, inputs, outputs)
def sent2vec(self, words, transformer):
"""
Used with sqrt kernel
:param words:
:param transformer:
:return:
"""
sent_vec = np.zeros(transformer.vector_size)
numw = 0
for w in words:
try:
sent_vec = np.add(sent_vec, transformer.wv[w])
numw += 1
except:
continue
return sent_vec / np.sqrt(sent_vec.dot(sent_vec))
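# --- Editor's addition: hedged usage sketch, not part of the original file ---
# Shows how the monkey-patched helpers above (mlinit, serialize_to_bundle,
# sent2vec) fit together. It assumes an older gensim release whose Word2Vec
# exposes wv.index2word (as the serializer above requires); the corpus, feature
# names and output path are placeholders.
def _example_word2vec_bundle():  # hypothetical helper, for illustration only
    sentences = [["hello", "world"], ["hello", "mleap"]]
    model = Word2Vec(sentences, min_count=1)
    # Attach the MLeap metadata expected by the serializer.
    model.mlinit(input_features='sentence_tokens', prediction_column='sentence_vector')
    model.serialize_to_bundle('/tmp/models', model.name)
    # Average-and-normalize a sentence using the sqrt kernel.
    return model.sent2vec(["hello", "world"])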
|
py | b40f47722dcfad60214cd8ef388b812c6a106a4c | from . import ExplorationTechnique
from ..sim_options import EFFICIENT_STATE_MERGING
class Veritesting(ExplorationTechnique):
    """
    Enable veritesting. This technique, described in a paper[1] from CMU, attempts to address the problem of state
    explosions in loops by performing smart merging.
    [1] https://users.ece.cmu.edu/~aavgerin/papers/veritesting-icse-2014.pdf
    """
    def __init__(self, **options):
        super(Veritesting, self).__init__()
        self.options = options
    def step_state(self, state, **kwargs):
        if EFFICIENT_STATE_MERGING not in state.options:
            state.options.add(EFFICIENT_STATE_MERGING)
        vt = self.project.analyses.Veritesting(state, **self.options)
        if vt.result and vt.final_manager:
            simgr = vt.final_manager
            simgr.stash(from_stash='deviated', to_stash='active')
            simgr.stash(from_stash='successful', to_stash='active')
            return {
                'active': simgr.active,
                'unconstrained': simgr.stashes.get('unconstrained', []),
                'unsat': simgr.stashes.get('unsat', []),
                'pruned': simgr.stashes.get('pruned', []),
                'errored': simgr.errored,
            }
        return None
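# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Typical way to enable this technique on an angr simulation manager; the
# '/bin/true' binary is a placeholder target.
def _example_veritesting():  # hypothetical helper, for illustration only
    import angr
    project = angr.Project('/bin/true', auto_load_libs=False)
    simgr = project.factory.simulation_manager(project.factory.entry_state())
    simgr.use_technique(Veritesting())
    simgr.run()
    return simgr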
|
py | b40f480202d5e61fd4c0fa6f36fa4ff42883b7ea | import os
from typing import *
import time
import random
from enum import Enum
from abc import ABC, abstractmethod
import glob
import numpy as np
import scipy
import scipy.stats
import statsmodels.nonparametric.bandwidths
import matplotlib
import matplotlib.pyplot as plt
import torch
import torchvision
import cv2
if os.name == "posix":
import resource
# A enumeration of all supported datasets.
DatasetInfo = Enum("DatasetInfo", "MNIST CelebA128Gender LSUN128 ImageNet")
### PyTorch utils ###
class Lambda(torch.nn.Module):
def __init__(self, forward):
super().__init__()
self.lambda_forward = forward
def forward(self, x):
return self.lambda_forward(x)
class NopLayer(torch.nn.Module):
def forward(self, x):
return x
class Flatten(torch.nn.Module):
def forward(self, x: torch.Tensor):
return x.view(x.shape[0], -1)
class Upsample(torch.nn.Module):
def forward(self, x: torch.Tensor):
return torch.nn.functional.interpolate(x, size=None, scale_factor=2, mode='bilinear', align_corners=False)
class Resize(torch.nn.Module):
def __init__(self, side: int):
super().__init__()
self.side = side
def forward(self, x: torch.Tensor):
return torch.nn.functional.interpolate(x, size=(self.side, self.side),
mode="bicubic", align_corners=False)
class Adversary(ABC):
"""
Base class for adversaries. Adversaries can perturb vectors given the gradient pointing to the direction
of making the prediction worse.
"""
@abstractmethod
def perturb(self, initial_vector: torch.Tensor,
get_gradient: Callable[[torch.Tensor], Tuple[torch.Tensor, float]]) -> torch.Tensor:
"""
Perturb the given vector.
:param initial_vector: initial vector. If this is the original image representation, it must be flattened
prior to the call (more precisely, it must be of size [1, the_rest]).
:param get_gradient: a get_gradient function. It accepts the current vector and returns a tuple
(gradient pointing to the direction of the adversarial attack, the corresponding loss function value).
:return: the perturbed vector of the same size as initial_vector.
"""
pass
class NopAdversary(Adversary):
"""
Dummy adversary that acts like an identity function.
"""
def perturb(self, initial_vector: torch.Tensor,
get_gradient: Callable[[torch.Tensor], Tuple[torch.Tensor, float]]) -> torch.Tensor:
return initial_vector
class PGDAdversary(Adversary):
"""
Performes Projected Gradient Descent (PGD), or, more precisely, PG ascent according to the provided gradient.
"""
def __init__(self, rho: float = 0.1, steps: int = 25, step_size: float = 0.1, random_start: bool = True,
stop_loss: float = 0, verbose: int = 1, norm: str = "scaled_l_2",
n_repeat: int = 1, repeat_mode: str = None, unit_sphere_normalization: bool = False):
"""
Constructs PGDAdversary.
:param rho > 0: bound on perturbation norm.
:param steps: number of steps to perform in each run. Fewer steps may be performed if stop_loss is reached.
:param step_size: step size. Each step will be of magnitude rho * step_size.
:param random_start: if True, start search in a vector with a uniformly random radius within the rho-ball.
Otherwise, start in the center of the rho-ball.
:param stop_loss: the search will stop when this value of the "loss" function is exceeded.
:param verbose: 0 (silent), 1 (regular), 2 (verbose).
:param norm: one of 'scaled_l_2' (default), 'l_2' or 'l_inf'.
:param n_repeat: number of times to run PGD.
:param repeat_mode: 'any' or 'min': In mode 'any', n_repeat runs are identical and any run that reaches
stop_loss will prevent subsequent runs. In mode 'min', all runs will be performed, and if a run
finds a smaller perturbation according to norm, it will tighten rho on the next run.
:param unit_sphere_normalization: search perturbations on the unit sphere (according to the scaled L2 norm)
instead of the entire latent space.
"""
super().__init__()
self.rho = rho
self.steps = steps
self.step_size = step_size
self.random_start = random_start
self.stop_loss = stop_loss
self.verbose = verbose
# checks on norms
assert norm in ["scaled_l_2", "l_2", "l_inf"], "norm must be either 'scaled_l_2', 'l_2' or 'l_inf'"
self.scale_norm = norm == "scaled_l_2"
self.inf_norm = norm == "l_inf"
# checks on repeated runs
assert n_repeat >= 1, "n_repeat must be positive"
assert not(n_repeat > 1 and repeat_mode is None), "if n_repeat > 1, repeat_mode must be set"
assert repeat_mode in [None, "any", "min"], "if repeat_mode is set, it must be either 'any' or 'min'"
self.n_repeat = n_repeat
self.shrinking_repeats = repeat_mode == "min"
# optional unit sphere normalization
self.unit_sphere_normalization = unit_sphere_normalization
assert not unit_sphere_normalization or norm == "scaled_l_2",\
"unit_sphere_normalization is only compatible with scaled_l_2 norm"
def _norm(self, x: torch.Tensor) -> float:
"""
(Possibly scaled) norm of x.
"""
return x.norm(np.infty if self.inf_norm else 2).item() / (np.sqrt(x.numel()) if self.scale_norm else 1)
def _normalize_gradient(self, x: torch.Tensor) -> torch.Tensor:
"""
Normalizes the vector of gradients.
In the L2 space, this is done by dividing the vector by its norm.
In the L-inf space, this is done by taking the sign of the gradient.
"""
return x.sign() if self.inf_norm else (x / self._norm(x))
def _project(self, x: torch.Tensor, rho: float) -> torch.Tensor:
"""
Projects the vector onto the rho-ball.
In the L2 space, this is done by scaling the vector.
In the L-inf space, this is done by clamping all components independently.
"""
return x.clamp(-rho, rho) if self.inf_norm else (x / self._norm(x) * rho)
def perturb(self, initial_vector: torch.Tensor,
get_gradient: Callable[[torch.Tensor], Tuple[torch.Tensor, float]]) -> torch.Tensor:
best_perturbation = None
best_perturbation_norm = np.infty
# rho may potentially shrink with repeat_mode == "min":
rho = self.rho
random_start = self.random_start
for run_n in range(self.n_repeat):
x1 = initial_vector * 1
perturbation = x1 * 0
if random_start:
# random vector within the rho-ball
if self.inf_norm:
# uniform
perturbation = (torch.rand(1, x1.numel()) - 0.5) * 2 * rho
# possibly reduce radius to encourage search of vectors with smaller norms
perturbation *= np.random.rand()
else:
# uniform radius, random direction
# note that this distribution is not uniform in terms of R^n!
perturbation = torch.randn(1, x1.numel())
if rho > 0:
perturbation /= self._norm(perturbation) / rho
else:
perturbation *= 0
perturbation *= np.random.rand()
perturbation = Util.conditional_to_cuda(perturbation)
if self.verbose > 0:
print(f">> #run = {run_n}, ║x1║ = {self._norm(x1):.5f}, ρ = {rho:.5f}")
found = False
for i in range(self.steps):
perturbed_vector = x1 + perturbation
perturbed_vector, perturbation = self._recompute_with_unit_sphere_normalization(perturbed_vector,
perturbation)
classification_gradient, classification_loss = get_gradient(perturbed_vector)
if self.verbose > 0:
if classification_loss > self.stop_loss or i == self.steps - 1 or i % 5 == 0 and self.verbose > 1:
print(f"step {i:3d}: objective = {-classification_loss:+.7f}, "
f"║Δx║ = {self._norm(perturbation):.5f}, ║x║ = {self._norm(perturbed_vector):.5f}")
if classification_loss > self.stop_loss:
found = True
break
# learning step
perturbation_step = rho * self.step_size * self._normalize_gradient(classification_gradient)
perturbation_step = self._adjust_step_for_unit_sphere(perturbation_step, x1 + perturbation)
perturbation += perturbation_step
# projecting on rho-ball around x1
if self._norm(perturbation) > rho:
perturbation = self._project(perturbation, rho)
# end of run
if found:
if self.shrinking_repeats:
if self._norm(perturbation) < best_perturbation_norm:
best_perturbation_norm = self._norm(perturbation)
best_perturbation = perturbation
rho = best_perturbation_norm
else: # regular repeats
# return immediately
return self._optional_normalize(x1 + perturbation)
if best_perturbation is None:
best_perturbation = perturbation
if self.shrinking_repeats and run_n == self.n_repeat - 1:
# heuristic: the last run is always from the center
random_start = False
return self._optional_normalize(x1 + best_perturbation)
### projections on the unit sphere: ###
def _optional_normalize(self, x: torch.Tensor):
"""
Optional unit sphere normalization.
:param x: vector of shape 1*dim to normalize.
:return: optionally normalized x.
"""
return Util.normalize_latent(x) if self.unit_sphere_normalization else x
def _recompute_with_unit_sphere_normalization(self, perturbed_vector: torch.Tensor, perturbation: torch.Tensor):
"""
If unit sphere normalization is enabled, the perturbed vector is projected on the unit sphere,
and the perturbation vector is recomputed accordingly. Otherwise, returns the inputs unmodified.
:param perturbed_vector: perturbed vector (initial vector + perturbation).
:param perturbation: perturbation vector.
:return: possibly recomputed (perturbed_vector, perturbation).
"""
effective_perturbed_vector = self._optional_normalize(perturbed_vector)
return effective_perturbed_vector, perturbation + effective_perturbed_vector - perturbed_vector
def _adjust_step_for_unit_sphere(self, perturbation_step: torch.Tensor, previous_perturbed_vector: torch.Tensor):
"""
If unit sphere normalization is enabled, multiplies perturbation_step by a coefficient that approximately
compensates for the reduction of the learning step due to projection of a unit sphere.
:param perturbation_step: unmodified pertubation step.
:param previous_perturbed_vector: previous perturbed vector.
:return: altered perturbation_step.
"""
new_perturbed_vector = self._optional_normalize(previous_perturbed_vector + perturbation_step)
effective_perturbation_step = new_perturbed_vector - previous_perturbed_vector
coef = perturbation_step.norm() / effective_perturbation_step.norm()
return perturbation_step * coef
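# --- Editor's addition: hedged usage sketch, not part of the original file ---
# Minimal demonstration of the Adversary/PGDAdversary interface on a toy
# objective (maximizing ||x||^2 inside the rho-ball). The get_gradient callback
# must return (gradient towards worse predictions, scalar loss), as documented
# in Adversary.perturb; the dimensionality and hyperparameters are arbitrary.
def _example_pgd_usage():  # hypothetical helper, for illustration only
    def get_gradient(x: torch.Tensor) -> Tuple[torch.Tensor, float]:
        x = x.clone().detach().requires_grad_(True)
        loss = (x ** 2).sum()
        loss.backward()
        return x.grad, loss.item()
    adversary = PGDAdversary(rho=0.1, steps=10, step_size=0.1, verbose=0)
    x0 = Util.conditional_to_cuda(torch.zeros(1, 8))
    return adversary.perturb(x0, get_gradient)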
class ImageSet:
"""
Accumulates images and captions. Shows them in blocks of size `max_num'.
"""
def __init__(self, max_num: int):
"""
Constructs ImageSet.
:param max_num: number of images and captions to accumulate until they can be shown.
"""
self.images, self.captions = [], []
self.max_num = max_num
def append(self, images: List[torch.Tensor], captions: List[str] = None):
"""
Appends the given images and captions.
:param images: list of images (PyTorch tensors).
:param captions: list of string captions corresponding to the images.
"""
self.images += [images]
if captions is None:
captions = [""] * len(images)
self.captions += [captions]
def maybe_show(self, force: bool = False, nrow: int = 8):
"""
Shows the images and their captions if a sufficient number of them has been accumulated.
If the images and captions are shown, they are removed from the memory of ImageSet.
:param force: if True, shows everything anyway. This may be useful to make the last call of maybe_show.
:param nrow: nrow passed to Util.imshow_tensors.
"""
if self.images and (force or len(self.images) >= self.max_num):
Util.imshow_tensors(*sum(self.images, []), captions=sum(self.captions, []), nrow=nrow)
self.images.clear()
self.captions.clear()
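# --- Editor's addition: hedged usage sketch, not part of the original file ---
# ImageSet accumulates (images, captions) pairs and renders one grid per
# `max_num` appended batches; the random tensors below are placeholders in [-1, 1].
def _example_image_set():  # hypothetical helper, for illustration only
    image_set = ImageSet(max_num=2)
    for i in range(4):
        batch = [torch.rand(1, 3, 32, 32) * 2 - 1 for _ in range(2)]
        image_set.append(batch, [f"sample {i}a", f"sample {i}b"])
        image_set.maybe_show()
    image_set.maybe_show(force=True)  # flush any remainder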
class Util:
"""
A convenience static class for everything that does not have its own class.
"""
# if False, will use CPU even if CUDA is available
cuda_enabled = True
using_cuda = cuda_enabled and torch.cuda.is_available()
@staticmethod
def set_memory_limit(mb: int):
"""
Sets memory limit in megabytes (this has effect only on Linux).
"""
if os.name == "posix":
rsrc = resource.RLIMIT_DATA
soft, hard = resource.getrlimit(rsrc)
resource.setrlimit(rsrc, (1024 * 1024 * mb, hard))
@staticmethod
def configure(memory_limit_mb: int):
"""
Sets memory limit in megabytes (this has effect only on Linux). Configures matplotlib fonts.
"""
Util.set_memory_limit(memory_limit_mb)
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
@staticmethod
def tensor2numpy(x: torch.Tensor) -> np.ndarray:
return x.detach().cpu().numpy()
@staticmethod
def imshow(img: torch.Tensor, figsize: Tuple[float, float] = (12, 2)):
"""
Shows the image and saves the produced figure.
:param img: image to show (PyTorch tensor).
:param figsize: matplotlib figsize.
"""
plt.figure(figsize=figsize)
img = img / 2 + 0.5 # unnormalize
plt.imshow(np.transpose(Util.tensor2numpy(img), (1, 2, 0)))
plt.axis("off")
LogUtil.savefig("imshow")
plt.show()
plt.close()
@staticmethod
def imshow_tensors(*tensors: torch.Tensor, clamp: bool = True, figsize: Tuple[float, float] = (12, 2),
captions: List[str] = None, nrow: int = 8, pad_value: float = 1):
"""
Enhanced torchvision.utils.make_grid(). Supports image captions. Also saves the produced figure.
:param tensors: images to show.
:param clamp: if True, clamp all pixels to [-1, 1].
:param figsize: matplotlib figsize.
:param captions: list of string captions to be printed on top of images.
:param nrow: nrow to be passed to torchvision.utils.make_grid.
:param pad_value: pad_value to be passed to torchvision.utils.make_grid.
"""
t = torch.cat(tensors)
assert len(t.shape) == 4, f"Invalid shape of tensors {t.shape}"
# handling 1D images
if t.shape[1] == 1:
t = t.repeat(1, 3, 1, 1)
if clamp:
t = torch.clamp(t, -1, 1)
t = list(t)
# adding textual captions if they are given (this involves rescaling)
if captions is not None:
def multiline_puttext(img, caption: str):
"""
cv2.putText does not support line breaks on its own.
"""
scale = 0.8
y0 = img.shape[0] * 0.15
dy = img.shape[0] * 0.20
for i, text in enumerate(caption.split("\n")):
y = int(y0 + i * dy)
# green
img = cv2.putText(img, text, (0, y), cv2.FONT_HERSHEY_TRIPLEX, scale, (0, 250, 0))
return img
default_size = (128,) * 2
for i in range(len(t)):
assert type(captions[i]) == str, "Captions must be str"
t[i] = (Util.tensor2numpy(t[i]).transpose(1, 2, 0) / 2 + 0.5) * 255
# shape = H*W*3
t[i] = cv2.resize(t[i], default_size, interpolation=cv2.INTER_NEAREST)
t[i] = multiline_puttext(t[i], captions[i]) / 255
t[i] = torch.FloatTensor(t[i].transpose(2, 0, 1) * 2 - 1)
Util.imshow(torchvision.utils.make_grid(torch.stack(t), nrow=nrow, pad_value=pad_value), figsize)
@staticmethod
def class_specific_loader(target_label: int, parent_loader_fn: Callable) -> Callable:
"""
Filters the supplied loader parent_loader_fn, retaining only elements with target_label.
Preserves batch size.
:param target_label: the label to retain.
:param parent_loader_fn: the loader-returning function to decorate.
:return: decorated parent_loader_fn.
"""
def loader():
data_generator = iter(parent_loader_fn())
result = []
while True:
try:
items, labels = next(data_generator)
batch_size = len(items)
# accumulate only items of target_label
result += [item for item, label in zip(items, labels) if label == target_label]
if len(result) >= batch_size:
yield torch.stack(result[:batch_size]), [target_label] * batch_size
# save the remainder for the next yield
result = result[batch_size:]
except StopIteration:
return
return loader
@staticmethod
def fixed_length_loader(no_images: int, parent_loader_fn: Callable, restarts: bool = True) -> Callable:
"""
Restarts or limits the parent loader so that the desired number of images is produced.
:param no_images: desired number of images (the effective number will be a multiple of batch size).
:param parent_loader_fn: the loader-returning function to decorate.
:param restarts: if False, then just limit the parent loader and do not restart it when the end is reached.
:return: decorated parent_loader_fn.
"""
def loader():
generated = 0
while generated < no_images:
data_generator = iter(parent_loader_fn())
while generated < no_images:
try:
items, labels = next(data_generator)
except StopIteration:
if restarts:
data_generator = iter(parent_loader_fn())
items, labels = next(data_generator)
else:
return
yield items, labels
generated += len(items)
return loader
@staticmethod
def leakless_cycle(iterable_fn: Callable) -> Generator:
"""
Fixes the memory leak problem of itertools.cycle (https://github.com/pytorch/pytorch/issues/23900).
:param iterable_fn: function that returns an iterable.
"""
iterator = iter(iterable_fn())
while True:
try:
yield next(iterator)
except StopIteration:
iterator = iter(iterable_fn())
@staticmethod
def optimizable_clone(x: torch.Tensor) -> torch.Tensor:
"""
Clones a PyTorch tensor and makes it suitable for optimization.
:param x: input tensor.
:return: x with enabled gradients.
"""
return Util.conditional_to_cuda(x.clone().detach()).requires_grad_(True)
@staticmethod
def set_param_requires_grad(m: torch.nn.Module, value: bool):
"""
Sets requires_grad_(value) for all parameters of the module.
:param m: PyTorch module.
:param value: value to set.
"""
for p in m.parameters():
p.requires_grad_(value)
@staticmethod
def conditional_to_cuda(x: Union[torch.Tensor, torch.nn.Module]) -> torch.Tensor:
"""
Returns the tensor/module on GPU if there is at least 1 GPU, otherwise just returns the tensor.
:param x: a PyTorch tensor or module.
:return: x on GPU if there is at least 1 GPU, otherwise just x.
"""
return x.cuda() if Util.using_cuda else x
@staticmethod
def number_of_trainable_parameters(model: torch.nn.Module) -> int:
"""
Number of trainable parameters in a PyTorch module, including nested modules.
:param model: PyTorch module.
:return: number of trainable parameters in model.
"""
return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
@staticmethod
def set_random_seed(seed: int = None):
"""
Set random seed of random, numpy and pytorch.
:param seed: seed value. If None, it is replaced with the current timestamp.
"""
if seed is None:
seed = int(time.time())
else:
assert seed >= 0
random.seed(seed)
np.random.seed(seed + 1)
torch.manual_seed(seed + 2)
torch.cuda.manual_seed_all(seed + 2)
@staticmethod
def normalize_latent(x: torch.Tensor) -> torch.Tensor:
"""
Divides each latent vector of a batch by its scaled Euclidean norm.
:param x: batch of latent vectors.
:return: normalized vector.
"""
norm_vector = (np.sqrt(x.shape[1]) / torch.norm(x, dim=1)).unsqueeze(0)
norm_vector = norm_vector.expand(x.shape[0], norm_vector.shape[1])
return norm_vector @ x
#return torch.stack([x[i] / torch.norm(x[i]) for i in range(x.shape[0])])
@staticmethod
def get_kde_bandwidth(x: np.ndarray) -> float:
"""
This fixes a peculiar problem with sns.kdeplot/distplot. Use this to compute the bandwidth and provide it as argument bw.
https://stackoverflow.com/questions/61440184/who-is-scott-valueerror-in-seaborn-pairplot-could-not-convert-string-to-floa
:param x: input (numpy array).
:return: KDE bandwidth computed by Scott's method.
"""
return statsmodels.nonparametric.bandwidths.bw_scott(x)
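# --- Editor's addition: hedged usage sketch, not part of the original file ---
# Shows how the loader decorators above compose: keep only one MNIST class and
# cap the resulting stream at a fixed number of images. The dataset root and
# batch size are placeholders.
def _example_loaders():  # hypothetical helper, for illustration only
    import torch.utils.data
    def base_loader():
        dataset = torchvision.datasets.MNIST("mnist_data", train=True, download=True,
                                              transform=torchvision.transforms.ToTensor())
        return torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
    sevens_only = Util.class_specific_loader(7, base_loader)
    first_320_sevens = Util.fixed_length_loader(320, sevens_only)
    for images, labels in first_320_sevens():
        assert all(label == 7 for label in labels)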
class EpsDTransformer:
"""
Converter between noise magnitude (epsilon) and decay factor (d).
"""
def eps_to_d(self, eps: float) -> float:
"""
Converts epsilon to d.
:param eps: noise magnitude.
:return: decay factor.
"""
return 1 - 1 / np.sqrt(1 + eps**2)
def d_to_eps(self, d: float) -> float:
"""
Converts d to epsilon.
:param d: decay factor.
:return: noise magnitude.
"""
return np.sqrt((1 / (1 - d))**2 - 1)
class TauRhoTransformer:
"""
Converter between perturbation likelihood (tau) and norm bound (rho).
"""
def __init__(self, n_l: int, eps: float):
self.n_l = n_l
self.c1 = n_l * np.log(np.sqrt((1 + eps**2) / (2 * np.pi * eps**2)))
self.c2 = (1 + eps**2) / (2 * eps**2) * np.sqrt(n_l)
def rho_to_logtau(self, rho: float) -> float:
"""
Converts rho to the logarithm of tau.
"""
return self.c1 - (rho**2) * self.n_l * self.c2
def logtau_to_rho(self, tau: float) -> float:
"""
Converts the logarithm of tau to rho.
"""
return np.sqrt((self.c1 - tau) / self.c2 / self.n_l)
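# --- Editor's addition: hedged round-trip check, not part of the original file ---
# Sanity-checks the two converters defined above; eps, rho and the latent
# dimensionality n_l are arbitrary example values.
def _example_eps_d_tau_rho():  # hypothetical helper, for illustration only
    eps_d = EpsDTransformer()
    d = eps_d.eps_to_d(1.0)  # 1 - 1/sqrt(2), roughly 0.293
    assert abs(eps_d.d_to_eps(d) - 1.0) < 1e-9
    tau_rho = TauRhoTransformer(n_l=256, eps=1.0)
    log_tau = tau_rho.rho_to_logtau(0.1)
    assert abs(tau_rho.logtau_to_rho(log_tau) - 0.1) < 1e-9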
class LogUtil:
"""
Performs logging of text and images to a new timestamped directory.
Logs are also printed to stdout. Figures are only shown in notebooks.
"""
_timestamp = lambda: str(int(round(time.time() * 1000)))
_dirname = "logdir_" + _timestamp()
_created = False
@staticmethod
def set_custom_dirname(dirname: str):
"""
Set the custom name for a logging directory.
The previous contents of the directory will be deleted!
"""
LogUtil._dirname = dirname
LogUtil.ensure_dir_existence()
for filename in os.listdir(dirname):
os.remove(os.path.join(dirname, filename))
@staticmethod
def ensure_dir_existence():
"""
Creates the directory for logging if it hasn't been created yet.
"""
if not LogUtil._created:
LogUtil._created = True
os.makedirs(LogUtil._dirname, exist_ok=True)
@staticmethod
def info(msg: str, regular_print: bool = True):
"""
Log/print a message to <log directory>/log.txt.
:param msg: message to log and, optionally, print to console.
:param regular_print: whether to print msg to console.
"""
LogUtil.ensure_dir_existence()
if regular_print:
print(msg)
with open(os.path.join(LogUtil._dirname, "log.txt"), "a+", encoding="utf-8") as f:
f.write(f"[time_ms={round(time.time() * 1000)}] {msg}\n")
@staticmethod
def savefig(prefix: str, pdf: bool = False):
"""
Saves a figure to the logging directory. Filename is generated based on the current timestamp
and the provided prefix.
:param prefix: to be included in the filename.
:param pdf: if True, save as PDF. If False, save as PNG.
"""
LogUtil.ensure_dir_existence()
fname = os.path.join(LogUtil._dirname, "fig_" + prefix + "_" + LogUtil._timestamp() + (".pdf" if pdf else ".png"))
plt.savefig(fname, dpi=300, bbox_inches="tight")
LogUtil.info(f"[produced a figure: {fname}]", False)
@staticmethod
def metacall(fn: Callable, fn_name: str, *args, **kwargs):
"""
Makes a function call and logs it with concrete argument values.
:param fn: function to call.
:param fn_name: the name of the function to be put to the log.
:param args: *args to be passed to fn.
:param kwargs: *kwargs to be passed to fn.
"""
arg_assignments = [repr(arg) for arg in args]
kwarg_assignments = [f"{kwarg_name}={repr(kwarg_value)}" for kwarg_name, kwarg_value in kwargs.items()]
LogUtil.info(f"{fn_name}(" + ", ".join(arg_assignments + kwarg_assignments) + ")", False)
fn(*args, **kwargs)
@staticmethod
def to_pdf():
"""
Switches matplotlib backend to 'pdf'.
"""
matplotlib.use("pdf", warn=False, force=True)
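# --- Editor's addition: hedged usage sketch, not part of the original file ---
# Shows the intended flow of the LogUtil helpers above; the directory name and
# the logged call are placeholders.
def _example_logutil():  # hypothetical helper, for illustration only
    LogUtil.set_custom_dirname("logdir_example")
    LogUtil.info("starting experiment")
    # Log a call together with the concrete arguments it received.
    LogUtil.metacall(Util.set_random_seed, "Util.set_random_seed", 42)
    plt.plot([0, 1], [0, 1])
    LogUtil.savefig("diagonal")  # written as fig_diagonal_<timestamp>.png
    plt.close()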
|
py | b40f4932007d27a3251eb84183c91594233e076b | import numpy as np
import matplotlib.pyplot as plt
import cv2
class GC_executor:
    def __init__(self):
        pass
    def _display_image(self, image):
        # plt.axis('off')
        frame = plt.gca()
        frame.axes.get_xaxis().set_ticks([])
        frame.axes.get_yaxis().set_ticks([])
        plt.imshow(image)
        plt.show()
    def grab_cut_with_patch(self, patch, heat_map):
        # Grabcut mask
        # DRAW_BG = {'color': BLACK, 'val': 0}
        # DRAW_FG = {'color': WHITE, 'val': 1}
        # DRAW_PR_FG = {'color': GREEN, 'val': 3}
        # DRAW_PR_BG = {'color': RED, 'val': 2}
        self.bgdModel = np.zeros((1, 65), np.float64)
        self.fgdModel = np.zeros((1, 65), np.float64)
        mean = np.mean(heat_map[heat_map != 0])
        heat_map_high_prob = np.where((heat_map > mean), 1, 0).astype('uint8')
        heat_map_low_prob = np.where((heat_map > 0), 3, 0).astype('uint8')
        mask = heat_map_high_prob + heat_map_low_prob
        mask[mask == 4] = 1
        mask[mask == 0] = 2
        mask, bgdModel, fgdModel = cv2.grabCut(patch, mask, None, self.bgdModel, self.fgdModel, 5, cv2.GC_INIT_WITH_MASK)
        mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        img = patch * mask[:, :, np.newaxis]
        return img, mask
    def grab_cut_without_patch(self, patch):
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        mask_onlyGC = np.zeros(patch.shape[:2], np.uint8)
        rect = (0, 0, patch.shape[1] - 1, patch.shape[0] - 1)
        cv2.grabCut(patch, mask_onlyGC, rect, bgdModel, fgdModel, 10, cv2.GC_INIT_WITH_RECT)
        mask_onlyGC = np.where((mask_onlyGC == 2) | (mask_onlyGC == 0), 0, 1).astype('uint8')
        img = patch * mask_onlyGC[:, :, np.newaxis]
        return img, mask_onlyGC |
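# --- Editor's addition: hedged usage sketch for GC_executor above, not part of the original file ---
# Demonstrates both GrabCut entry points. The random patch and synthetic heat
# map are placeholders; in practice the heat map would come from a saliency or
# CAM model aligned with the patch.
def _example_grabcut():  # hypothetical helper, for illustration only
    import numpy as np
    gc = GC_executor()
    patch = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
    heat_map = np.zeros((128, 128), np.float32)
    heat_map[32:96, 32:96] = 1.0   # probable foreground
    heat_map[48:80, 48:80] = 2.0   # high-confidence foreground
    segmented, mask = gc.grab_cut_with_patch(patch, heat_map)
    segmented_rect, mask_rect = gc.grab_cut_without_patch(patch)
    return segmented, segmented_rect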