the-stack_0_2849 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for versioning.py functions.
"""
from datetime import datetime, timedelta
from operator import itemgetter
import pytest
from botocore.exceptions import ClientError
from botocore.stub import ANY
import versioning
@pytest.mark.parametrize("fail_func,error_code,stop_on_error", [
(None, None, False),
('stub_create_bucket', 'BucketAlreadyOwnedByYou', False),
('stub_create_bucket', 'TestException', True),
('stub_put_bucket_versioning', 'TestException', True),
('stub_put_bucket_lifecycle_configuration', 'TestException', False)
])
def test_create_versioned_bucket(
make_stubber, make_unique_name, stub_controller,
fail_func, error_code, stop_on_error):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_prefix = 'test-prefix'
stub_controller.add(
s3_stubber.stub_create_bucket,
(bucket_name, versioning.s3.meta.client.meta.region_name))
stub_controller.add(s3_stubber.stub_put_bucket_versioning, (bucket_name, 'Enabled'))
stub_controller.add(s3_stubber.stub_put_bucket_lifecycle_configuration, (
bucket_name, [{
'Status': 'Enabled',
'Prefix': obj_prefix,
'NoncurrentVersionExpiration': {'NoncurrentDays': ANY}
}],))
stub_controller.run(fail_func, error_code, stop_on_error)
if error_code and stop_on_error:
with pytest.raises(ClientError) as exc_info:
versioning.create_versioned_bucket(bucket_name, obj_prefix)
assert exc_info.value.response['Error']['Code'] == error_code
else:
bucket = versioning.create_versioned_bucket(bucket_name, obj_prefix)
assert bucket.name == bucket_name
@pytest.mark.parametrize("rollback_version", ["version-2", "non-existent-version"])
def test_rollback_object(
make_stubber, make_unique_name, stub_controller, rollback_version):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
versions = [
s3_stubber.make_version(
obj_key, f'version-{index}', True,
datetime.now() + timedelta(minutes=index))
for index in range(5)]
delete_markers = [
s3_stubber.make_version(
obj_key, f'version-{index}', True,
datetime.now() + timedelta(minutes=index))
for index in range(10, 15)]
sorted_versions = \
sorted(versions + delete_markers, key=itemgetter('LastModified'), reverse=True)
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name,),
kwargs={'prefix': obj_key, 'versions': versions,
'delete_markers': delete_markers})
if rollback_version in [ver['VersionId'] for ver in sorted_versions]:
for version in sorted_versions:
if version['VersionId'] != rollback_version:
stub_controller.add(
s3_stubber.stub_delete_object, (bucket_name, obj_key),
{'obj_version_id': version['VersionId']})
else:
break
stub_controller.add(
s3_stubber.stub_head_object, (bucket_name, obj_key))
stub_controller.run()
if rollback_version == 'non-existent-version':
with pytest.raises(KeyError):
versioning.rollback_object(
versioning.s3.Bucket(bucket_name), obj_key, rollback_version)
else:
versioning.rollback_object(
versioning.s3.Bucket(bucket_name), obj_key, rollback_version)
@pytest.mark.parametrize(
'code_path', ['happy', 'not_latest', 'no_deletes', 'no_versions'])
def test_revive_object(make_stubber, make_unique_name, stub_controller, code_path):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
if code_path == 'not_latest':
stub_controller.add(
s3_stubber.stub_list_object_versions,
(bucket_name, obj_key),
{'delete_markers':
[s3_stubber.make_version(obj_key, 'version1', False, datetime.now())],
'max_keys': 1})
elif code_path == 'no_deletes':
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name, obj_key),
{'versions':
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())],
'max_keys': 1})
elif code_path == 'no_versions':
stub_controller.add(
s3_stubber.stub_list_object_versions, (bucket_name, obj_key),
{'max_keys': 1})
elif code_path == 'happy':
stub_controller.add(
s3_stubber.stub_list_object_versions,
(bucket_name, obj_key),
{'delete_markers':
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())],
'max_keys': 1})
stub_controller.add(
s3_stubber.stub_delete_object, (bucket_name, obj_key),
{'obj_version_id': 'version1'})
stub_controller.add(s3_stubber.stub_head_object, (bucket_name, obj_key))
stub_controller.add(
s3_stubber.stub_get_object, (bucket_name, obj_key),
{'object_data': b'Test data', 'version_id': 'version1'})
stub_controller.run()
versioning.revive_object(versioning.s3.Bucket(bucket_name), obj_key)
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_permanently_delete_object(make_stubber, make_unique_name, error_code):
s3_stubber = make_stubber(versioning.s3.meta.client)
bucket_name = make_unique_name('bucket')
obj_key = make_unique_name('object')
s3_stubber.stub_list_object_versions(
bucket_name, obj_key, delete_markers=
[s3_stubber.make_version(obj_key, 'version1', True, datetime.now())])
s3_stubber.stub_delete_object_versions(bucket_name,
[s3_stubber.make_version(obj_key, 'version1')], error_code=error_code)
if not error_code:
versioning.permanently_delete_object(versioning.s3.Bucket(bucket_name), obj_key)
else:
with pytest.raises(ClientError) as exc_info:
versioning.permanently_delete_object(versioning.s3.Bucket(bucket_name),
obj_key)
assert exc_info.value.response['Error']['Code'] == error_code
|
the-stack_0_2850 | import unittest
from io import BytesIO
from eth.util.netstring import (header, encode, FileEncoder,
decode_file, Decoder)
class TestNetstring(unittest.TestCase):
def setUp(self):
self.test_data = b"Netstring module by Will McGugan"
self.encoded_data = b"9:Netstring,6:module,2:by,4:Will,7:McGugan,"
def test_header(self):
tests = [ (b"netstring", b"9:"),
(b"Will McGugan", b"12:"),
(b"", b"0:") ]
for test, result in tests:
self.assertEqual(header(test), result)
def test_encode(self):
tests = [ (b"netstring", b"9:netstring,"),
(b"Will McGugan", b"12:Will McGugan,"),
(b"", b"0:,") ]
for test, result in tests:
self.assertEqual(encode(test), result)
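    def test_round_trip(self):
        # Added sketch, not part of the original suite: assumes encode() and
        # Decoder.feed() behave exactly as exercised in the surrounding tests,
        # i.e. encode(b"abc") == b"3:abc," and feed() yields complete payloads.
        decoder = Decoder()
        decoded_data = []
        for word in self.test_data.split():
            for payload in decoder.feed(encode(word)):
                decoded_data.append(payload)
        self.assertEqual(decoded_data, self.test_data.split())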
def test_file_encoder(self):
file_out = BytesIO()
data = self.test_data.split()
encoder = FileEncoder(file_out)
for s in data:
encoder.write(s)
encoded_data = file_out.getvalue()
self.assertEqual(encoded_data, self.encoded_data)
def test_decode_file(self):
data = self.test_data.split()
for buffer_size in range(1, len(self.encoded_data)):
file_in = BytesIO(self.encoded_data[:])
decoded_data = list(decode_file(file_in, buffer_size=buffer_size))
self.assertEqual(decoded_data, data)
def test_decoder(self):
encoded_data = self.encoded_data
for step in range(1, len(encoded_data)):
i = 0
chunks = []
while i < len(encoded_data):
chunks.append(encoded_data[i:i+step])
i += step
decoder = Decoder()
decoded_data = []
for chunk in chunks:
for s in decoder.feed(chunk):
decoded_data.append(s)
            self.assertEqual(decoded_data, self.test_data.split())
|
the-stack_0_2854 | """ Get the Bots in any chat*
Syntax: .get_bot"""
from telethon import events
from telethon.tl.types import ChannelParticipantAdmin, ChannelParticipantsBots
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="get_bot ?(.*)"))
async def _(event):
if event.fwd_from:
return
mentions = "**Bots in this Channel**: \n"
input_str = event.pattern_match.group(1)
to_write_chat = await event.get_input_chat()
chat = None
if not input_str:
chat = to_write_chat
else:
mentions = "Bots in {} channel: \n".format(input_str)
try:
chat = await borg.get_entity(input_str)
except Exception as e:
await event.edit(str(e))
return None
try:
async for x in borg.iter_participants(chat, filter=ChannelParticipantsBots):
if isinstance(x.participant, ChannelParticipantAdmin):
mentions += "\n ⚜️ [{}](tg://user?id={}) `{}`".format(x.first_name, x.id, x.id)
else:
mentions += "\n [{}](tg://user?id={}) `{}`".format(x.first_name, x.id, x.id)
except Exception as e:
mentions += " " + str(e) + "\n"
await event.edit(mentions)
|
the-stack_0_2856 | # SPDX-FileCopyrightText: Copyright (c) 2011 LG Electronics Inc.
#
# SPDX-License-Identifier: GPL-3.0-only
import os
from fosslight_util.set_log import init_log
def main():
output_dir = "tests"
logger, _result_log = init_log(os.path.join(output_dir, "test_add_log.txt"))
logger.warning("TESTING - add mode")
if __name__ == '__main__':
main()
|
the-stack_0_2857 | # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional, Union, Literal # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator, Field, Extra # noqa: F401
from aries_cloudcontroller.model.dif_options import DIFOptions
from aries_cloudcontroller.model.presentation_definition import PresentationDefinition
class DIFProofRequest(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
DIFProofRequest - a model defined in OpenAPI
presentation_definition: The presentation_definition of this DIFProofRequest.
options: The options of this DIFProofRequest [Optional].
"""
presentation_definition: PresentationDefinition
options: Optional[DIFOptions] = None
def __init__(
self,
*,
presentation_definition: PresentationDefinition = None,
options: Optional[DIFOptions] = None,
**kwargs,
):
super().__init__(
options=options,
presentation_definition=presentation_definition,
**kwargs,
)
class Config:
allow_population_by_field_name = True
DIFProofRequest.update_forward_refs()
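# Added usage sketch (illustrative, not from the original module): assuming a
# PresentationDefinition instance `pd` built elsewhere, a request could be
# constructed and serialized roughly like this with pydantic v1:
#
#     request = DIFProofRequest(presentation_definition=pd)
#     payload = request.dict(by_alias=True, exclude_none=True)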
|
the-stack_0_2859 | #!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../volume/")
sys.path.append("../array/")
import json_parser
import pos
import cli
import api
import json
import MOUNT_ARRAY_BASIC
SPARE = MOUNT_ARRAY_BASIC.SPARE
ARRAYNAME = MOUNT_ARRAY_BASIC.ARRAYNAME
def check_result():
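    # Descriptive note (added): removal counts as successful when the array
    # stays in the NORMAL state and the former spare device is no longer a
    # member of the array.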
if api.check_state(ARRAYNAME, "NORMAL") == True:
if api.is_device_in_the_array(ARRAYNAME, SPARE) == False:
return "pass"
return "fail"
def execute():
MOUNT_ARRAY_BASIC.execute()
out = cli.remove_device(SPARE, ARRAYNAME)
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
api.clear_result(__file__)
out = execute()
result = check_result()
ret = api.set_result_manually(out, result, __file__)
pos.flush_and_kill_pos()
    exit(ret)
|
the-stack_0_2861 | """Implement models for EFS resources.
See AWS docs for details:
https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html
"""
import json
import time
from copy import deepcopy
from hashlib import md5
from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel
from moto.core.utils import (
camelcase_to_underscores,
get_random_hex,
underscores_to_camelcase,
BackendDict,
)
from moto.ec2 import ec2_backends
from moto.ec2.exceptions import InvalidSubnetIdError
from moto.efs.exceptions import (
BadRequest,
FileSystemAlreadyExists,
FileSystemInUse,
FileSystemNotFound,
MountTargetConflict,
MountTargetNotFound,
PolicyNotFound,
SubnetNotFound,
SecurityGroupNotFound,
SecurityGroupLimitExceeded,
)
def _lookup_az_id(az_name):
"""Find the Availability zone ID given the AZ name."""
ec2 = ec2_backends[az_name[:-1]]
for zone in ec2.describe_availability_zones():
if zone.name == az_name:
return zone.zone_id
class FileSystem(CloudFormationModel):
"""A model for an EFS File System Volume."""
def __init__(
self,
region_name,
creation_token,
file_system_id,
performance_mode="generalPurpose",
encrypted=False,
kms_key_id=None,
throughput_mode="bursting",
provisioned_throughput_in_mibps=None,
availability_zone_name=None,
backup=False,
lifecycle_policies=None,
file_system_policy=None,
tags=None,
):
if availability_zone_name:
backup = True
if kms_key_id and not encrypted:
raise BadRequest('If kms_key_id given, "encrypted" must be True.')
# Save given parameters
self.creation_token = creation_token
self.performance_mode = performance_mode
self.encrypted = encrypted
self.kms_key_id = kms_key_id
self.throughput_mode = throughput_mode
self.provisioned_throughput_in_mibps = provisioned_throughput_in_mibps
self.availability_zone_name = availability_zone_name
self.availability_zone_id = None
if self.availability_zone_name:
self.availability_zone_id = _lookup_az_id(self.availability_zone_name)
self._backup = backup
self.lifecycle_policies = lifecycle_policies
self.file_system_policy = file_system_policy
# Validate tag structure.
if tags is None:
self.tags = []
else:
if (
not isinstance(tags, list)
or not all(isinstance(tag, dict) for tag in tags)
or not all(set(tag.keys()) == {"Key", "Value"} for tag in tags)
):
raise ValueError("Invalid tags: {}".format(tags))
else:
self.tags = tags
# Generate AWS-assigned parameters
self.file_system_id = file_system_id
self.file_system_arn = "arn:aws:elasticfilesystem:{region}:{user_id}:file-system/{file_system_id}".format(
region=region_name, user_id=ACCOUNT_ID, file_system_id=self.file_system_id
)
self.creation_time = time.time()
self.owner_id = ACCOUNT_ID
# Initialize some state parameters
self.life_cycle_state = "available"
self._mount_targets = {}
self._size_value = 0
@property
def size_in_bytes(self):
return {
"Value": self._size_value,
"ValueInIA": 0,
"ValueInStandard": self._size_value,
"Timestamp": time.time(),
}
@property
def physical_resource_id(self):
return self.file_system_id
@property
def number_of_mount_targets(self):
return len(self._mount_targets)
@property
def backup_policy(self):
if self._backup:
return {"Status": "ENABLED"}
else:
return
def info_json(self):
ret = {
underscores_to_camelcase(k.capitalize()): v
for k, v in self.__dict__.items()
if not k.startswith("_")
}
ret["SizeInBytes"] = self.size_in_bytes
ret["NumberOfMountTargets"] = self.number_of_mount_targets
return ret
def add_mount_target(self, subnet, mount_target):
# Check that the mount target doesn't violate constraints.
for other_mount_target in self._mount_targets.values():
if other_mount_target.subnet_vpc_id != subnet.vpc_id:
raise MountTargetConflict(
"requested subnet for new mount target is not in the same VPC as existing mount targets"
)
if subnet.availability_zone in self._mount_targets:
raise MountTargetConflict("mount target already exists in this AZ")
self._mount_targets[subnet.availability_zone] = mount_target
def has_mount_target(self, subnet):
return subnet.availability_zone in self._mount_targets
def iter_mount_targets(self):
for mt in self._mount_targets.values():
yield mt
def remove_mount_target(self, subnet):
del self._mount_targets[subnet.availability_zone]
@staticmethod
def cloudformation_name_type():
return
@staticmethod
def cloudformation_type():
return "AWS::EFS::FileSystem"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-efs-filesystem.html
props = deepcopy(cloudformation_json["Properties"])
props = {camelcase_to_underscores(k): v for k, v in props.items()}
if "file_system_tags" in props:
props["tags"] = props.pop("file_system_tags")
if "backup_policy" in props:
if "status" not in props["backup_policy"]:
raise ValueError("BackupPolicy must be of type BackupPolicy.")
status = props.pop("backup_policy")["status"]
if status not in ["ENABLED", "DISABLED"]:
raise ValueError('Invalid status: "{}".'.format(status))
props["backup"] = status == "ENABLED"
if "bypass_policy_lockout_safety_check" in props:
raise ValueError(
"BypassPolicyLockoutSafetyCheck not currently "
"supported by AWS Cloudformation."
)
return efs_backends[region_name].create_file_system(resource_name, **props)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
raise NotImplementedError(
"Update of EFS File System via cloudformation is not yet implemented."
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
return efs_backends[region_name].delete_file_system(resource_name)
class MountTarget(CloudFormationModel):
"""A model for an EFS Mount Target."""
def __init__(self, file_system, subnet, ip_address, security_groups):
# Set the simple given parameters.
self.file_system_id = file_system.file_system_id
self._file_system = file_system
self._file_system.add_mount_target(subnet, self)
self.subnet_id = subnet.id
self._subnet = subnet
self.vpc_id = subnet.vpc_id
self.security_groups = security_groups
# Check the number of security groups.
if self.security_groups is not None and len(self.security_groups) > 5:
raise SecurityGroupLimitExceeded(
"The maximum number of security groups per interface has been reached."
)
# Get an IP address if needed, otherwise validate the one we're given.
if ip_address is None:
ip_address = subnet.get_available_subnet_ip(self)
else:
try:
subnet.request_ip(ip_address, self)
except Exception as e:
if "IP" in str(e) and "CIDR" in str(e):
raise BadRequest(
"Address does not fall within the subnet's address range"
)
else:
raise e
self.ip_address = ip_address
# Init non-user-assigned values.
self.owner_id = ACCOUNT_ID
self.mount_target_id = "fsmt-{}".format(get_random_hex())
self.life_cycle_state = "available"
self.network_interface_id = None
self.availability_zone_id = subnet.availability_zone_id
self.availability_zone_name = subnet.availability_zone
def clean_up(self):
self._file_system.remove_mount_target(self._subnet)
self._subnet.del_subnet_ip(self.ip_address)
def set_network_interface(self, network_interface):
self.network_interface_id = network_interface.id
def info_json(self):
ret = {
underscores_to_camelcase(k.capitalize()): v
for k, v in self.__dict__.items()
if not k.startswith("_")
}
return ret
@property
def physical_resource_id(self):
        return self.mount_target_id
@property
def subnet_vpc_id(self):
return self._subnet.vpc_id
@staticmethod
def cloudformation_name_type():
pass
@staticmethod
def cloudformation_type():
return "AWS::EFS::MountTarget"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-efs-mounttarget.html
props = deepcopy(cloudformation_json["Properties"])
props = {camelcase_to_underscores(k): v for k, v in props.items()}
return efs_backends[region_name].create_mount_target(**props)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
raise NotImplementedError(
"Updates of EFS Mount Target via cloudformation are not yet implemented."
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
return efs_backends[region_name].delete_mount_target(resource_name)
class EFSBackend(BaseBackend):
"""The backend manager of EFS resources.
This is the state-machine for each region, tracking the file systems, mount targets,
and eventually access points that are deployed. Creating, updating, and destroying
such resources should always go through this class.
"""
def __init__(self, region_name=None):
super().__init__()
self.region_name = region_name
self.creation_tokens = set()
self.file_systems_by_id = {}
self.mount_targets_by_id = {}
self.next_markers = {}
def reset(self):
# preserve region
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def _mark_description(self, corpus, max_items):
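        # Descriptive note (added): when the corpus is longer than max_items,
        # hash the JSON of the remainder to produce an opaque pagination
        # marker and remember the remainder under that marker for later pages.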
if max_items < len(corpus):
new_corpus = corpus[max_items:]
new_hash = md5(json.dumps(new_corpus).encode("utf-8"))
next_marker = new_hash.hexdigest()
self.next_markers[next_marker] = new_corpus
else:
next_marker = None
return next_marker
@property
def ec2_backend(self):
return ec2_backends[self.region_name]
def create_file_system(self, creation_token, **params):
"""Create a new EFS File System Volume.
https://docs.aws.amazon.com/efs/latest/ug/API_CreateFileSystem.html
"""
if not creation_token:
raise ValueError("No creation token given.")
if creation_token in self.creation_tokens:
raise FileSystemAlreadyExists(creation_token)
# Create a new file system ID:
def make_id():
return "fs-{}".format(get_random_hex())
fsid = make_id()
while fsid in self.file_systems_by_id:
fsid = make_id()
self.file_systems_by_id[fsid] = FileSystem(
self.region_name,
creation_token,
fsid,
**{k: v for k, v in params.items() if v is not None}
)
self.creation_tokens.add(creation_token)
return self.file_systems_by_id[fsid]
def describe_file_systems(self, marker, max_items, creation_token, file_system_id):
"""Describe all the EFS File Systems, or specific File Systems.
https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html
"""
        # Restrict the possible corpus of results based on inputs.
if creation_token and file_system_id:
raise BadRequest(
"Request cannot contain both a file system ID and a creation token."
)
elif creation_token:
# Handle the creation token case.
corpus = []
for fs in self.file_systems_by_id.values():
if fs.creation_token == creation_token:
corpus.append(fs.info_json())
elif file_system_id:
# Handle the case that a file_system_id is given.
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
corpus = [self.file_systems_by_id[file_system_id]]
elif marker is not None:
# Handle the case that a marker is given.
if marker not in self.next_markers:
raise BadRequest("Invalid Marker")
corpus = self.next_markers[marker]
else:
# Handle the vanilla case.
corpus = [fs.info_json() for fs in self.file_systems_by_id.values()]
# Handle the max_items parameter.
file_systems = corpus[:max_items]
next_marker = self._mark_description(corpus, max_items)
return next_marker, file_systems
def create_mount_target(
self, file_system_id, subnet_id, ip_address=None, security_groups=None
):
"""Create a new EFS Mount Target for a given File System to a given subnet.
Note that you can only create one mount target for each availability zone
(which is implied by the subnet ID).
https://docs.aws.amazon.com/efs/latest/ug/API_CreateMountTarget.html
"""
# Get the relevant existing resources
try:
subnet = self.ec2_backend.get_subnet(subnet_id)
except InvalidSubnetIdError:
raise SubnetNotFound(subnet_id)
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
file_system = self.file_systems_by_id[file_system_id]
# Validate the security groups.
if security_groups:
sg_lookup = {sg.id for sg in self.ec2_backend.describe_security_groups()}
for sg_id in security_groups:
if sg_id not in sg_lookup:
raise SecurityGroupNotFound(sg_id)
# Create the new mount target
mount_target = MountTarget(file_system, subnet, ip_address, security_groups)
# Establish the network interface.
network_interface = self.ec2_backend.create_network_interface(
subnet, [mount_target.ip_address], group_ids=security_groups
)
mount_target.set_network_interface(network_interface)
# Record the new mount target
self.mount_targets_by_id[mount_target.mount_target_id] = mount_target
return mount_target
def describe_mount_targets(
self, max_items, file_system_id, mount_target_id, access_point_id, marker
):
"""Describe the mount targets given a mount target ID or a file system ID.
Note that as of this writing access points, and thus access point IDs are not
supported.
https://docs.aws.amazon.com/efs/latest/ug/API_DescribeMountTargets.html
"""
# Restrict the possible corpus of results based on inputs.
if not (bool(file_system_id) ^ bool(mount_target_id) ^ bool(access_point_id)):
raise BadRequest("Must specify exactly one mutually exclusive parameter.")
elif file_system_id:
# Handle the case that a file_system_id is given.
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
corpus = [
mt.info_json()
for mt in self.file_systems_by_id[file_system_id].iter_mount_targets()
]
elif mount_target_id:
if mount_target_id not in self.mount_targets_by_id:
raise MountTargetNotFound(mount_target_id)
# Handle mount target specification case.
corpus = [self.mount_targets_by_id[mount_target_id].info_json()]
else:
# We don't handle access_point_id's yet.
assert False, "Moto does not yet support EFS access points."
# Handle the case that a marker is given. Note that the handling is quite
# different from that in describe_file_systems.
if marker is not None:
if marker not in self.next_markers:
raise BadRequest("Invalid Marker")
corpus_mtids = {m["MountTargetId"] for m in corpus}
marked_mtids = {m["MountTargetId"] for m in self.next_markers[marker]}
mt_ids = corpus_mtids & marked_mtids
corpus = [self.mount_targets_by_id[mt_id].info_json() for mt_id in mt_ids]
# Handle the max_items parameter.
mount_targets = corpus[:max_items]
next_marker = self._mark_description(corpus, max_items)
return next_marker, mount_targets
def delete_file_system(self, file_system_id):
"""Delete the file system specified by the given file_system_id.
Note that mount targets must be deleted first.
https://docs.aws.amazon.com/efs/latest/ug/API_DeleteFileSystem.html
"""
if file_system_id not in self.file_systems_by_id:
raise FileSystemNotFound(file_system_id)
file_system = self.file_systems_by_id[file_system_id]
if file_system.number_of_mount_targets > 0:
raise FileSystemInUse(
"Must delete all mount targets before deleting file system."
)
del self.file_systems_by_id[file_system_id]
self.creation_tokens.remove(file_system.creation_token)
return
def delete_mount_target(self, mount_target_id):
"""Delete a mount target specified by the given mount_target_id.
Note that this will also delete a network interface.
https://docs.aws.amazon.com/efs/latest/ug/API_DeleteMountTarget.html
"""
if mount_target_id not in self.mount_targets_by_id:
raise MountTargetNotFound(mount_target_id)
mount_target = self.mount_targets_by_id[mount_target_id]
self.ec2_backend.delete_network_interface(mount_target.network_interface_id)
del self.mount_targets_by_id[mount_target_id]
mount_target.clean_up()
return
def describe_backup_policy(self, file_system_id):
backup_policy = self.file_systems_by_id[file_system_id].backup_policy
if not backup_policy:
raise PolicyNotFound("None")
return backup_policy
efs_backends = BackendDict(EFSBackend, "efs")
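# Added usage sketch (illustrative, not part of moto itself): exercising one
# region's backend directly, assuming "us-east-1" is a valid region key and
# that create_file_system/describe_file_systems behave as documented above.
#
#     backend = efs_backends["us-east-1"]
#     fs = backend.create_file_system("my-creation-token")
#     marker, systems = backend.describe_file_systems(
#         marker=None, max_items=10, creation_token=None, file_system_id=None)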
|
the-stack_0_2862 | # -*- coding: utf-8 -*-
"""
author: zengbin93
email: [email protected]
create_dt: 2021/12/13 17:39
describe: event performance analysis
"""
import os
import os.path
import traceback
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from tqdm import tqdm
from typing import Callable, List
from czsc.objects import Factor
from czsc.data.ts_cache import TsDataCache
from czsc.sensors.utils import generate_signals
from czsc.utils import io
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class FactorsSensor:
"""因子(Factor)感应器:分析各种信号和因子的表现"""
def __init__(self,
results_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
base_freq: str,
freqs: List[str],
get_signals: Callable,
get_factors: Callable):
self.name = self.__class__.__name__
self.version = "V20211213"
os.makedirs(results_path, exist_ok=True)
self.results_path = results_path
self.sdt = sdt
self.edt = edt
self.get_signals = get_signals
self.get_factors = get_factors
self.factors: List[Factor] = get_factors()
self.base_freq = base_freq
self.freqs = freqs
self.file_docx = os.path.join(results_path, f'factors_sensor_{sdt}_{edt}.docx')
self.writer = WordWriter(self.file_docx)
self.dc = dc
self.betas = ['000001.SH', '000016.SH', '000905.SH', '000300.SH', '399001.SZ', '399006.SZ']
self.file_sf = os.path.join(results_path, f'factors_{sdt}_{edt}.pkl')
self.signals_path = os.path.join(results_path, 'signals')
os.makedirs(self.signals_path, exist_ok=True)
if os.path.exists(self.file_sf):
self.sf = io.read_pkl(self.file_sf)
else:
self.sf = self.get_stock_factors()
io.save_pkl(self.sf, self.file_sf)
def get_share_factors(self, ts_code: str, name: str):
"""获取单个标的因子信息"""
dc = self.dc
sdt = self.sdt
edt = self.edt
factors = self.factors
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals)
results = []
for s in signals:
row = {'name': name, 'ts_code': ts_code}
for factor in factors:
row[factor.name] = factor.is_match(s)
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
row.update(nb_info)
results.append(row)
df_res = pd.DataFrame(results)
if df_res.empty:
return df_res
df_res = df_res[pd.to_datetime(sdt) <= df_res['trade_date']]
df_res = df_res[df_res['trade_date'] <= pd.to_datetime(edt)]
        # Add the total market value
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
df_res = df_res.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
return signals, df_res
def get_stock_factors(self):
"""获取全部股票的因子信息"""
stocks = self.dc.stock_basic()
all_factors = []
for row in tqdm(stocks.to_dict('records'), desc="get_stock_factors"):
ts_code = row['ts_code']
name = row['name']
try:
signals, factors = self.get_share_factors(ts_code, name)
all_factors.append(factors)
file_signals = os.path.join(self.signals_path, f'{ts_code}.pkl')
io.save_pkl(signals, file_signals)
except:
print(f"get_share_factors error: {ts_code}, {name}")
traceback.print_exc()
df_factors = pd.concat(all_factors, ignore_index=True)
return df_factors
def validate_performance(self):
factors = self.factors
sf = self.sf
results = [{
"name": "全市场", "count": len(sf), 'n1b': sf.n1b.mean(), 'n2b': sf.n2b.mean(),
'n3b': sf.n3b.mean(), 'n5b': sf.n5b.mean(), 'n10b': sf.n10b.mean(), 'n20b': sf.n20b.mean()
}]
for factor in factors:
df = sf[sf[factor.name]]
row = {"name": factor.name, "count": len(df)}
row.update(df[['n1b', 'n2b', 'n3b', 'n5b', 'n10b', 'n20b']].mean().to_dict())
results.append(row)
df_nb_info = pd.DataFrame(results)
df_nb_info.to_excel(os.path.join(self.results_path, f"factors_nb_info.xlsx"), index=False)
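# Added usage sketch (illustrative only): the argument values below are
# placeholders, and `dc`, `my_get_signals`, `my_get_factors` are assumed to be
# supplied by the caller as described in the constructor signature above.
#
#     fs = FactorsSensor(results_path="results", sdt="20200101", edt="20211231",
#                        dc=dc, base_freq=..., freqs=[...],
#                        get_signals=my_get_signals, get_factors=my_get_factors)
#     fs.validate_performance()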
|
the-stack_0_2863 | #!/usr/bin/env python3
# encoding: utf-8
"""
pyQms
-----
Python module for fast and accurate mass spectrometry data quantification
:license: MIT, see LICENSE.txt for more details
Authors:
* Leufken, J.
* Niehues, A.
* Sarin, L.P.
* Hippler, M.
* Leidel, S.A.
* Fufezan, C.
"""
import pickle
import sys
import os
try:
import pymzml
import pymzml.plot
except:
print("Please install pymzML via: pip install pymzml")
def main(result_pkl=None):
"""
usage:
./plot_match_examples.py <Path2ResultPkl>
Extracts the match information and plots one example isotopologue match into
the 'data' folder. Uses the plot function of pymzML (`pymzML.plot`_). Use
this script as template for annotating spectra with match information.
Note:
Plots only one high scored formula (mScore >0.95) from the result pkl.
    Use it e.g. with the 'BSA1.mzML_pyQms_results.pkl' obtained from the
example script parse_ident_file_and_quantify_with_carbamidomethylation.py
to get example plotting data.
.. _pymzML.plot:
https://pymzml.github.io/plot.html
"""
results_class = pickle.load(open(result_pkl, "rb"))
for key, i, entry in results_class.extract_results():
if entry.score > 0.95:
p = pymzml.plot.Factory()
label_x = []
measured_peaks = []
matched_peaks = []
for (
measured_mz,
measured_intensity,
relative_i,
calculated_mz,
calculated_intensity,
) in entry.peaks:
if measured_mz is not None:
measured_peaks.append((measured_mz, measured_intensity))
matched_peaks.append(
(calculated_mz, calculated_intensity * entry.scaling_factor)
)
label_x.append(
(
calculated_mz,
"{0:5.3f} ppm".format(
(measured_mz - calculated_mz) / (measured_mz * 1e-6)
),
)
)
mz_only = [n[0] for n in measured_peaks]
mz_range = [min(mz_only) - 1, max(mz_only) + 1]
peptides = results_class.lookup["formula to molecule"][key.formula]
if len(peptides) > 1:
continue
p.newPlot(
header="Formula: {0}; Peptide: {1}; Charge: {2}\n File: {3}; Scan: {4}; RT: {5:1.3f}\n Amount: {6:1.3f}; Score: {7:1.3f}".format(
key.formula,
peptides[0],
key.charge,
key.file_name,
entry.spec_id,
entry.rt,
entry.scaling_factor,
entry.score,
),
mzRange=mz_range,
)
p.add(measured_peaks, color=(0, 0, 0), style="sticks")
p.add(matched_peaks, color=(0, 200, 0), style="triangles")
p.add(label_x, color=(0, 0, 255), style="label_x")
plot_name = os.path.join(
os.pardir,
"data",
"{0}_Peptide_{1}_Charge_{2}.xhtml".format(
key.file_name, peptides[0], key.charge
),
)
p.save(filename=plot_name, mzRange=mz_range)
print("Plotted file {0}".format(plot_name))
break
if __name__ == "__main__":
if len(sys.argv) < 2:
print(main.__doc__)
else:
main(result_pkl=sys.argv[1])
|
the-stack_0_2864 | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import shutil
import subprocess
import sys
import os
from charmhelpers.fetch import (
apt_install, filter_installed_packages,
apt_update
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
relation_get,
relation_set,
relation_ids,
config,
Hooks, UnregisteredHookError,
log,
status_set,
WARNING,
DEBUG,
)
from charmhelpers.core.host import (
service_restart,
lsb_release,
mkdir,
init_is_systemd,
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
pausable_restart_on_change as restart_on_change,
is_unit_paused_set,
get_os_codename_install_source,
CompareOpenStackReleases,
)
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
)
from ceilometer_utils import (
disable_package_apache_site,
get_packages,
CEILOMETER_DB,
CEILOMETER_SERVICE,
CEILOMETER_ROLE,
CEILOMETER_API_SYSTEMD_CONF,
register_configs,
restart_map,
run_in_apache,
services,
get_ceilometer_context,
get_shared_secret,
do_openstack_upgrade,
set_shared_secret,
assess_status,
reload_systemd,
ceilometer_upgrade,
)
from ceilometer_contexts import CEILOMETER_PORT
from charmhelpers.contrib.openstack.ip import (
canonical_url,
PUBLIC, INTERNAL, ADMIN
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
get_relation_ip,
is_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config,
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.peerstorage import (
peer_retrieve,
peer_store,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.hardening.harden import harden
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook('install.real')
@harden()
def install():
execd_preinstall()
origin = config('openstack-origin')
if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
origin = 'cloud:precise-grizzly'
configure_installation_source(origin)
packages = filter_installed_packages(get_packages())
if packages:
status_set('maintenance', 'Installing packages')
apt_update(fatal=True)
apt_install(packages, fatal=True)
if init_is_systemd():
# NOTE(jamespage): ensure systemd override folder exists prior to
# attempting to write override.conf
mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
if run_in_apache():
disable_package_apache_site()
@hooks.hook("amqp-relation-joined")
def amqp_joined():
relation_set(username=config('rabbit-user'),
vhost=config('rabbit-vhost'))
@hooks.hook("shared-db-relation-joined")
def db_joined():
relation_set(ceilometer_database=CEILOMETER_DB)
@hooks.hook("metric-service-relation-joined")
def metric_service_joined():
# NOTE(jamespage): gnocchiclient is required to support
# the gnocchi event dispatcher
apt_install(filter_installed_packages(['python-gnocchiclient']),
fatal=True)
@hooks.hook("amqp-relation-changed",
"amqp-relation-departed",
"shared-db-relation-changed",
"shared-db-relation-departed",
"identity-service-relation-changed",
"identity-service-relation-departed",
"identity-credentials-relation-changed",
"identity-credentials-relation-departed",
"metric-service-relation-changed",
"metric-service-relation-departed")
@restart_on_change(restart_map())
def any_changed():
CONFIGS.write_all()
configure_https()
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
ceilometer_joined()
# NOTE(jamespage): ceilometer@ocata requires both gnocchi
# and mongodb to be configured to successfully
# upgrade the underlying data stores.
if ('metric-service' in CONFIGS.complete_contexts() and
'identity-service' in CONFIGS.complete_contexts()):
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
# NOTE(jamespage): however at queens, this limitation has gone!
if (cmp_codename < 'queens' and
'mongodb' not in CONFIGS.complete_contexts()):
return
ceilometer_upgrade()
def configure_https():
"""Enables SSL API Apache config if appropriate."""
# need to write all to ensure changes to the entire request pipeline
# propagate (c-api, haprxy, apache)
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename >= 'queens':
return
CONFIGS.write_all()
if 'https' in CONFIGS.complete_contexts():
cmd = ['a2ensite', 'openstack_https_frontend']
subprocess.check_call(cmd)
else:
cmd = ['a2dissite', 'openstack_https_frontend']
subprocess.check_call(cmd)
# TODO: improve this by checking if local CN certs are available
# first then checking reload status (see LP #1433114).
if not is_unit_paused_set():
try:
subprocess.check_call(['service', 'apache2', 'reload'])
except subprocess.CalledProcessError:
subprocess.call(['service', 'apache2', 'restart'])
@hooks.hook('config-changed')
@restart_on_change(restart_map())
@harden()
def config_changed():
if not config('action-managed-upgrade'):
if openstack_upgrade_available('ceilometer-common'):
status_set('maintenance', 'Upgrading to new OpenStack release')
do_openstack_upgrade(CONFIGS)
install_event_pipeline_setting()
update_nrpe_config()
CONFIGS.write_all()
# NOTE(jamespage): Drop when charm switches to apache2+mod_wsgi
# reload ensures port override is set correctly
reload_systemd()
ceilometer_joined()
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename < 'queens':
open_port(CEILOMETER_PORT)
else:
close_port(CEILOMETER_PORT)
configure_https()
# NOTE(jamespage): Iterate identity-{service,credentials} relations
# to pickup any required databag changes on these
# relations.
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
for rid in relation_ids('identity-credentials'):
keystone_credentials_joined(relid=rid)
# Define the new ocf resource and use the key delete_resources to delete
# legacy resource for >= Liberty since the ceilometer-agent-central moved
# to ceilometer-polling in liberty (see LP: #1606787).
for rid in relation_ids('ha'):
ha_joined(rid)
def install_event_pipeline_setting():
src_file = 'files/event_pipeline_alarm.yaml'
dest_file = '/etc/ceilometer/event_pipeline_alarm.yaml'
if not os.path.isdir(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copy(src_file, dest_file)
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
install()
update_nrpe_config()
any_changed()
for rid in relation_ids('cluster'):
cluster_joined(relation_id=rid)
@hooks.hook('cluster-relation-joined')
@restart_on_change(restart_map(), stopstart=True)
def cluster_joined(relation_id=None):
# If this node is the elected leader then share our secret with other nodes
if is_elected_leader('grp_ceilometer_vips'):
peer_store('shared_secret', get_shared_secret())
CONFIGS.write_all()
settings = {}
for addr_type in ADDRESS_TYPES:
address = get_relation_ip(
addr_type,
cidr_network=config('os-{}-network'.format(addr_type)))
if address:
settings['{}-address'.format(addr_type)] = address
settings['private-address'] = get_relation_ip('cluster')
relation_set(relation_id=relation_id, relation_settings=settings)
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
shared_secret = peer_retrieve('shared_secret')
if shared_secret is None or shared_secret.strip() == '':
log('waiting for shared secret to be provided by leader')
elif not shared_secret == get_shared_secret():
set_shared_secret(shared_secret)
CONFIGS.write_all()
@hooks.hook('ha-relation-joined')
def ha_joined(relation_id=None):
cluster_config = get_hacluster_config()
delete_resources = []
delete_resources.append('res_ceilometer_polling')
resources = {
'res_ceilometer_haproxy': 'lsb:haproxy',
'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
}
resource_params = {
'res_ceilometer_haproxy': 'op monitor interval="5s"',
'res_ceilometer_agent_central': 'op monitor interval="30s"'
}
if config('dns-ha'):
update_dns_ha_resource_params(relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_ceilometer_{}_vip'.format(iface)
if vip_key in vip_group:
if vip not in resource_params[vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
log("Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip), WARNING)
continue
resources[vip_key] = res_ceilometer_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'
''.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
relation_set(relation_id=relation_id,
groups={'grp_ceilometer_vips':
' '.join(vip_group)})
init_services = {
'res_ceilometer_haproxy': 'haproxy'
}
clones = {
'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
}
relation_set(relation_id=relation_id,
init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
delete_resources=delete_resources,
clones=clones)
@hooks.hook('ha-relation-changed')
def ha_changed():
clustered = relation_get('clustered')
if not clustered or clustered in [None, 'None', '']:
log('ha_changed: hacluster subordinate not fully clustered.')
else:
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
@hooks.hook("identity-credentials-relation-joined")
def keystone_credentials_joined(relid=None):
relation_set(relation_id=relid,
username=CEILOMETER_SERVICE,
requested_roles=CEILOMETER_ROLE)
@hooks.hook("identity-service-relation-joined")
def keystone_joined(relid=None):
cmp_codename = CompareOpenStackReleases(
get_os_codename_install_source(config('openstack-origin')))
if cmp_codename >= 'queens':
log('Skipping endpoint registration for >= Queens', level=DEBUG)
return
if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
return
public_url = "{}:{}".format(
canonical_url(CONFIGS, PUBLIC),
CEILOMETER_PORT
)
admin_url = "{}:{}".format(
canonical_url(CONFIGS, ADMIN),
CEILOMETER_PORT
)
internal_url = "{}:{}".format(
canonical_url(CONFIGS, INTERNAL),
CEILOMETER_PORT
)
region = config("region")
relation_set(relation_id=relid,
service=CEILOMETER_SERVICE,
public_url=public_url,
admin_url=admin_url,
internal_url=internal_url,
requested_roles=CEILOMETER_ROLE,
region=region)
@hooks.hook('identity-notifications-relation-changed')
def identity_notifications_changed():
"""Receive notifications from keystone."""
notifications = relation_get()
if not notifications:
return
# Some ceilometer services will create a client and request
# the service catalog from keystone on startup. So if
# endpoints change we need to restart these services.
key = '%s-endpoint-changed' % (CEILOMETER_SERVICE)
if key in notifications:
service_restart('ceilometer-alarm-evaluator')
service_restart('ceilometer-alarm-notifier')
@hooks.hook("ceilometer-service-relation-joined")
def ceilometer_joined():
# Pass local context data onto related agent services
context = get_ceilometer_context()
    # This value gets transformed to a path by the context, so we need to
    # pass the data on to the agents.
if 'rabbit_ssl_ca' in context:
with open(context['rabbit_ssl_ca']) as fh:
context['rabbit_ssl_ca'] = base64.b64encode(fh.read())
for relid in relation_ids('ceilometer-service'):
relation_set(relid, context)
@hooks.hook('nrpe-external-master-relation-joined',
'nrpe-external-master-relation-changed')
def update_nrpe_config():
# python-dbus is used by check_upstart_job
apt_install('python-dbus')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.copy_nrpe_checks()
nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
nrpe.add_haproxy_checks(nrpe_setup, current_unit)
nrpe_setup.write()
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
assess_status(CONFIGS)
|
the-stack_0_2865 | from ast import literal_eval
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.conf import settings
from rest_framework import authentication, permissions,\
viewsets, filters, response, status
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from bokeh.embed import autoload_server
from .forms import JobFilter
from .models import Job, Metric, Measurement, VersionedPackage
from .serializers import JobSerializer, MetricSerializer,\
RegressionSerializer
try:
bokeh_url = settings.BOKEH_URL
except AttributeError:
# if not specified use the default which is localhost:5006
bokeh_url = 'default'
class DefaultsMixin(object):
"""
Default settings for view authentication, permissions,
filtering and pagination.
"""
authentication_classes = (
authentication.BasicAuthentication,
authentication.TokenAuthentication,
)
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
)
paginate_by = 100
# list of available filter_backends, will enable these for all ViewSets
filter_backends = (
filters.DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
)
class JobViewSet(DefaultsMixin, CacheResponseMixin, viewsets.ModelViewSet):
"""API endpoint for listing and creating jobs"""
queryset = Job.objects.\
prefetch_related('packages', 'measurements').order_by('date')
serializer_class = JobSerializer
filter_class = JobFilter
search_fields = ('ci_id',)
ordering_fields = ('date',)
class MeasurementViewSet(DefaultsMixin, CacheResponseMixin,
viewsets.ModelViewSet):
"""API endpoint consumed by the monitor app"""
queryset = Measurement.objects.\
prefetch_related('job', 'metric').order_by('job__date')
serializer_class = RegressionSerializer
filter_fields = ('job__ci_dataset', 'metric')
class MetricViewSet(DefaultsMixin, CacheResponseMixin, viewsets.ModelViewSet):
"""API endpoint for listing and creating metrics"""
queryset = Metric.objects.order_by('metric')
serializer_class = MetricSerializer
def create(self, request, *args, **kwargs):
# many=True for adding multiple items at once
serializer = self.get_serializer(data=request.data,
many=isinstance(request.data, list))
serializer.is_valid(raise_exception=True)
serializer.save()
return response.Response(serializer.data,
status=status.HTTP_201_CREATED)
search_fields = ('metric', )
ordering_fields = ('metric',)
class DatasetViewSet(DefaultsMixin, viewsets.ViewSet):
"""API endpoint for listing datasets"""
def list(self, request):
datasets = Job.objects.values_list('ci_dataset', flat=True).distinct()
return response.Response(datasets)
class DefaultsViewSet(DefaultsMixin, viewsets.ViewSet):
"""
API endpoint for listing default values used by
the bokeh apps
"""
def get_defaults(self):
queryset = Job.objects.values('ci_id', 'ci_dataset').latest('pk')
ci_id = queryset['ci_id']
ci_dataset = queryset['ci_dataset']
queryset = Metric.objects.values_list('metric', flat=True)
if 'AM1' in queryset:
metric = 'AM1'
else:
metric = queryset.latest('pk')
snr_cut = '100'
window = 'months'
return {'ci_id': ci_id, 'ci_dataset': ci_dataset,
'metric': metric, 'snr_cut': snr_cut,
'window': window}
def list(self, request):
defaults = self.get_defaults()
return response.Response(defaults)
class BokehAppViewSet(DefaultsMixin, viewsets.ViewSet):
def get_app_data(self, ci_id, ci_dataset, metric):
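        # Descriptive note (added): a measurement's 'metadata' dict carries a
        # 'blobs' entry mapping blob roles to identifiers; the matching blobs
        # are then resolved from the job's serialized blob list below.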
data = {}
blobs = Job.objects.filter(ci_id=ci_id,
ci_dataset=ci_dataset).values('blobs')
metadata = Measurement.\
objects.filter(metric=metric, job__ci_id=ci_id,
job__ci_dataset=ci_dataset).values('metadata')
if metadata.exists():
# workaround for getting item from queryset
metadata = metadata[0]['metadata']
if metadata:
metadata = literal_eval(literal_eval(metadata))
blob_id = metadata.pop('blobs')
data['metadata'] = metadata
if blobs.exists():
# workaround for getting item from queryset
blobs = blobs[0]['blobs']
if blobs:
blobs = literal_eval(literal_eval(blobs))
for blob in blobs:
# Look up for data blobs
if blob['identifier'] == blob_id['matchedDataset']:
data['matchedDataset'] = blob['data']
elif blob['identifier'] == blob_id['photomModel']:
data['photomModel'] = blob['data']
elif blob['identifier'] == blob_id['astromModel']:
data['astromModel'] = blob['data']
return data
def list(self, request):
defaults = DefaultsViewSet().get_defaults()
ci_id = self.request.query_params.get('ci_id',
defaults['ci_id'])
ci_dataset = self.request.query_params.get('ci_dataset',
defaults['ci_dataset'])
metric = self.request.query_params.get('metric',
defaults['metric'])
data = self.get_app_data(ci_id, ci_dataset, metric)
return response.Response(data)
def embed_bokeh(request, bokeh_app):
"""Render the requested app from the bokeh server"""
# http://bokeh.pydata.org/en/0.12.3/docs/reference/embed.html
# TODO: test if bokeh server is reachable
bokeh_script = autoload_server(None, app_path="/{}".format(bokeh_app),
url=bokeh_url)
template = loader.get_template('dashboard/embed_bokeh.html')
context = {'bokeh_script': bokeh_script,
'bokeh_app': bokeh_app}
response = HttpResponse(template.render(context, request))
# Save full url path in the HTTP response, so that the bokeh
# app can use this info, e.g:
# http://localhost:8000/dashboard/AMx/?metric=AM1&ci_dataset=cfht&ci_id=452
response.set_cookie('django_full_path', request.get_full_path())
return response
def home(request):
"""Render the home page"""
n_metrics = len(Metric.objects.all())
job = Job.objects.latest('pk')
n_packages = len(VersionedPackage.objects.filter(job=job))
n_jobs = len(Job.objects.all())
n_meas = len(Measurement.objects.all())
datasets = Job.objects.values_list('ci_dataset', flat=True).distinct()
last = Job.objects.latest('pk').date
context = {"n_metrics": n_metrics,
"n_packages": n_packages,
"n_jobs": n_jobs,
"n_meas": n_meas,
"datasets": ", ".join(datasets),
"last": last}
return render(request, 'dashboard/index.html', context)
|
the-stack_0_2867 | # nuScenes dev-kit.
# Code written by Oscar Beijbom, 2018.
import copy
import os.path as osp
import struct
from abc import ABC, abstractmethod
from functools import reduce
from typing import Tuple, List, Dict
import cv2
import numpy as np
from matplotlib.axes import Axes
from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import view_points, transform_matrix
class PointCloud(ABC):
"""
Abstract class for manipulating and viewing point clouds.
Every point cloud (lidar and radar) consists of points where:
- Dimensions 0, 1, 2 represent x, y, z coordinates.
These are modified when the point cloud is rotated or translated.
- All other dimensions are optional. Hence these have to be manually modified if the reference frame changes.
"""
def __init__(self, points: np.ndarray):
"""
Initialize a point cloud and check it has the correct dimensions.
:param points: <np.float: d, n>. d-dimensional input point cloud matrix.
"""
assert points.shape[0] == self.nbr_dims(), 'Error: Pointcloud points must have format: %d x n' % self.nbr_dims()
self.points = points
@staticmethod
@abstractmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
pass
@classmethod
@abstractmethod
def from_file(cls, file_name: str) -> 'PointCloud':
"""
Loads point cloud from disk.
:param file_name: Path of the pointcloud file on disk.
:return: PointCloud instance.
"""
pass
@classmethod
def from_file_multisweep(cls,
nusc: 'NuScenes',
sample_rec: Dict,
chan: str,
ref_chan: str,
nsweeps: int = 5,
min_distance: float = 1.0) -> Tuple['PointCloud', np.ndarray]:
"""
Return a point cloud that aggregates multiple sweeps.
As every sweep is in a different coordinate frame, we need to map the coordinates to a single reference frame.
As every sweep has a different timestamp, we need to account for that in the transformations and timestamps.
:param nusc: A NuScenes instance.
:param sample_rec: The current sample.
:param chan: The lidar/radar channel from which we track back n sweeps to aggregate the point cloud.
:param ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to.
        :param nsweeps: Number of sweeps to aggregate.
:param min_distance: Distance below which points are discarded.
:return: (all_pc, all_times). The aggregated point cloud and timestamps.
"""
# Init.
points = np.zeros((cls.nbr_dims(), 0))
all_pc = cls(points)
all_times = np.zeros((1, 0))
# Get reference pose and timestamp.
ref_sd_token = sample_rec['data'][ref_chan]
ref_sd_rec = nusc.get('sample_data', ref_sd_token)
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame.
ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True)
# Homogeneous transformation matrix from global to _current_ ego car frame.
car_from_global = transform_matrix(ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']),
inverse=True)
# Aggregate current and previous sweeps.
sample_data_token = sample_rec['data'][chan]
current_sd_rec = nusc.get('sample_data', sample_data_token)
for _ in range(nsweeps):
# Load up the pointcloud and remove points close to the sensor.
current_pc = cls.from_file(osp.join(nusc.dataroot, current_sd_rec['filename']))
current_pc.remove_close(min_distance)
# Get past pose.
current_pose_rec = nusc.get('ego_pose', current_sd_rec['ego_pose_token'])
global_from_car = transform_matrix(current_pose_rec['translation'],
Quaternion(current_pose_rec['rotation']), inverse=False)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = nusc.get('calibrated_sensor', current_sd_rec['calibrated_sensor_token'])
car_from_current = transform_matrix(current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']),
inverse=False)
# Fuse four transformation matrices into one and perform transform.
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc.transform(trans_matrix)
# Add time vector which can be used as a temporal feature.
time_lag = ref_time - 1e-6 * current_sd_rec['timestamp'] # Positive difference.
times = time_lag * np.ones((1, current_pc.nbr_points()))
all_times = np.hstack((all_times, times))
# Merge with key pc.
all_pc.points = np.hstack((all_pc.points, current_pc.points))
# Abort if there are no previous sweeps.
if current_sd_rec['prev'] == '':
break
else:
current_sd_rec = nusc.get('sample_data', current_sd_rec['prev'])
return all_pc, all_times
def nbr_points(self) -> int:
"""
Returns the number of points.
:return: Number of points.
"""
return self.points.shape[1]
def subsample(self, ratio: float) -> None:
"""
Sub-samples the pointcloud.
:param ratio: Fraction to keep.
"""
selected_ind = np.random.choice(np.arange(0, self.nbr_points()), size=int(self.nbr_points() * ratio))
self.points = self.points[:, selected_ind]
def remove_close(self, radius: float) -> None:
"""
        Removes points too close to the origin, i.e. within the given radius in both x and y.
:param radius: Radius below which points are removed.
"""
x_filt = np.abs(self.points[0, :]) < radius
y_filt = np.abs(self.points[1, :]) < radius
not_close = np.logical_not(np.logical_and(x_filt, y_filt))
self.points = self.points[:, not_close]
def translate(self, x: np.ndarray) -> None:
"""
Applies a translation to the point cloud.
:param x: <np.float: 3, 1>. Translation in x, y, z.
"""
for i in range(3):
self.points[i, :] = self.points[i, :] + x[i]
def rotate(self, rot_matrix: np.ndarray) -> None:
"""
Applies a rotation.
:param rot_matrix: <np.float: 3, 3>. Rotation matrix.
"""
self.points[:3, :] = np.dot(rot_matrix, self.points[:3, :])
def transform(self, transf_matrix: np.ndarray) -> None:
"""
Applies a homogeneous transform.
:param transf_matrix: <np.float: 4, 4>. Homogenous transformation matrix.
"""
self.points[:3, :] = transf_matrix.dot(np.vstack((self.points[:3, :], np.ones(self.nbr_points()))))[:3, :]
def render_height(self,
ax: Axes,
view: np.ndarray = np.eye(4),
x_lim: Tuple[float, float] = (-20, 20),
y_lim: Tuple[float, float] = (-20, 20),
marker_size: float = 1) -> None:
"""
Very simple method that applies a transformation and then scatter plots the points colored by height (z-value).
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max). x range for plotting.
:param y_lim: (min, max). y range for plotting.
:param marker_size: Marker size.
"""
self._render_helper(2, ax, view, x_lim, y_lim, marker_size)
def render_intensity(self,
ax: Axes,
view: np.ndarray = np.eye(4),
x_lim: Tuple[float, float] = (-20, 20),
y_lim: Tuple[float, float] = (-20, 20),
marker_size: float = 1) -> None:
"""
Very simple method that applies a transformation and then scatter plots the points colored by intensity.
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max).
:param y_lim: (min, max).
:param marker_size: Marker size.
"""
self._render_helper(3, ax, view, x_lim, y_lim, marker_size)
def _render_helper(self,
color_channel: int,
ax: Axes,
view: np.ndarray,
x_lim: Tuple[float, float],
y_lim: Tuple[float, float],
marker_size: float) -> None:
"""
Helper function for rendering.
:param color_channel: Point channel to use as color.
:param ax: Axes on which to render the points.
:param view: <np.float: n, n>. Defines an arbitrary projection (n <= 4).
:param x_lim: (min, max).
:param y_lim: (min, max).
:param marker_size: Marker size.
"""
points = view_points(self.points[:3, :], view, normalize=False)
ax.scatter(points[0, :], points[1, :], c=self.points[color_channel, :], s=marker_size)
ax.set_xlim(x_lim[0], x_lim[1])
ax.set_ylim(y_lim[0], y_lim[1])
class LidarPointCloud(PointCloud):
@staticmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
return 4
@classmethod
def from_file(cls, file_name: str) -> 'LidarPointCloud':
"""
Loads LIDAR data from binary numpy format. Data is stored as (x, y, z, intensity, ring index).
:param file_name: Path of the pointcloud file on disk.
:return: LidarPointCloud instance (x, y, z, intensity).
"""
assert file_name.endswith('.bin'), 'Unsupported filetype {}'.format(file_name)
scan = np.fromfile(file_name, dtype=np.float32)
points = scan.reshape((-1, 5))[:, :cls.nbr_dims()]
return cls(points.T)
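# A minimal usage sketch showing how LidarPointCloud is typically consumed; the file path,
# distance threshold and translation below are hypothetical example values, not part of any API.
def _example_lidar_usage(file_name: str = 'lidar_sweep.bin') -> 'LidarPointCloud':
    """Loads a lidar sweep, drops returns near the sensor and shifts it by an example offset."""
    pc = LidarPointCloud.from_file(file_name)  # <np.float: 4, n> matrix: x, y, z, intensity.
    pc.remove_close(1.0)                       # Discard points within 1 m of the sensor in x/y.
    pc.translate(np.array([0.0, 0.0, -1.8]))   # Hypothetical sensor height above the ground.
    return pc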
class RadarPointCloud(PointCloud):
# Class-level settings for radar pointclouds, see from_file().
invalid_states = [0] # type: List[int]
dynprop_states = range(7) # type: List[int] # Use [0, 2, 6] for moving objects only.
ambig_states = [3] # type: List[int]
@classmethod
def disable_filters(cls) -> None:
"""
Disable all radar filter settings.
Use this method to plot all radar returns.
Note that this method affects the global settings.
"""
cls.invalid_states = list(range(18))
cls.dynprop_states = list(range(8))
cls.ambig_states = list(range(5))
@classmethod
def default_filters(cls) -> None:
"""
Set the defaults for all radar filter settings.
Note that this method affects the global settings.
"""
cls.invalid_states = [0]
cls.dynprop_states = range(7)
cls.ambig_states = [3]
@staticmethod
def nbr_dims() -> int:
"""
Returns the number of dimensions.
:return: Number of dimensions.
"""
return 18
@classmethod
def from_file(cls,
file_name: str,
invalid_states: List[int] = None,
dynprop_states: List[int] = None,
ambig_states: List[int] = None) -> 'RadarPointCloud':
"""
Loads RADAR data from a Point Cloud Data file. See details below.
:param file_name: The path of the pointcloud file.
:param invalid_states: Radar states to be kept. See details below.
:param dynprop_states: Radar states to be kept. Use [0, 2, 6] for moving objects only. See details below.
:param ambig_states: Radar states to be kept. See details below.
To keep all radar returns, set each state filter to range(18).
:return: <np.float: d, n>. Point cloud matrix with d dimensions and n points.
Example of the header fields:
# .PCD v0.7 - Point Cloud Data file format
VERSION 0.7
FIELDS x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 vx_rms vy_rms
SIZE 4 4 4 1 2 4 4 4 4 4 1 1 1 1 1 1 1 1
TYPE F F F I I F F F F F I I I I I I I I
COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
WIDTH 125
HEIGHT 1
VIEWPOINT 0 0 0 1 0 0 0
POINTS 125
DATA binary
Below some of the fields are explained in more detail:
x is front, y is left
vx, vy are the velocities in m/s.
vx_comp, vy_comp are the velocities in m/s compensated by the ego motion.
We recommend using the compensated velocities.
        invalid_state: Cluster validity state.
(Invalid states)
0x01 invalid due to low RCS
0x02 invalid due to near-field artefact
0x03 invalid far range cluster because not confirmed in near range
0x05 reserved
0x06 invalid cluster due to high mirror probability
0x07 Invalid cluster because outside sensor field of view
0x0d reserved
0x0e invalid cluster because it is a harmonics
(Valid states)
0x00 valid
0x04 valid cluster with low RCS
0x08 valid cluster with azimuth correction due to elevation
0x09 valid cluster with high child probability
0x0a valid cluster with high probability of being a 50 deg artefact
0x0b valid cluster but no local maximum
0x0c valid cluster with high artefact probability
0x0f valid cluster with above 95m in near range
0x10 valid cluster with high multi-target probability
0x11 valid cluster with suspicious angle
        dynProp: Dynamic property of cluster to indicate if it is moving or not.
0: moving
1: stationary
2: oncoming
3: stationary candidate
4: unknown
5: crossing stationary
6: crossing moving
7: stopped
ambig_state: State of Doppler (radial velocity) ambiguity solution.
0: invalid
1: ambiguous
2: staggered ramp
3: unambiguous
4: stationary candidates
pdh0: False alarm probability of cluster (i.e. probability of being an artefact caused by multipath or similar).
0: invalid
1: <25%
2: 50%
3: 75%
4: 90%
5: 99%
6: 99.9%
7: <=100%
"""
assert file_name.endswith('.pcd'), 'Unsupported filetype {}'.format(file_name)
meta = []
with open(file_name, 'rb') as f:
for line in f:
line = line.strip().decode('utf-8')
meta.append(line)
if line.startswith('DATA'):
break
data_binary = f.read()
# Get the header rows and check if they appear as expected.
assert meta[0].startswith('#'), 'First line must be comment'
assert meta[1].startswith('VERSION'), 'Second line must be VERSION'
sizes = meta[3].split(' ')[1:]
types = meta[4].split(' ')[1:]
counts = meta[5].split(' ')[1:]
width = int(meta[6].split(' ')[1])
height = int(meta[7].split(' ')[1])
data = meta[10].split(' ')[1]
feature_count = len(types)
assert width > 0
        assert len([c for c in counts if c != '1']) == 0, 'Error: COUNT not supported!'
        assert height == 1, 'Error: height != 1 not supported!'
assert data == 'binary'
# Lookup table for how to decode the binaries.
unpacking_lut = {'F': {2: 'e', 4: 'f', 8: 'd'},
'I': {1: 'b', 2: 'h', 4: 'i', 8: 'q'},
'U': {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}}
types_str = ''.join([unpacking_lut[t][int(s)] for t, s in zip(types, sizes)])
# Decode each point.
offset = 0
point_count = width
points = []
for i in range(point_count):
point = []
for p in range(feature_count):
start_p = offset
end_p = start_p + int(sizes[p])
assert end_p < len(data_binary)
point_p = struct.unpack(types_str[p], data_binary[start_p:end_p])[0]
point.append(point_p)
offset = end_p
points.append(point)
# A NaN in the first point indicates an empty pointcloud.
point = np.array(points[0])
if np.any(np.isnan(point)):
return cls(np.zeros((feature_count, 0)))
# Convert to numpy matrix.
points = np.array(points).transpose()
# If no parameters are provided, use default settings.
invalid_states = cls.invalid_states if invalid_states is None else invalid_states
dynprop_states = cls.dynprop_states if dynprop_states is None else dynprop_states
ambig_states = cls.ambig_states if ambig_states is None else ambig_states
# Filter points with an invalid state.
valid = [p in invalid_states for p in points[-4, :]]
points = points[:, valid]
# Filter by dynProp.
valid = [p in dynprop_states for p in points[3, :]]
points = points[:, valid]
# Filter by ambig_state.
valid = [p in ambig_states for p in points[11, :]]
points = points[:, valid]
return cls(points)
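# A minimal usage sketch showing how the radar filter settings interact with from_file;
# 'radar_front.pcd' and the chosen state lists are hypothetical example values.
def _example_radar_usage(file_name: str = 'radar_front.pcd') -> 'RadarPointCloud':
    """Loads a radar point cloud, keeping only valid, unambiguous, moving returns."""
    # Same as the class-level defaults except that dynprop_states is restricted to
    # moving (0), oncoming (2) and crossing moving (6) clusters.
    return RadarPointCloud.from_file(
        file_name,
        invalid_states=[0],
        dynprop_states=[0, 2, 6],
        ambig_states=[3])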
class Box:
""" Simple data class representing a 3d box including, label, score and velocity. """
def __init__(self,
center: List[float],
size: List[float],
orientation: Quaternion,
label: int = np.nan,
score: float = np.nan,
velocity: Tuple = (np.nan, np.nan, np.nan),
name: str = None,
token: str = None):
"""
:param center: Center of box given as x, y, z.
:param size: Size of box in width, length, height.
:param orientation: Box orientation.
:param label: Integer label, optional.
:param score: Classification score, optional.
:param velocity: Box velocity in x, y, z direction.
        :param name: Box name, optional. Can be used e.g. to denote the category name.
:param token: Unique string identifier from DB.
"""
assert not np.any(np.isnan(center))
assert not np.any(np.isnan(size))
assert len(center) == 3
assert len(size) == 3
assert type(orientation) == Quaternion
self.center = np.array(center)
self.wlh = np.array(size)
self.orientation = orientation
self.label = int(label) if not np.isnan(label) else label
self.score = float(score) if not np.isnan(score) else score
self.velocity = np.array(velocity)
self.name = name
self.token = token
def __eq__(self, other):
center = np.allclose(self.center, other.center)
wlh = np.allclose(self.wlh, other.wlh)
orientation = np.allclose(self.orientation.elements, other.orientation.elements)
label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label))
score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score))
vel = (np.allclose(self.velocity, other.velocity) or
(np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity))))
return center and wlh and orientation and label and score and vel
def __repr__(self):
repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \
'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \
'vel: {:.2f}, {:.2f}, {:.2f}, name: {}, token: {}'
return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0],
self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1],
self.orientation.axis[2], self.orientation.degrees, self.orientation.radians,
self.velocity[0], self.velocity[1], self.velocity[2], self.name, self.token)
@property
def rotation_matrix(self) -> np.ndarray:
"""
Return a rotation matrix.
:return: <np.float: 3, 3>. The box's rotation matrix.
"""
return self.orientation.rotation_matrix
def translate(self, x: np.ndarray) -> None:
"""
Applies a translation.
:param x: <np.float: 3, 1>. Translation in x, y, z direction.
"""
self.center += x
def rotate(self, quaternion: Quaternion) -> None:
"""
Rotates box.
:param quaternion: Rotation to apply.
"""
self.center = np.dot(quaternion.rotation_matrix, self.center)
self.orientation = quaternion * self.orientation
self.velocity = np.dot(quaternion.rotation_matrix, self.velocity)
def corners(self, wlh_factor: float = 1.0):
"""
Returns the bounding box corners.
:param wlh_factor: Multiply w, l, h by a factor to scale the box.
:return: <np.float: 3, 8>. First four corners are the ones facing forward.
The last four are the ones facing backwards.
"""
w, l, h = self.wlh * wlh_factor
# 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
corners = np.vstack((x_corners, y_corners, z_corners))
# Rotate
corners = np.dot(self.orientation.rotation_matrix, corners)
# Translate
x, y, z = self.center
corners[0, :] = corners[0, :] + x
corners[1, :] = corners[1, :] + y
corners[2, :] = corners[2, :] + z
return corners
    def extremePoints(self,
                      view: np.ndarray = np.eye(3),
                      normalize: bool = False) -> np.ndarray:
        """
        Returns the corners of an axis-aligned 2d box enclosing the projected 3d box,
        computed from the extreme left/right projected corners.
        :param view: <np.float: 3, 3>. Defines a projection if needed (e.g. for drawing in an image).
        :param normalize: Whether to normalize the remaining coordinate.
        :return: <np.float: 4, 2>. Corners ordered bottom-left, bottom-right, top-right, top-left.
        """
        corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
        corners = corners.T
        # Keep the two leftmost and two rightmost projected corners (sorted by x).
        corners = corners[corners[:, 0].argsort()]
        corners = np.array([corners[0], corners[1], corners[-1], corners[-2]])
        corners = corners.T
        l = np.min(corners[0])  # left limit
        r = np.max(corners[0])  # right limit
        t = np.max(corners[1])  # top limit
        b = np.min(corners[1])  # bottom limit
        return np.array([[l, b], [r, b], [r, t], [l, t]])

    def get2Dbox(self,
                 axis: Axes,
                 view: np.ndarray = np.eye(3),
                 normalize: bool = False,
                 colors: Tuple = ('b', 'r', 'k'),
                 linewidth: float = 2) -> None:
        """
        Renders the enclosing 2d box (see extremePoints) in the provided Matplotlib axis.
        :param axis: Axis onto which the box should be drawn.
        :param view: <np.float: 3, 3>. Defines a projection if needed (e.g. for drawing in an image).
        :param normalize: Whether to normalize the remaining coordinate.
        :param colors: (<Matplotlib.colors>: 3). The first color is used to draw the rectangle.
        :param linewidth: Width in pixel of the box sides.
        """
        corners = self.extremePoints(view=view, normalize=normalize)

        def draw_rect(selected_corners, color):
            prev = selected_corners[-1]
            for corner in selected_corners:
                axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)
                prev = corner

        draw_rect(corners, colors[0])
def bottom_corners(self) -> np.ndarray:
"""
Returns the four bottom corners.
:return: <np.float: 3, 4>. Bottom corners. First two face forward, last two face backwards.
"""
return self.corners()[:, [2, 3, 7, 6]]
def render(self,
axis: Axes,
view: np.ndarray = np.eye(3),
normalize: bool = False,
colors: Tuple = ('b', 'r', 'k'),
linewidth: float = 2) -> None:
"""
Renders the box in the provided Matplotlib axis.
:param axis: Axis onto which the box should be drawn.
        :param view: <np.array: 3, 3>. Define a projection if needed (e.g. for drawing projection in an image).
:param normalize: Whether to normalize the remaining coordinate.
:param colors: (<Matplotlib.colors>: 3). Valid Matplotlib colors (<str> or normalized RGB tuple) for front,
back and sides.
:param linewidth: Width in pixel of the box sides.
"""
corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
def draw_rect(selected_corners, color):
prev = selected_corners[-1]
for corner in selected_corners:
axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)
prev = corner
# Draw the sides
for i in range(4):
axis.plot([corners.T[i][0], corners.T[i + 4][0]],
[corners.T[i][1], corners.T[i + 4][1]],
color=colors[2], linewidth=linewidth)
# Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
draw_rect(corners.T[:4], colors[0])
draw_rect(corners.T[4:], colors[0])
# Draw line indicating the front
center_bottom_forward = np.mean(corners.T[2:4], axis=0)
center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
axis.plot([center_bottom[0], center_bottom_forward[0]],
[center_bottom[1], center_bottom_forward[1]],
color=colors[0], linewidth=linewidth)
def render_cv2(self,
im: np.ndarray,
view: np.ndarray = np.eye(3),
normalize: bool = False,
colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155)),
linewidth: int = 2) -> None:
"""
Renders box using OpenCV2.
        :param im: <np.array: height, width, 3>. Image array. Channels are in BGR order.
:param view: <np.array: 3, 3>. Define a projection if needed (e.g. for drawing projection in an image).
:param normalize: Whether to normalize the remaining coordinate.
:param colors: ((R, G, B), (R, G, B), (R, G, B)). Colors for front, side & rear.
:param linewidth: Linewidth for plot.
"""
corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
def draw_rect(selected_corners, color):
prev = selected_corners[-1]
for corner in selected_corners:
cv2.line(im,
(int(prev[0]), int(prev[1])),
(int(corner[0]), int(corner[1])),
color, linewidth)
prev = corner
# Draw the sides
for i in range(4):
cv2.line(im,
(int(corners.T[i][0]), int(corners.T[i][1])),
(int(corners.T[i + 4][0]), int(corners.T[i + 4][1])),
colors[2][::-1], linewidth)
# Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
draw_rect(corners.T[:4], colors[0][::-1])
draw_rect(corners.T[4:], colors[1][::-1])
# Draw line indicating the front
center_bottom_forward = np.mean(corners.T[2:4], axis=0)
center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
cv2.line(im,
(int(center_bottom[0]), int(center_bottom[1])),
(int(center_bottom_forward[0]), int(center_bottom_forward[1])),
colors[0][::-1], linewidth)
def copy(self) -> 'Box':
"""
Create a copy of self.
:return: A copy.
"""
return copy.deepcopy(self)
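# A minimal usage sketch for Box; the center, size, rotation and name below are made-up
# example values.
def _example_box_usage() -> np.ndarray:
    """Builds a box, rotates it 90 degrees around the z-axis and returns its 8 corners."""
    box = Box(center=[10.0, 5.0, 1.0],
              size=[2.0, 4.5, 1.8],  # width, length, height.
              orientation=Quaternion(axis=[0.0, 0.0, 1.0], angle=0.0),
              name='vehicle.car')
    box.rotate(Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2))
    return box.corners()  # <np.float: 3, 8> corner coordinates.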
|
the-stack_0_2868 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
# Constants shared between all keypoint tasks.
UNMATCHED_KEYPOINT_SCORE = 0.1
KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
`channels_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
pass
@property
@abc.abstractmethod
def supported_sub_model_types(self):
"""Valid sub model types supported by the get_sub_model function."""
pass
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"""Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
"""
pass
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256,
bias_fill=None, use_depthwise=False, name=None):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
use_depthwise: If true, use SeparableConv2D to construct the Sequential
layers instead of Conv2D.
name: Optional name for the prediction net.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
if use_depthwise:
conv_fn = tf.keras.layers.SeparableConv2D
else:
conv_fn = tf.keras.layers.Conv2D
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential(
[conv_fn(num_filters, kernel_size=kernel_size, padding='same'),
tf.keras.layers.ReLU(),
out_conv],
name=name)
return net
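# A minimal sketch of how heads built by make_prediction_net might be used; the class count,
# bias value and feature-map shape below are arbitrary example values.
def _example_prediction_heads():
  """Builds class-center and offset heads and applies them to a dummy feature map."""
  center_head = make_prediction_net(num_out_channels=90, bias_fill=-2.19)  # e.g. 90 classes.
  offset_head = make_prediction_net(num_out_channels=NUM_OFFSET_CHANNELS)
  dummy_features = tf.zeros([1, 128, 128, 64], dtype=tf.float32)
  # Outputs have shapes [1, 128, 128, 90] and [1, 128, 128, 2] respectively.
  return center_head(dummy_features), offset_head(dummy_features)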
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
tf.Assert(tensor.get_shape().ndims == num_dims, [tensor])
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def _multi_range(limit,
value_repetitions=1,
range_repetitions=1,
dtype=tf.int32):
"""Creates a sequence with optional value duplication and range repetition.
As an example (see the Args section for more details),
_multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns:
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
Args:
limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive.
value_repetitions: Integer. The number of times a value in the sequence is
repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..].
range_repetitions: Integer. The number of times the range is repeated. With
range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..].
dtype: The type of the elements of the resulting tensor.
Returns:
A 1-D tensor of type `dtype` and size
[`limit` * `value_repetitions` * `range_repetitions`] that contains the
specified range with given repetitions.
"""
return tf.reshape(
tf.tile(
tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),
multiples=[range_repetitions, value_repetitions]), [-1])
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
the same location to be returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_transposed, k=k)
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
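# A minimal sketch of peak extraction on a dummy heatmap; the shapes and k below are
# arbitrary example values.
def _example_top_k_locations():
  """Pulls the 10 strongest local peaks out of a dummy 3-class center heatmap."""
  heatmap = tf.random.uniform([2, 64, 64, 3])  # [batch, height, width, num_classes].
  # Returns (scores, y_indices, x_indices, channel_indices), each of shape [2, 10].
  return top_k_feature_map_locations(
      heatmap, max_pool_kernel_size=3, k=10, per_channel=False)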
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices,
channel_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
      raw bounding box coordinates of the boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_height_width = tf.gather_nd(height_width_predictions, combined_indices)
new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, -1])
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width = tf.maximum(new_height_width, 0)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1)
boxes = tf.stack([y_indices + y_offsets - heights / 2.0,
x_indices + x_offsets - widths / 2.0,
y_indices + y_offsets + heights / 2.0,
x_indices + x_offsets + widths / 2.0], axis=2)
return boxes, detection_classes, detection_scores, num_detections
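# A minimal end-to-end decoding sketch combining the two functions above; all tensors are
# random dummies and the shapes are arbitrary example values.
def _example_decode_boxes():
  """Decodes top-k class-center peaks plus size/offset maps into raw boxes."""
  class_center = tf.random.uniform([1, 32, 32, 2])  # 2 hypothetical classes.
  height_width = tf.random.uniform([1, 32, 32, 2])
  offsets = tf.random.uniform([1, 32, 32, 2])
  scores, y_inds, x_inds, channel_inds = top_k_feature_map_locations(class_center, k=50)
  # Boxes come back in output-stride coordinates; see
  # convert_strided_predictions_to_normalized_boxes for normalization.
  return prediction_tensors_to_boxes(scores, y_inds, x_inds, channel_inds,
                                     height_width, offsets)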
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
      object temporal offsets in (y, x) dimensions.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
return offsets
def prediction_tensors_to_keypoint_candidates(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint
a candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per
keypoint type.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
"""
batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
selected_offsets = tf.reshape(selected_offsets_flat,
[batch_size, num_indices, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
_, _, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
# Offsets are per keypoint and the last dimension of selected_offsets
# contains all those offsets, so reshape the offsets to make sure that the
# last dimension contains (y_offset, x_offset) for a single keypoint.
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that. In this
# case, channel_indices indicates which keypoint to use the offset from.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
offsets = tf.gather_nd(reshaped_offsets, combined_indices)
offsets = tf.reshape(offsets, [batch_size, num_indices, -1])
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
return keypoint_candidates, keypoint_scores, num_candidates
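# A minimal sketch of keypoint candidate extraction; the 17-keypoint setup and shapes below
# are arbitrary example values.
def _example_keypoint_candidates():
  """Extracts per-type keypoint candidates from dummy heatmap and offset predictions."""
  kp_heatmap = tf.random.uniform([1, 32, 32, 17])
  kp_offsets = tf.random.uniform([1, 32, 32, 2])
  # Returns candidates [1, 20, 17, 2], scores [1, 20, 17] and per-type counts [1, 17].
  return prediction_tensors_to_keypoint_candidates(
      kp_heatmap, kp_offsets, keypoint_score_threshold=0.1,
      max_pool_kernel_size=3, max_candidates=20)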
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, num_instances = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_instances),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions,
combined_indices)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=None,
unmatched_keypoint_score=0.1, box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance'):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
    box that encloses the regressed keypoints for the instance if `bboxes` is
    not provided). Note that the box is scaled by `box_scale` in height and
    width, to provide some margin around the
keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
  keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string as one of ['min_distance',
      'score_distance_ratio'] indicating how to select the candidate. If an
      invalid value is provided, a ValueError will be raised.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
    ValueError: if the provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
nan_mask = tf.where(
invalid_candidates,
np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32),
tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(
keypoint_candidates, tf.expand_dims(nan_mask, -1))
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, 1, num_keypoints, 2].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
# Shape [batch_size, 1, max_candidates, num_keypoints, 2].
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates_with_nans, axis=1)
# Use explicit tensor shape broadcasting (since the tensor dimensions are
# expanded to 5D) to make it tf.lite compatible.
regressed_keypoint_expanded = tf.tile(
regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])
keypoint_candidates_expanded = tf.tile(
keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])
# Replace tf.math.squared_difference by "-" operator and tf.multiply ops since
# tf.lite convert doesn't support squared_difference with undetermined
# dimension.
diff = regressed_keypoint_expanded - keypoint_candidates_expanded
sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)
distances = tf.math.sqrt(sqrd_distances)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + 1e-6)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
nearby_candidate_coords, nearby_candidate_scores = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds))
if bboxes is None:
# Create bboxes from regressed keypoints.
# Shape [batch_size * num_instances, 4].
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
# Filter out the chosen candidate with score lower than unmatched
# keypoint score.
tf.cast(nearby_candidate_scores <
unmatched_keypoint_score, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
return refined_keypoints, refined_scores
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
indices):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
      num_indices, num_keypoints] with gathered keypoint scores.
"""
batch_size, num_indices, num_keypoints = _get_shape(indices, 3)
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(
batch_size,
value_repetitions=num_keypoints * num_indices,
dtype=tf.int64),
_multi_range(
num_keypoints,
value_repetitions=num_indices,
range_repetitions=batch_size,
dtype=tf.int64),
tf.reshape(nearby_candidate_inds_transposed, [-1])
], axis=1)
nearby_candidate_coords_transposed = tf.gather_nd(
keypoint_candidates_transposed, combined_indices)
nearby_candidate_coords_transposed = tf.reshape(
nearby_candidate_coords_transposed,
[batch_size, num_keypoints, num_indices, -1])
nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed,
combined_indices)
nearby_candidate_scores_transposed = tf.reshape(
nearby_candidate_scores_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_candidates = tf.transpose(
nearby_candidate_coords_transposed, [0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
return gathered_keypoint_candidates, gathered_keypoint_scores
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
channel_indices. The channel indices corresponding to each of the input
indices.
"""
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) % num_cols
channel_indices = indices % num_channels
return row_indices, col_indices, channel_indices
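# A small worked example of the index decomposition above (arbitrary numbers): with
# num_cols=4 and num_channels=3, flattened index 14 maps to row (14 // 3) // 4 = 1,
# column (14 // 3) % 4 = 0 and channel 14 % 3 = 2.
def _example_index_decomposition():
  """Decodes a single flattened index back into (row, col, channel)."""
  return row_col_channel_indices_from_flattened_indices(
      tf.constant([14]), num_cols=4, num_channels=3)  # -> ([1], [0], [2])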
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
def _normalize_boxlist(args):
boxes, height, width = args
boxes = box_list_ops.scale(boxes, stride, stride)
boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
filter_nonoverlapping=False)
return boxes
box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
true_heights_list = tf.unstack(true_heights, axis=0)
true_widths_list = tf.unstack(true_widths, axis=0)
box_lists = list(map(_normalize_boxlist,
zip(box_lists, true_heights_list, true_widths_list)))
boxes = tf.stack([box_list_instance.get() for
box_list_instance in box_lists], axis=0)
return boxes
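# Example (illustrative sketch): a box predicted at output-space coordinates
# [1., 1., 10., 10.] with stride 4 corresponds to [4., 4., 40., 40.] in the
# input space; for a true image of size 80x80 it normalizes to
# [0.05, 0.05, 0.5, 0.5] (and would be clipped to [0, 1] if it spilled over).
#   boxes = tf.constant([[[1., 1., 10., 10.]]])
#   normalized = convert_strided_predictions_to_normalized_boxes(
#       boxes, stride=4, true_image_shapes=tf.constant([[80, 80, 3]]))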
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
# Flatten keypoints and scores.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
# Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite
# compatible.
kpts_dims = _get_shape(keypoint_coords_normalized, 4)
output_spec = tf.TensorSpec(
shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32)
keypoint_coords_normalized = tf.map_fn(
clip_to_window, (keypoint_coords_normalized, batch_window),
dtype=tf.float32, back_prop=False,
fn_output_signature=output_spec)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
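# Example (illustrative sketch): a keypoint predicted at output-space location
# (y=10, x=5) with stride 4 and a true image of 80x80 normalizes to
# (10 * 4 / 80, 5 * 4 / 80) = (0.5, 0.25). With
# clip_out_of_frame_keypoints=True, any keypoint that normalizes outside
# [0, 1] is clipped to the boundary and its score is zeroed out.
#   kp = tf.constant([[[[10., 5.]]]])      # [batch=1, instances=1, kpts=1, 2]
#   scores = tf.constant([[[0.9]]])
#   kp_norm, scores = convert_strided_predictions_to_normalized_keypoints(
#       kp, scores, stride=4, true_image_shapes=tf.constant([[80, 80, 3]]),
#       clip_out_of_frame_keypoints=True)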
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
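# Example (illustrative sketch; `detection_boxes`, `detection_classes`,
# `mask_probs`, and `true_shapes` are hypothetical placeholder tensors with
# the shapes documented above):
#   instance_masks, _ = convert_strided_predictions_to_instance_masks(
#       boxes=detection_boxes, classes=detection_classes, masks=mask_probs,
#       true_image_shapes=true_shapes, stride=4, mask_height=256,
#       mask_width=256, score_threshold=0.5)
#   # instance_masks: [batch, max_detections, 256, 256] uint8 masks; the
#   # second return value is None when no DensePose tensors are supplied.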
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
# for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
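# Example (illustrative sketch): with num_parts=3 and a single pixel whose
# highest scoring part is index 2, the (v, u) pair stored for part 2 is
# selected from the part axis.
#   coords = tf.reshape(tf.range(6, dtype=tf.float32), [1, 1, 1, 3, 2])
#   picked = gather_surface_coords_for_parts(coords, tf.constant([[[2]]]))
#   # picked == [[[[4., 5.]]]]  (the (v, u) entry of part 2)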
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
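# Example (illustrative sketch): for an embedding map of width W, the
# embedding of an object centered at (y, x) is read from flattened position
# y * W + x.
#   emb_map = tf.random.normal([1, 8, 8, 16])     # [batch, H, W, D]
#   emb = predicted_embeddings_at_object_centers(
#       emb_map, y_indices=tf.constant([[2]]), x_indices=tf.constant([[3]]))
#   # emb has shape [1, 1, 16] and equals emb_map[0, 2, 3, :]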
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
immutable once constructed. Please see the __new__ function for detailed
information on each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight)
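# Example (illustrative sketch; assumes the `losses` module from
# object_detection.core is available under that name in this file):
#   od_params = ObjectDetectionParams(
#       localization_loss=losses.L1LocalizationLoss(),
#       scale_loss_weight=0.1,
#       offset_loss_weight=1.0)  # task_loss_weight defaults to 1.0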
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
immutable once constructed. Please see the __new__ function for detailed
information on each field.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
Note that it should be a unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to be
considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
keypoint, e.g. "nose", 'left_shoulder". Note that the length of this
list should be equal to keypoint_indices.
keypoint_std_dev: A list of float represent the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap. It is to provide
the flexibility of using different sizes of Gaussian kernel for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
type).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in units of output pixels) around the
groundtruth heatmap peak within which offset targets are assigned. If set
to 0, the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
per_keypoint_offset: A bool indicating whether to assign offsets for each
keypoint channel separately. If set to False, the output offset target has
the shape [batch_size, out_height, out_width, 2] (same behavior as the
original paper). If set to True, the output offset target has the shape
[batch_size, out_height, out_width, 2 * num_keypoints] (recommended when
offset_peak_radius is not zero).
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset)
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
need to have with groundtruth boxes in order not to be penalized. This is
used for computing the class-specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
use_labeled_classes: boolean, whether to compute the loss only for labeled
classes.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes)
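# Example (illustrative sketch; assumes `losses.PenaltyReducedLogisticFocalLoss`
# from object_detection.core.losses, the focal loss commonly paired with
# CenterNet center heatmaps):
#   center_params = ObjectCenterParams(
#       classification_loss=losses.PenaltyReducedLogisticFocalLoss(
#           alpha=2.0, beta=4.0),
#       object_center_loss_weight=1.0,
#       max_box_predictions=100)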
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init)
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
upsample_method: Method for upsampling DensePose feature maps. Options are
either 'bilinear' or 'nearest'. This has no effect when
`upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
return '%s/%s' % (task_name, head_name)
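# Example: get_keypoint_name('human_pose', KEYPOINT_HEATMAP) returns
# 'human_pose/keypoint/heatmap', which is how per-task prediction and loss
# dictionary keys are built throughout this module.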
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
A scalar integer tensor indicating how many instances/boxes are in the
images in the batch. Note that this function is usually used to normalize
the loss, so the minimum return value is 1 to avoid dividing by zero.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
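# Example (illustrative sketch): two images with weights [1., 1., 0.] and
# [1., 0., 0.] contain three real instances in total.
#   n = get_num_instances_from_weights(
#       [tf.constant([1., 1., 0.]), tf.constant([1., 0., 0.])])
#   # n == 3; with all-zero weights the function returns 1 instead of 0.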
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None,
use_depthwise=False):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within the
resized image tensor as the resized image tensor could be padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
use_depthwise: If true, all task heads will be constructed using
separable_conv. Otherwise, standard convolutions will be used.
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
# (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._use_depthwise = use_depthwise
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
A dictionary of keras modules generated by calling the make_prediction_net
function. This method also creates and sets a private member of the class
when the tracking task is being learned.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [
make_prediction_net(num_classes, bias_fill=class_prediction_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if self._od_params is not None:
prediction_heads[BOX_SCALE] = [
make_prediction_net(
NUM_SIZE_CHANNELS, use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
prediction_heads[BOX_OFFSET] = [
make_prediction_net(
NUM_OFFSET_CHANNELS, use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
# pylint: disable=g-complex-comprehension
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [
make_prediction_net(
num_keypoints,
bias_fill=kp_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: enable=g-complex-comprehension
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: disable=g-complex-comprehension
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = [
make_prediction_net(
num_classes,
bias_fill=self._mask_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = [
make_prediction_net(
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [
make_prediction_net(2 * self._densepose_params.num_parts,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
# pylint: enable=g-complex-comprehension
if self._track_params is not None:
prediction_heads[TRACK_REID] = [
make_prediction_net(self._track_params.reid_embed_size,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)]
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size,
input_shape=(
self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids,
input_shape=(
self._track_params.reid_embed_size,)))
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS,
use_depthwise=self._use_depthwise)
for _ in range(num_feature_outputs)
]
return prediction_heads
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
need to have with groundtruth boxes in order not to be penalized. This is
used for computing the class-specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou))
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by the total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
normalized by the total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(
tf.expand_dims(valid_mask_batch, axis=-1))
flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
# Sum over the number of instances per keypoint type to get the total
# number of keypoints. Note that this is used to normalize the loss and we
# keep the minimum value at 1 to avoid dividing by zero when no keypoints
# are present in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
# The dimensions passed do not match the loss function's docstring, but the
# loss still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
  def preprocess(self, inputs):
    """Resizes the inputs and applies the feature extractor's preprocessing."""
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
Note the $TASK_NAME is provided by the KeypointEstimation namedtuple
used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
        'Loss/track/reid', (optional)
        'Loss/track/offset'] (optional)
scalar tensors corresponding to the losses for different tasks. Note the
$TASK_NAME is provided by the KeypointEstimation namedtuple used to
differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (input_height // self._stride,
input_width // self._stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.to_float(true_image_shapes) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detection, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detection, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob, max_pool_kernel_size=3,
k=self._center_params.max_box_predictions))
boxes_strided, classes, scores, num_detections = (
prediction_tensors_to_boxes(
detection_scores, y_indices, x_indices, channel_indices,
prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {
fields.DetectionResultFields.detection_boxes: boxes,
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_classes: classes,
fields.DetectionResultFields.num_detections: num_detections,
'detection_boxes_strided': boxes_strided
}
if self._kp_params_dict:
keypoints, keypoint_scores = self._postprocess_keypoints(
prediction_dict, classes, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=True))
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detection, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
def _postprocess_keypoints(self, prediction_dict, classes, y_indices,
x_indices, boxes, num_detections):
"""Performs postprocessing on keypoint predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
num_ind = _get_shape(instance_inds, 1)
def true_fn(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params):
"""Logics to execute when instance_inds is not an empty set."""
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
          # num_instances_i and num_keypoints_i refer to the number of
# instances and keypoints for class i, respectively.
kpt_coords_for_class, kpt_scores_for_class = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params))
# Expand keypoint dimension (with padding) so that coordinates and
# scores have shape [1, num_instances_i, num_total_keypoints, 2] and
# [1, num_instances_i, num_total_keypoints], respectively.
kpts_coords_for_class_padded, kpt_scores_for_class_padded = (
_pad_to_full_keypoint_dim(
kpt_coords_for_class, kpt_scores_for_class,
kp_params.keypoint_indices, total_num_keypoints))
return kpts_coords_for_class_padded, kpt_scores_for_class_padded
def false_fn():
"""Logics to execute when the instance_inds is an empty set."""
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32),
tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(
true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params)
# Use dimension values instead of tf.size for tf.lite compatibility.
results = tf.cond(num_ind[0] > 0, true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
# Concatenate all keypoints across all classes (single example).
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list,
axis=0)
# Use dimension values instead of tf.size for tf.lite compatibility.
num_inds = _get_shape(instance_inds_for_example, 1)
if num_inds[0] > 0:
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = (
_pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions))
else:
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
      class_id: An integer id of the target class used to select instances.
Returns:
instance_inds: A [num_instances] int32 tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
# Call tf.math.equal with matched tensor shape to make it tf.lite
# compatible.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
# Cast the indices tensor to int32 for tf.lite compatibility.
return tf.cast(instance_inds, tf.int32)
def _postprocess_keypoints_for_class_and_image(
self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes,
y_indices, x_indices, boxes, indices_with_kpt_class, batch_index,
kp_params):
"""Postprocess keypoints for a single image and class.
This function performs the following postprocessing operations on a single
image and single keypoint class:
- Converts keypoints scores to range [0, 1] with sigmoid.
- Determines the detections that correspond to the specified keypoint class.
- Gathers the regressed keypoints at the detection (i.e. box) centers.
- Gathers keypoint candidates from the keypoint heatmaps.
- Snaps regressed keypoints to nearby keypoint candidates.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
indices_with_kpt_class: A [num_instances] int tensor where each element
indicates the instance location within the `classes` tensor. This is
useful to associate the refined keypoints with the original detections
(i.e. boxes)
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
"""
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class,
axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class,
axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
keypoint_candidates, keypoint_scores, num_keypoint_candidates = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap, keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint))
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
refined_keypoints, refined_scores = refine_keypoints(
regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=boxes_for_kpt_class,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode)
return refined_keypoints, refined_scores
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if fine_tune_checkpoint_type not in supported_types:
message = ('Checkpoint type "{}" not supported for {}. '
'Supported types are {}')
raise ValueError(
message.format(fine_tune_checkpoint_type,
self._feature_extractor.__class__.__name__,
supported_types))
elif fine_tune_checkpoint_type == 'fine_tune':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(
fine_tune_checkpoint_type)}
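  # A minimal usage sketch (not part of the original file): the checkpoint
  # path below is a hypothetical placeholder and `model` stands for an
  # already-built CenterNetMetaArch instance. The returned map is intended to
  # be fed to tf.train.Checkpoint when fine-tuning:
  #
  #   restore_map = model.restore_from_objects(
  #       fine_tune_checkpoint_type='detection')
  #   ckpt = tf.train.Checkpoint(**restore_map)
  #   ckpt.restore('/path/to/checkpoint').expect_partial()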
def updates(self):
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
|
the-stack_0_2869 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Children, obj[6]: Education, obj[7]: Occupation, obj[8]: Income, obj[9]: Bar, obj[10]: Coffeehouse, obj[11]: Restaurant20to50, obj[12]: Direction_same, obj[13]: Distance
# {"feature": "Occupation", "instances": 34, "metric_value": 0.9774, "depth": 1}
if obj[7]<=7:
# {"feature": "Distance", "instances": 21, "metric_value": 0.7919, "depth": 2}
if obj[13]>1:
# {"feature": "Restaurant20to50", "instances": 13, "metric_value": 0.3912, "depth": 3}
if obj[11]<=2.0:
return 'True'
elif obj[11]>2.0:
return 'False'
else: return 'False'
elif obj[13]<=1:
# {"feature": "Income", "instances": 8, "metric_value": 1.0, "depth": 3}
if obj[8]<=7:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.7219, "depth": 4}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Age", "instances": 2, "metric_value": 1.0, "depth": 5}
if obj[4]>1:
return 'True'
elif obj[4]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>7:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>7:
# {"feature": "Education", "instances": 13, "metric_value": 0.8905, "depth": 2}
if obj[6]<=3:
# {"feature": "Time", "instances": 11, "metric_value": 0.684, "depth": 3}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 4, "metric_value": 1.0, "depth": 4}
if obj[0]<=2:
# {"feature": "Coffeehouse", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[10]>1.0:
return 'False'
elif obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[0]>2:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>3:
return 'True'
else: return 'True'
else: return 'False'
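# A minimal usage sketch (not part of the original file). The feature vector
# below is hypothetical; its positions follow the obj[0]..obj[13] mapping
# documented in the comment next to findDecision above.
if __name__ == '__main__':
    sample = [1, 2, 3, 1, 2, 0, 4, 5, 6, 1.0, 2.0, 1.0, 0, 2]
    print(findDecision(sample))  # -> 'True' for this particular vector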
|
the-stack_0_2871 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from . import AbstractCostFunction
from .gap_close import AbstractGapCloseCostFunction
from ...trajectories import Trajectories
__all__ = ["BrownianLinkCostFunction", "BrownianGapCloseCostFunction"]
class BrownianLinkCostFunction(AbstractCostFunction):
"""This class generates cost matrices for brownian motion
trajectories.
The cost between two position is given by the square of their
distance
Attributes
----------
parameters: dict
Used by the `build` method, with the following keys:
- 'distance_metric': a string, default 'euclidean',
passed to `scipy.spatial.distance.cdist`
          (see that function's documentation for details)
- 'coords': a list of column names on which to compute the distance,
default ['x', 'y', 'z']
- 'max_speed': a float, default 1. All the values of the cost matrix
for which the distance *divided by the time difference* is higher than
this parameter's value are set to np.nan
context: dict
Context is used to store vectors.
- pos_in: :class:`pandas.DataFrame`
The object coordinates to link from
- pos_out: :class:`pandas.DataFrame`
The object coordinates to link to
"""
def __init__(self, parameters):
"""
"""
_parameters = {'distance_metric': 'euclidean',
'max_speed': 1.,
'coords': ['x', 'y', 'z']}
_parameters.update(parameters)
super(BrownianLinkCostFunction, self).__init__(context={}, parameters=_parameters)
def _build(self):
"""
"""
# Get parameters
coords = self.parameters['coords']
distance_metric = self.parameters['distance_metric']
max_speed = self.parameters['max_speed']
# Check context
pos_in = self.check_context('pos_in', pd.DataFrame)
pos_out = self.check_context('pos_out', pd.DataFrame)
        # Check vectors
self.check_columns([pos_in, pos_out], list(coords) + ['t'])
if pos_out.empty or pos_in.empty:
return pd.DataFrame([])
dt = pos_out['t'].iloc[0] - pos_in['t'].iloc[0]
# Build matrix block
distances = cdist(pos_in[coords].astype(np.float),
pos_out[coords].astype(np.float),
metric=distance_metric)
distances /= np.abs(dt)
distances[distances > max_speed] = np.nan
distances = distances ** 2
return distances
class BrownianGapCloseCostFunction(AbstractGapCloseCostFunction):
"""
"""
def __init__(self, parameters):
"""
"""
_parameters = {'distance_metric': 'euclidean',
'max_speed': 1.,
'coords': ['x', 'y', 'z']}
_parameters.update(parameters)
super(self.__class__, self).__init__(context={}, parameters=_parameters)
    def _build(self):
        """Builds the squared-speed gap-closing cost matrix between track ends and starts."""
self.check_idxs_length()
# Get parameters
coords = self.parameters['coords']
distance_metric = self.parameters['distance_metric']
if distance_metric != 'euclidean':
raise Exception("Only 'euclidean' distance are supported for now.")
max_speed = self.parameters['max_speed']
# Check context
idxs_in = self.check_context('idxs_in', list)
idxs_out = self.check_context('idxs_out', list)
trajs = self.check_context('trajs', Trajectories)
# Just in case the parent didn't do it
trajs.relabel_fromzero('label', inplace=True)
# Init 2d distances array
mat = np.empty((len(trajs.labels),
len(trajs.labels)))
mat.fill(np.nan)
# Compute distance between all_pos_out and all_pos_in
all_pos_in = trajs.loc[idxs_in]
all_pos_out = trajs.loc[idxs_out]
vecs = [(all_pos_in[c].values - all_pos_out[c].values) ** 2 for c in coords]
all_dist = np.sqrt(np.sum(vecs, axis=0))
# Get all dt
all_dt = np.abs(all_pos_in['t'].values - all_pos_out['t'].values)
# Compute speeds
speeds = all_dist / all_dt
# Remove speeds greater than 'max_speed'
speeds[speeds > max_speed] = np.nan
# Fill 2d distances array
i_in = np.array(idxs_in)[:, 1].astype(int)
i_out = np.array(idxs_out)[:, 1].astype(int)
mat[i_in, i_out] = speeds
mat = mat ** 2
return mat
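# A minimal sketch (not part of the original module) that mirrors the cost
# computed by BrownianLinkCostFunction._build: squared distance divided by the
# frame interval, with candidate links faster than `max_speed` masked out.
# The coordinates below are hypothetical; the guard keeps imports unaffected.
if __name__ == "__main__":  # pragma: no cover
    pos_in = pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 0.0], 'z': [0.0, 0.0], 't': [0.0, 0.0]})
    pos_out = pd.DataFrame({'x': [0.5, 5.0], 'y': [0.0, 0.0], 'z': [0.0, 0.0], 't': [1.0, 1.0]})
    dt = pos_out['t'].iloc[0] - pos_in['t'].iloc[0]
    distances = cdist(pos_in[['x', 'y', 'z']], pos_out[['x', 'y', 'z']]) / abs(dt)
    distances[distances > 1.0] = np.nan  # max_speed = 1.
    print(distances ** 2)  # the Brownian link cost block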
|
the-stack_0_2872 | from germanium.impl import _filter_one_for_action
def _element(germanium, selector):
"""
Finds a single element for doing a visual action.
    :param germanium: The Germanium instance used to run the selector query.
    :param selector: The selector to search for; may be empty, in which case None is returned.
    :return: The single element suited for the action, or None.
"""
element = None
if selector:
items = germanium.S(selector).element_list(only_visible=False)
element = _filter_one_for_action(germanium, items)
return element
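# A minimal usage sketch (not part of the original module); the `germanium`
# instance and the selector below are hypothetical:
#
#   element = _element(germanium, "input[name='q']")
#   if element is not None:
#       ...  # perform the visual action against the element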
|
the-stack_0_2873 | #Matt Morrow spcID2412353 COURSE: COP1000
#Statement 1: create random numbers between 1 and 25
#Statement 2: sort the numbers
#Statement 3: display in values
#Statement 4: display values in order
#Statement 5: Determine odds/evens and display
import random
def main():
nums = []
for value in range(10):
nums.append(random.randint(1,25))
numbers = sorted(nums)
    print(*nums)
    print(*numbers)
start = numbers[:4]
print(start)
finish = numbers[-5:]
print(finish)
odd_even(numbers)
def odd_even(numbers):
even_count = 0
odd_count = 0
for val in numbers:
if val % 2 == 0:
even_count += 1
if val % 2 != 0:
odd_count += 1
print("List had" + ' ' + str(even_count) + ' ' + "evens and" + ' ' + str(odd_count) + ' ' + "odds")
print("The 6th element in sorted nums is" + ' ' + str(numbers[5]))
main()
|
the-stack_0_2874 | import glymur
import os
import numpy as np
import tempfile
class jpeg(object):
@staticmethod
def name():
        '''JPEG2000 encoding.
        '''
return 'JPEG2000'
@staticmethod
def compress(data, *args, **kwargs):
'''JPEG2000 compression
'''
TMPFOLDER = tempfile.mkdtemp()
        compressed_data = b''  # JP2 frames are appended as bytes below
sizes = []
for iz in range(0, data.shape[0]):
img = data[iz, :, :]
colorized = np.zeros(
(3, img.shape[0], img.shape[1]), dtype=np.uint16
)
# for every value split into three 16 bit samples
colorized[0, :, :] = img % (2**16)
img = img >> 16
colorized[1, :, :] = img % (2**16)
            img = img >> 16  # move to the next 16-bit chunk (bits 32-47)
colorized[2, :, :] = img % (2**16)
#print colorized.shape
glymur.Jp2k(TMPFOLDER+'/tmp_' + str(iz) + '.jp2', colorized)
#glymur.Jp2k('JPEG_TMP/tmp_' + str(iz) + '.jp2', img.astype(np.uint16))
with open(TMPFOLDER+'/tmp_' + str(iz) + '.jp2', 'rb') as fd:
c_data = fd.read()
compressed_data += c_data
sizes.append(len(c_data))
frames = np.zeros((len(sizes)), dtype=np.uint64)
for i,s in enumerate(sizes):
frames[i] = s
#
#
# no of frames
output = np.uint64(len(sizes)).tobytes()
# frame sizes
output += frames.tobytes()
output += compressed_data
# print sizes
return output
@staticmethod
def decompress(data, *args, **kwargs):
'''JPEG2000 decompression
'''
TMPFOLDER = tempfile.mkdtemp()
# grab no of frames
        no_frames = np.frombuffer(data[0:8], dtype=np.uint64)
# print no_frames, len(data), data[8:8*no_frames]
no_frames = no_frames[0]
frame_sizes = data[8:8+int(8*no_frames)]
# print no_frames, frame_sizes
# grab frame sizes
        sizes = np.frombuffer(frame_sizes, dtype=np.uint64)
# store each frame to TMP FOLDER
data_start_byte = 8 + 8*no_frames
current_byte_pointer = data_start_byte
for i in range(sizes.shape[0]):
# print 'writing',i,current_byte_pointer,current_byte_pointer+sizes[i]
current_bytes = data[int(current_byte_pointer):int(current_byte_pointer+sizes[i])]
with open(TMPFOLDER+'/tmp_'+str(i)+'.jp2', 'wb') as f:
f.write(current_bytes)
current_byte_pointer = current_byte_pointer+sizes[i]
nfiles = len(os.listdir(TMPFOLDER))
for ie, filename in enumerate(os.listdir(TMPFOLDER)):
input_filename = TMPFOLDER + '/' + filename
colorized = glymur.Jp2k(input_filename)
index = int(filename.split('_')[1].split('.')[0])
if (ie == 0):
decompressed_data = np.zeros(
(nfiles, colorized.shape[1], colorized.shape[2]),
dtype=np.uint64
)
            # Reassemble the three 16-bit samples back into the lower 48 bits.
            decompressed_data[index, :, :] = (
                colorized[0, :, :] +
                colorized[1, :, :] * (2 ** 16) +
                colorized[2, :, :] * (2 ** 32)
            )
return decompressed_data
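# A minimal round-trip sketch (not part of the original module). It assumes a
# working glymur/OpenJPEG install and values that fit in the 48 bits handled
# by compress()/decompress(); the volume shape is arbitrary.
if __name__ == '__main__':
    volume = np.random.randint(0, 2 ** 32, size=(2, 64, 64)).astype(np.uint64)
    blob = jpeg.compress(volume)
    restored = jpeg.decompress(blob)
    print('lossless round trip:', np.array_equal(volume, restored))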
|
the-stack_0_2875 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
import math
from itertools import chain
from ._deeplab import ASPP
# -------------------------------------------
# Multi-Knowledge Aggregation
# -------------------------------------------
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view((x.size(0),)+self.shape)
class DeepLab_AUX(nn.Module):
def __init__(self, agg_ch, in_channels, num_classes=11, aspp_dilate=[6, 12, 18]):
super(DeepLab_AUX, self).__init__()
# self.agg = nn.Sequential(
# nn.Conv2d(agg_ch, in_channels, 1),
# nn.BatchNorm2d(in_channels),
# nn.ReLU()
# )
self.aspp = ASPP(in_channels, aspp_dilate)
self.head = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, num_classes, 1)
)
def forward(self, x, input_size):
# ka = self.agg(x)
out = self.aspp(x)
out = self.head(out)
out = F.interpolate(out, size=input_size, mode='bilinear', align_corners=False)
return out
class MKAT_F(nn.Module):
def __init__(self, s_shape, t_shape, nz=256, kn_list=range(5)):
super(MKAT_F, self).__init__()
self.nz = nz
self.kn_list = kn_list
self.num_k = len(kn_list)
agg_ch = self.num_k * nz
in_channels = 512
def conv1x1(in_channels, out_channels, stride=1):
return nn.Conv2d(
in_channels, out_channels,
kernel_size=1, padding=0,
bias=False, stride=stride)
at_shape = (s_shape[0], 1, s_shape[2] * s_shape[3])
jac_shape = (s_shape[0], 3, 768, 768)
af_shape = s_shape
sa_shape = s_shape
ca_shape = s_shape
cm_shape = s_shape
gm_shape = s_shape
self.at_enc_s = nn.Sequential(
conv1x1(at_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.af_enc_s = nn.Sequential(
conv1x1(af_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.sa_enc_s = nn.Sequential(
conv1x1(sa_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.ca_enc_s = nn.Sequential(
conv1x1(ca_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.cm_enc_s = nn.Sequential(
conv1x1(cm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.gm_enc_s = nn.Sequential(
conv1x1(gm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.jac_enc_s = nn.Sequential(
nn.Conv2d(jac_shape[1], nz//8, 5, 1),
nn.BatchNorm2d(nz//8),
nn.ReLU6(inplace=True),
nn.Conv2d(nz//8, nz//4, 5, 3, 1),
nn.BatchNorm2d(nz//4),
nn.ReLU6(inplace=True),
conv1x1(nz//4, nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
at_shape = (t_shape[0], 1, t_shape[2] * t_shape[3])
jac_shape = (t_shape[0], 3, 768, 768)
af_shape = t_shape
sa_shape = t_shape
ca_shape = t_shape
cm_shape = t_shape
gm_shape = t_shape
self.at_enc_t = nn.Sequential(
conv1x1(at_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.af_enc_t = nn.Sequential(
conv1x1(af_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.sa_enc_t = nn.Sequential(
conv1x1(sa_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.ca_enc_t = nn.Sequential(
conv1x1(ca_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.cm_enc_t = nn.Sequential(
conv1x1(cm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.gm_enc_t = nn.Sequential(
conv1x1(gm_shape[1], nz, 1),
nn.BatchNorm2d(nz),
nn.ReLU()
)
self.agg_s = nn.Sequential(
nn.Conv2d(agg_ch, in_channels, 1),
nn.BatchNorm2d(in_channels),
nn.ReLU()
)
self.agg_t = nn.Sequential(
nn.Conv2d(agg_ch, in_channels, 1),
nn.BatchNorm2d(in_channels),
nn.ReLU()
)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@staticmethod
def adapt_wh(f_s, f_t):
s_H, t_H = f_s.shape[2], f_t.shape[2]
if s_H > t_H:
f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H))
elif s_H < t_H:
f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H))
else:
pass
return f_s, f_t
def forward(self, f_s, f_t):
f_s, f_t = self.adapt_wh(f_s, f_t)
at_s, at_t = self.at(f_s), self.at(f_t)
af_s, af_t = self.af(f_s), self.af(f_t)
cm_s, cm_t = self.cm(f_s), self.cm(f_t)
sa_s, sa_t = self.sa(f_s), self.sa(f_t)
ca_s, ca_t = self.ca(f_s), self.ca(f_t)
gm_s, gm_t = self.gram(f_s), self.gram(f_t)
at_em_s, at_em_t = self.at_enc_s(at_s), self.at_enc_t(at_t)
af_em_s, af_em_t = self.af_enc_s(af_s), self.af_enc_t(af_t)
cm_em_s, cm_em_t = self.cm_enc_s(cm_s), self.cm_enc_t(cm_t)
sa_em_s, sa_em_t = self.sa_enc_s(sa_s), self.sa_enc_t(sa_t)
ca_em_s, ca_em_t = self.ca_enc_s(ca_s), self.ca_enc_t(ca_t)
gm_em_s, gm_em_t = self.gm_enc_s(gm_s), self.gm_enc_t(gm_t)
stack_s = [at_em_s, af_em_s, cm_em_s, sa_em_s, ca_em_s, gm_em_s]
stack_t = [at_em_t, af_em_t, cm_em_t, sa_em_t, ca_em_t, gm_em_t]
feat_stack_s = torch.cat([stack_s[i] for i in self.kn_list], dim=1) #
feat_stack_t = torch.cat([stack_t[i] for i in self.kn_list], dim=1) #
feat_s = self.agg_s(feat_stack_s)
feat_t = self.agg_t(feat_stack_t)
return feat_stack_s, feat_stack_t, feat_s, feat_t
''' get params '''
def enc_s_params(self):
return chain(self.at_enc_s.parameters(), self.af_enc_s.parameters(), self.ca_enc_s.parameters(),
self.sa_enc_s.parameters(), self.cm_enc_s.parameters(), self.gm_enc_s.parameters(), self.agg_s.parameters())
def enc_t_params(self):
return chain(self.at_enc_t.parameters(), self.af_enc_t.parameters(), self.ca_enc_t.parameters(),
self.sa_enc_t.parameters(), self.cm_enc_t.parameters(), self.gm_enc_t.parameters(), self.agg_t.parameters())
''' ---- 6/7 forms of knowledge ---- '''
@staticmethod
# attention
def at(f, p=2):
return F.normalize(f.pow(p).mean(1).view(f.size(0), -1)).reshape((f.size(0), 1, f.size(2), f.size(3)))
@staticmethod
# correlation matrix -- dual affinity
def cm(f, P_order=2, gamma=0.4):
f = F.normalize(f, p=2, dim=-1)
f_trans = torch.transpose(f, 2, 3)
sim_mat = torch.matmul(f_trans, torch.matmul(f, f_trans)) # (H*W)x[(W*H)x(H*W)] = (H*W)
corr_mat1 = torch.zeros_like(sim_mat)
for p in range(P_order+1):
corr_mat1 += math.exp(-2*gamma) * (2*gamma)**p / \
math.factorial(p) * torch.pow(sim_mat, p)
corr_mat1 = torch.transpose(corr_mat1, 2, 3)
sim_mat2 = torch.matmul(f, torch.matmul(f_trans, f)) # (W*H)x[(H*W)x(W*H)] = (W*H)
corr_mat2 = torch.zeros_like(sim_mat2)
for p in range(P_order+1):
corr_mat2 += math.exp(-2*gamma) * (2*gamma)**p / \
math.factorial(p) * torch.pow(sim_mat2, p)
corr_mat = corr_mat1 + corr_mat2
return corr_mat
@staticmethod
# grad cam
def cam(out, f, target):
target_out = torch.gather(out, 2, torch.unsqueeze(target, 1))
grad_fm = grad(outputs=target_out, inputs=f,
grad_outputs=torch.ones_like(target_out),
create_graph=True, retain_graph=True, only_inputs=True)[0]
weights = F.adaptive_avg_pool2d(grad_fm, 1)
cam = torch.sum(torch.mul(weights, grad_fm), dim=1, keepdim=True)
cam = F.relu(cam)
cam = cam.view(cam.size(0), -1)
norm_cam = F.normalize(cam, p=2, dim=1)
return norm_cam
@staticmethod
# grad norm
def jacobian_grad(out, img, target):
target_out = torch.gather(out, 2, torch.unsqueeze(target, 1))
grad_ = grad(outputs=target_out, inputs=img,
grad_outputs=torch.ones_like(target_out),
create_graph=True, retain_graph=True, only_inputs=True)[0]
norm_grad = F.normalize(grad_.view(grad_.size(0), -1), p=2, dim=1)
return norm_grad
@staticmethod
# attention feature norm
def af(f, eps=1e-6):
fm_norm = torch.norm(f, dim=(2,3), keepdim=True)
af = torch.div(f, fm_norm + eps)
return af
@staticmethod
# spatial attention
def sa(f, gamma=0.4):
m_batchsize, C, height, width = f.size()
proj_query = f.view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = f.view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = F.softmax(energy, dim=-1)
proj_value = f.view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
# out = gamma*out + f
return out
@staticmethod
# channel attention
def ca(f, gamma=0.4):
m_batchsize, C, height, width = f.size()
proj_query = f.view(m_batchsize, C, -1)
proj_key = f.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = F.softmax(energy_new, dim=-1)
proj_value = f.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
# out = gamma*out + f
return out
# gram matrix
@staticmethod
def gram(f):
shape = f.shape
f = f.view(f.size(0), f.size(1), -1)
fm = F.normalize(f, dim=2)
gram_matrix = torch.bmm(fm, fm.transpose(1, 2))
trans_gram = torch.bmm(gram_matrix, f)
trans_gram = trans_gram.view(shape)
return trans_gram
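# A minimal shape-check sketch (not part of the original module). The feature
# map sizes, nz and kn_list below are hypothetical, and the guard keeps the
# example from running on import.
if __name__ == '__main__':
    s_shape, t_shape = (2, 64, 24, 24), (2, 128, 24, 24)
    mkat = MKAT_F(s_shape, t_shape, nz=32, kn_list=list(range(6)))
    f_s, f_t = torch.randn(*s_shape), torch.randn(*t_shape)
    stack_s, stack_t, feat_s, feat_t = mkat(f_s, f_t)
    # stacks carry 6 * nz channels; aggregated features carry 512 channels
    print(stack_s.shape, stack_t.shape, feat_s.shape, feat_t.shape)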
|
the-stack_0_2877 | """
Binary Ninja plugin that imports a capa report,
produced via `capa --json /path/to/sample`,
into the current database.
It will mark up functions with their capa matches, like:
; capa: print debug messages (host-interaction/log/debug/write-event)
; capa: delete service (host-interaction/service/delete)
; Attributes: bp-based frame
public UninstallService
UninstallService proc near
...
To use, invoke from the Binary Ninja Tools menu, or from the command-palette.
Adapted for Binary Ninja by @psifertex
This script will verify that the report matches the workspace.
Check the log window for any errors, and/or the summary of changes.
Derived from: https://github.com/fireeye/capa/blob/master/scripts/import-to-ida.py
"""
import os
import json
from binaryninja import *
def append_func_cmt(bv, va, cmt):
"""
add the given comment to the given function,
if it doesn't already exist.
"""
func = bv.get_function_at(va)
if not func:
raise ValueError("not a function")
if cmt in func.comment:
return
func.comment = func.comment + "\n" + cmt
def load_analysis(bv):
shortname = os.path.splitext(os.path.basename(bv.file.filename))[0]
dirname = os.path.dirname(bv.file.filename)
log_info(f"dirname: {dirname}\nshortname: {shortname}\n")
if os.access(os.path.join(dirname, shortname + ".js"), os.R_OK):
path = os.path.join(dirname, shortname + ".js")
elif os.access(os.path.join(dirname, shortname + ".json"), os.R_OK):
path = os.path.join(dirname, shortname + ".json")
else:
path = interaction.get_open_filename_input("capa report:", "JSON (*.js *.json);;All Files (*)")
if not path or not os.access(path, os.R_OK):
log_error("Invalid filename.")
return 0
log_info("Using capa file %s" % path)
with open(path, "rb") as f:
doc = json.loads(f.read().decode("utf-8"))
if "meta" not in doc or "rules" not in doc:
log_error("doesn't appear to be a capa report")
return -1
a = doc["meta"]["sample"]["md5"].lower()
md5 = Transform["MD5"]
rawhex = Transform["RawHex"]
b = rawhex.encode(md5.encode(bv.parent_view.read(bv.parent_view.start, bv.parent_view.end))).decode("utf-8")
if not a == b:
log_error("sample mismatch")
return -2
rows = []
for rule in doc["rules"].values():
if rule["meta"].get("lib"):
continue
if rule["meta"].get("capa/subscope"):
continue
if rule["meta"]["scope"] != "function":
continue
name = rule["meta"]["name"]
ns = rule["meta"].get("namespace", "")
for va in rule["matches"].keys():
va = int(va)
rows.append((ns, name, va))
# order by (namespace, name) so that like things show up together
rows = sorted(rows)
for ns, name, va in rows:
if ns:
cmt = "%s (%s)" % (name, ns)
else:
cmt = "%s" % (name,)
log_info("0x%x: %s" % (va, cmt))
try:
# message will look something like:
#
# capa: delete service (host-interaction/service/delete)
append_func_cmt(bv, va, "capa: " + cmt)
except ValueError:
continue
log_info("ok")
PluginCommand.register("Load capa file", "Loads an analysis file from capa", load_analysis)
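# A minimal headless sketch (not part of the original plugin). The sample and
# report names are hypothetical; the report is produced beforehand with
# `capa --json sample.exe > sample.json` and placed next to the binary:
#
#   import binaryninja
#   with binaryninja.open_view("sample.exe") as bv:
#       load_analysis(bv)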
|
the-stack_0_2879 | # Copyright 2017: GoDaddy Inc.
import collections
import datetime
import logging
import random
import threading
import futurist
import futurist.rejection
import monotonic
import requests
from netmet.utils import ping
from netmet.utils import pusher
from netmet.utils import secure
LOG = logging.getLogger(__name__)
class Collector(object):
pinger_failed_msg = "Pinger failed to ping"
def __init__(self, netmet_server, client_host, tasks):
self.client_host = client_host
self.tasks = tasks
self.pusher = None
if netmet_server:
netmet_server = netmet_server.rstrip("/")
self.pusher = pusher.Pusher("%s/api/v1/metrics" % netmet_server,
extra_headers=secure.gen_hmac_headers)
self.lock = threading.Lock()
self.queue = collections.deque()
self.death = threading.Event()
self.started = False
self.main_thread = None
self.processing_thread = None
def gen_periodic_ping(self, task):
ip = (task["north-south"]["dest"] if "north-south" in task else
task["east-west"]["dest"]["ip"])
settings = task[task.keys()[0]]["settings"]
pinger = ping.Ping(ip, timeout=settings["timeout"],
packet_size=settings["packet_size"])
def ping_():
try:
result = pinger.ping()
metric = {
"client_src": self.client_host,
"protocol": "icmp",
"timestamp": result["timestamp"],
"latency": result["rtt"],
"packet_size": result["packet_size"],
"lost": int(bool(result["ret_code"])),
"transmitted": int(not bool(result["ret_code"])),
"ret_code": result["ret_code"]
}
if "north-south" in task:
metric["dest"] = task["north-south"]["dest"]
self.queue.append({"north-south": metric})
else:
metric["client_dest"] = task["east-west"]["dest"]
self.queue.append({"east-west": metric})
except Exception:
LOG.exception(self.pinger_failed_msg)
return ping_
def gen_periodic_http_ping(self, task):
def http_ping():
try:
started_at = monotonic.monotonic()
metric = {
"client_src": self.client_host,
"protocol": "http",
"timestamp": datetime.datetime.now().isoformat(),
"packet_size": 0,
"latency": 0,
"lost": 1,
"transmitted": 0,
"ret_code": 504
}
settings = task[task.keys()[0]]["settings"]
if "east-west" in task:
dest = task["east-west"]["dest"]
metric["client_dest"] = dest
dest = "http://%s:%s" % (dest["host"], dest["port"])
else:
dest = task["north-south"]["dest"]
metric["dest"] = dest
r = requests.get(dest, timeout=settings["timeout"])
metric.update({
"latency": (monotonic.monotonic() - started_at) * 1000,
"packet_size": len(r.content),
"lost": int(r.status_code != 200),
"transmitted": int(r.status_code == 200),
"ret_code": r.status_code
})
except requests.exceptions.ConnectionError:
pass
except Exception:
                LOG.exception("Collector failed to call another client API")
finally:
type_ = "east-west" if "east-west" in task else "north-south"
self.queue.append({type_: metric})
return http_ping
def process_results(self):
while self.queue or not self.death.is_set():
while self.queue:
item = self.queue.popleft()
if self.pusher:
self.pusher.add(item) # push to netmet server data
else:
print(item) # netmet client standalone mode
self.death.wait(0.1)
def _job_per_period(self, callables, period):
def helper():
delay = period / float(len(callables))
pool = futurist.ThreadPoolExecutor(
max_workers=50,
check_and_reject=futurist.rejection.reject_when_reached(50))
with pool:
while not self.death.is_set():
for item in callables:
while not self.death.is_set():
try:
pool.submit(item)
break
except futurist.RejectedSubmission:
                                LOG.warning("Collector: Feed me! More threads!")
self.death.wait(delay)
self.death.wait(delay)
# up to 0.1 second delay between runs of tasks
self.death.wait(random.random() * min(delay, 1) / 10.0)
return helper
def _job(self):
generators = {
"icmp": self.gen_periodic_ping,
"http": self.gen_periodic_http_ping
}
period_tasks = {}
for task in self.tasks:
task_data = task.values()[0]
period_ = task_data["settings"]["period"]
protocol = task_data["protocol"]
period_tasks.setdefault(period_, [])
if protocol in generators:
period_tasks[period_].append(generators[protocol](task))
else:
LOG.warning("Allowed protocols are: %s" % generators.keys())
pool = futurist.ThreadPoolExecutor(max_workers=len(period_tasks))
with pool:
min_period = min(period_tasks)
min_lag = float(min_period) / len(period_tasks[min_period])
lag = min(min_lag / len(period_tasks), 1)
LOG.info(period_tasks)
for period, callables in period_tasks.iteritems():
pool.submit(self._job_per_period(callables, period))
self.death.wait(lag)
def start(self):
with self.lock:
if not self.started:
self.started = True
self.death = threading.Event()
else:
return
if self.pusher:
self.pusher.start()
self.main_thread = threading.Thread(target=self._job)
self.main_thread.daemon = True
self.main_thread.start()
self.processing_thread = threading.Thread(target=self.process_results)
        self.processing_thread.daemon = True
self.processing_thread.start()
return True
def stop(self):
with self.lock:
if self.started and not self.death.is_set():
self.death.set()
self.main_thread.join()
self.processing_thread.join()
if self.pusher:
self.pusher.stop()
self.started = False
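# Illustrative usage sketch (not part of the original module): a standalone
# collector (netmet_server=None) prints results instead of pushing them.
# The host and task values below are hypothetical.
#
#     tasks = [{
#         "north-south": {
#             "dest": "192.0.2.1",
#             "protocol": "icmp",
#             "settings": {"period": 10, "timeout": 1, "packet_size": 55},
#         }
#     }]
#     collector = Collector(None, "client-host-1", tasks)
#     collector.start()
#     ...
#     collector.stop()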
|
the-stack_0_2881 | """
This module provides means of connecting to a QCoDeS database file and
initialising it. Note that connecting/initialisation take into account
database version and possibly perform database upgrades.
"""
import io
import sqlite3
import sys
from contextlib import contextmanager
from os.path import expanduser, normpath
from typing import Union, Iterator, Tuple, Optional
import numpy as np
from numpy import ndarray
from qcodes.dataset.sqlite.connection import ConnectionPlus
from qcodes.dataset.sqlite.db_upgrades import _latest_available_version, \
get_user_version, perform_db_upgrade
from qcodes.dataset.sqlite.initial_schema import init_db
import qcodes.config
from qcodes.utils.types import complex_types, complex_type_union
# utility function to allow sqlite/numpy type
def _adapt_array(arr: ndarray) -> sqlite3.Binary:
"""
See this:
https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te
"""
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return sqlite3.Binary(out.read())
def _convert_array(text: bytes) -> ndarray:
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
def _convert_complex(text: bytes) -> complex_type_union:
out = io.BytesIO(text)
out.seek(0)
return np.load(out)[0]
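# A minimal round-trip sketch (not part of the original module): the array
# adapter/converter pair above serializes numpy arrays via np.save/np.load,
# so an array written as a BLOB comes back unchanged. Kept as an unused
# helper so importing this module stays side-effect free.
def _demo_array_roundtrip() -> None:
    arr = np.arange(4.0)
    blob = _adapt_array(arr)  # sqlite3.Binary (bytes-like)
    assert np.array_equal(_convert_array(bytes(blob)), arr)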
this_session_default_encoding = sys.getdefaultencoding()
def _convert_numeric(value: bytes) -> Union[float, int, str]:
"""
This is a converter for sqlite3 'numeric' type class.
This converter is capable of deducting whether a number is a float or an
int.
Note sqlite3 allows to save data to columns even if their type is not
compatible with the table type class (for example, it is possible to save
integers into 'text' columns). Due to this fact, and for the reasons of
flexibility, the numeric converter is also made capable of handling
strings. An obvious exception to this is 'nan' (case insensitive) which
gets converted to `np.nan`. Another exception to this is 'inf', which
gets converted to 'np.inf'.
"""
try:
# First, try to convert bytes to float
numeric = float(value)
except ValueError as e:
# If an exception has been raised, we first need to find out
# if the reason was the conversion to float, and, if so, we are sure
# that we need to return a string
if "could not convert string to float" in str(e):
return str(value, encoding=this_session_default_encoding)
else:
# otherwise, the exception is forwarded up the stack
raise e
# If that worked, e.g. did not raise an exception, then we check if the
# outcome is 'nan'
if np.isnan(numeric):
return numeric
# Then we check if the outcome is 'inf', includes +inf and -inf
if np.isinf(numeric):
return numeric
# If it is not 'nan' and not 'inf', then we need to see if the value is
    # really an integer or has a fractional part
numeric_int = int(numeric)
if numeric != numeric_int:
return numeric
else:
return numeric_int
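# Illustrative behaviour of the converter above (not part of the original
# module); inputs are bytes, as sqlite3 hands them to converters. Kept as an
# unused helper so importing stays side-effect free.
def _demo_convert_numeric() -> None:
    assert _convert_numeric(b"42") == 42 and isinstance(_convert_numeric(b"42"), int)
    assert _convert_numeric(b"1.5") == 1.5
    assert np.isnan(_convert_numeric(b"nan"))
    assert np.isinf(_convert_numeric(b"-inf"))
    assert _convert_numeric(b"hello") == "hello"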
def _adapt_float(fl: float) -> Union[float, str]:
if np.isnan(fl):
return "nan"
return float(fl)
def _adapt_complex(value: complex_type_union) -> sqlite3.Binary:
out = io.BytesIO()
np.save(out, np.array([value]))
out.seek(0)
return sqlite3.Binary(out.read())
def connect(name: str, debug: bool = False,
version: int = -1) -> ConnectionPlus:
"""
Connect or create database. If debug the queries will be echoed back.
This function takes care of registering the numpy/sqlite type
converters that we need.
Args:
name: name or path to the sqlite file
debug: whether or not to turn on tracing
version: which version to create. We count from 0. -1 means 'latest'.
Should always be left at -1 except when testing.
Returns:
conn: connection object to the database (note, it is
`ConnectionPlus`, not `sqlite3.Connection`
"""
# register numpy->binary(TEXT) adapter
# the typing here is ignored due to what we think is a flaw in typeshed
# see https://github.com/python/typeshed/issues/2429
sqlite3.register_adapter(np.ndarray, _adapt_array)
# register binary(TEXT) -> numpy converter
# for some reasons mypy complains about this
sqlite3.register_converter("array", _convert_array)
sqlite3_conn = sqlite3.connect(name, detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=True)
conn = ConnectionPlus(sqlite3_conn)
latest_supported_version = _latest_available_version()
db_version = get_user_version(conn)
if db_version > latest_supported_version:
raise RuntimeError(f"Database {name} is version {db_version} but this "
f"version of QCoDeS supports up to "
f"version {latest_supported_version}")
# sqlite3 options
conn.row_factory = sqlite3.Row
# Make sure numpy ints and floats types are inserted properly
for numpy_int in [
np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64
]:
sqlite3.register_adapter(numpy_int, int)
sqlite3.register_converter("numeric", _convert_numeric)
for numpy_float in [np.float, np.float16, np.float32, np.float64]:
sqlite3.register_adapter(numpy_float, _adapt_float)
for complex_type in complex_types:
sqlite3.register_adapter(complex_type, _adapt_complex)
sqlite3.register_converter("complex", _convert_complex)
if debug:
conn.set_trace_callback(print)
init_db(conn)
perform_db_upgrade(conn, version=version)
return conn
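# Illustrative usage (not part of the original module): connecting to an
# in-memory database creates the schema and upgrades it to the latest
# supported version.
#
#     conn = connect(":memory:")
#     assert get_user_version(conn) == _latest_available_version()
#     conn.close()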
def get_db_version_and_newest_available_version(path_to_db: str) -> Tuple[int,
int]:
"""
Connect to a DB without performing any upgrades and get the version of
that database file along with the newest available version (the one that
a normal "connect" will automatically upgrade to)
Args:
path_to_db: the absolute path to the DB file
Returns:
A tuple of (db_version, latest_available_version)
"""
conn = connect(path_to_db, version=0)
db_version = get_user_version(conn)
return db_version, _latest_available_version()
def get_DB_location() -> str:
return normpath(expanduser(qcodes.config["core"]["db_location"]))
def get_DB_debug() -> bool:
return bool(qcodes.config["core"]["db_debug"])
def initialise_database(journal_mode: Optional[str] = 'WAL') -> None:
"""
Initialise a database in the location specified by the config object
and set ``atomic commit and rollback mode`` of the db. The db is created
with the latest supported version. If the database already exists the
``atomic commit and rollback mode`` is set and the database is upgraded
to the latest version.
Args:
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
# calling connect performs all the needed actions to create and upgrade
# the db to the latest version.
conn = connect(get_DB_location(), get_DB_debug())
if journal_mode is not None:
set_journal_mode(conn, journal_mode)
conn.close()
del conn
def set_journal_mode(conn: ConnectionPlus, journal_mode: str) -> None:
"""
Set the ``atomic commit and rollback mode`` of the sqlite database.
See https://www.sqlite.org/pragma.html#pragma_journal_mode for details.
Args:
conn: Connection to the database.
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
valid_journal_modes = ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
if journal_mode not in valid_journal_modes:
raise RuntimeError(f"Invalid journal_mode {journal_mode} "
f"Valid modes are {valid_journal_modes}")
query = f"PRAGMA journal_mode={journal_mode};"
cursor = conn.cursor()
cursor.execute(query)
def initialise_or_create_database_at(db_file_with_abs_path: str,
journal_mode: Optional[str] = 'WAL') -> None:
"""
This function sets up QCoDeS to refer to the given database file. If the
database file does not exist, it will be initiated.
Args:
db_file_with_abs_path
Database file name with absolute path, for example
``C:\\mydata\\majorana_experiments.db``
journal_mode: Which `journal_mode` should be used for atomic commit and rollback.
Options are DELETE, TRUNCATE, PERSIST, MEMORY, WAL and OFF. If set to None
no changes are made.
"""
qcodes.config.core.db_location = db_file_with_abs_path
initialise_database(journal_mode)
@contextmanager
def initialised_database_at(db_file_with_abs_path: str) -> Iterator[None]:
"""
Initializes or creates a database and restores the 'db_location' afterwards.
Args:
db_file_with_abs_path
Database file name with absolute path, for example
``C:\\mydata\\majorana_experiments.db``
"""
db_location = qcodes.config["core"]["db_location"]
try:
initialise_or_create_database_at(db_file_with_abs_path)
yield
finally:
qcodes.config["core"]["db_location"] = db_location
def conn_from_dbpath_or_conn(conn: Optional[ConnectionPlus],
path_to_db: Optional[str]) \
-> ConnectionPlus:
"""
A small helper function to abstract the logic needed for functions
that take either a `ConnectionPlus` or the path to a db file.
If neither is given this will fall back to the default db location.
It is an error to supply both.
Args:
conn: A ConnectionPlus object pointing to a sqlite database
path_to_db: The path to a db file.
Returns:
A `ConnectionPlus` object
"""
if path_to_db is not None and conn is not None:
raise ValueError('Received BOTH conn and path_to_db. Please '
'provide only one or the other.')
if conn is None and path_to_db is None:
path_to_db = get_DB_location()
if conn is None and path_to_db is not None:
conn = connect(path_to_db, get_DB_debug())
elif conn is not None:
conn = conn
else:
# this should be impossible but left here to keep mypy happy.
raise RuntimeError("Could not obtain a connection from"
"supplied information.")
return conn
|
the-stack_0_2882 | """ foxtail/clinics/tests/test_models.py """
import pytest
from .factories import ClinicFactory
pytestmark = pytest.mark.django_db
def test_get_organization():
clinic = ClinicFactory()
org = clinic.organization
assert clinic.get_organization() == org.name
|
the-stack_0_2883 | from itertools import chain
import multiprocessing as mp
try:
from multiprocessing import SimpleQueue as MPQueue
except ImportError:
from multiprocessing.queues import SimpleQueue as MPQueue
import os
import threading
from ddtrace import Span
from ddtrace import tracer
from ddtrace.internal import _rand
from ddtrace.internal import forksafe
from ddtrace.internal.compat import Queue
def test_random():
m = set()
for i in range(0, 2 ** 16):
n = _rand.rand64bits()
assert 0 <= n <= 2 ** 64 - 1
assert n not in m
m.add(n)
def test_fork_no_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_fork_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits() for _ in range(100)}
child_rns = q.get()
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits() for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def _test_multiprocess_target(q):
assert sum((_ is _rand.seed for _ in forksafe._registry)) == 1
q.put([_rand.rand64bits() for _ in range(100)])
def test_multiprocess():
q = MPQueue()
ps = [mp.Process(target=_test_multiprocess_target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
assert p.exitcode == 0
ids_list = [_rand.rand64bits() for _ in range(1000)]
ids = set(ids_list)
assert len(ids_list) == len(ids), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids_list) == len(child_ids), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def _test_threadsafe_target(q):
# Generate a bunch of numbers to try to maximize the chance that
# two threads will be calling rand64bits at the same time.
rngs = [_rand.rand64bits() for _ in range(200000)]
q.put(rngs)
def test_threadsafe():
# Check that the PRNG is thread-safe.
# This obviously won't guarantee thread safety, but it's something
# at least.
# To provide some validation of this method I wrote a slow, unsafe RNG:
#
# state = 4101842887655102017
#
# def bad_random():
# global state
# state ^= state >> 21
# state ^= state << 35
# state ^= state >> 4
# return state * 2685821657736338717
#
# which consistently fails this test.
q = Queue()
ts = [threading.Thread(target=_test_threadsafe_target, args=(q,)) for _ in range(5)]
for t in ts:
t.start()
for t in ts:
t.join()
ids = set()
while not q.empty():
new_ids_list = q.get()
new_ids = set(new_ids_list)
assert len(new_ids) == len(new_ids_list), "Collision found in ids"
assert ids & new_ids == set()
ids = ids | new_ids
assert len(ids) > 0
def test_tracer_usage_fork():
q = MPQueue()
pid = os.fork()
# Similar test to test_fork() above except we use the tracer API.
# In this case we expect to never have collisions.
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
q.put(child_ids)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def _test_tracer_usage_multiprocess_target(q):
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(10)]))
q.put(ids_list)
def test_tracer_usage_multiprocess():
q = MPQueue()
# Similar to test_multiprocess(), ensures that no collisions are
# generated between parent and child processes while using
# multiprocessing.
# Note that we have to be wary of the size of the underlying
# pipe in the queue: https://bugs.python.org/msg143081
ps = [mp.Process(target=_test_tracer_usage_multiprocess_target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)]))
ids = set(ids_list)
assert len(ids) == len(ids_list), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_span_api_fork():
q = MPQueue()
pid = os.fork()
if pid > 0:
# parent
parent_ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None) for _ in range(100)]))
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None) for _ in range(100)]))
q.put(child_ids)
finally:
os._exit(0)
|
the-stack_0_2884 | import unittest
class Test(unittest.TestCase):
def test(self):
# docs checkpoint 0
import numpy as np
import openmdao.api as om
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
from openmdao.utils.assert_utils import assert_near_equal
from openaerostruct.utils.testing import assert_check_totals
# Create a dictionary to store options about the mesh
mesh_dict = {"num_y": 7, "num_x": 2, "wing_type": "CRM", "symmetry": True, "num_twist_cp": 5}
# Generate the aerodynamic mesh based on the previous dictionary
mesh, twist_cp = generate_mesh(mesh_dict)
# Create a dictionary with info and options about the aerodynamic
# lifting surface
surface = {
# Wing definition
"name": "wing", # name of the surface
"symmetry": True, # if true, model one half of wing
# reflected across the plane y = 0
"groundplane": True,
"S_ref_type": "wetted", # how we compute the wing area,
# can be 'wetted' or 'projected'
"fem_model_type": "tube",
"twist_cp": twist_cp,
"mesh": mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
"CL0": 0.0, # CL of the surface at alpha=0
"CD0": 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
"k_lam": 0.05, # percentage of chord with laminar
# flow, used for viscous drag
"t_over_c_cp": np.array([0.15]), # thickness over chord ratio (NACA0015)
"c_max_t": 0.303, # chordwise location of maximum (NACA0015)
# thickness
"with_viscous": True, # if true, compute viscous drag
"with_wave": False, # if true, compute wave drag
}
# Create the OpenMDAO problem
prob = om.Problem()
# Create an independent variable component that will supply the flow
# conditions to the problem.
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output("v", val=248.136, units="m/s")
indep_var_comp.add_output("alpha", val=5.0, units="deg")
indep_var_comp.add_output("Mach_number", val=0.84)
indep_var_comp.add_output("re", val=1.0e6, units="1/m")
indep_var_comp.add_output("rho", val=0.38, units="kg/m**3")
indep_var_comp.add_output("cg", val=np.zeros((3)), units="m")
indep_var_comp.add_output("height_agl", val=8000.0, units="m")
# Add this IndepVarComp to the problem model
prob.model.add_subsystem("prob_vars", indep_var_comp, promotes=["*"])
# Create and add a group that handles the geometry for the
# aerodynamic lifting surface
geom_group = Geometry(surface=surface)
prob.model.add_subsystem(surface["name"], geom_group)
# Create the aero point group, which contains the actual aerodynamic
# analyses
aero_group = AeroPoint(surfaces=[surface])
point_name = "aero_point_0"
prob.model.add_subsystem(
point_name, aero_group, promotes_inputs=["v", "alpha", "Mach_number", "re", "rho", "cg", "height_agl"]
)
name = surface["name"]
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + ".mesh", point_name + "." + name + ".def_mesh")
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + ".mesh", point_name + ".aero_states." + name + "_def_mesh")
prob.model.connect(name + ".t_over_c", point_name + "." + name + "_perf." + "t_over_c")
# Import the Scipy Optimizer and set the driver of the problem to use
# it, which defaults to an SLSQP optimization method
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["tol"] = 1e-9
recorder = om.SqliteRecorder("aero.db")
prob.driver.add_recorder(recorder)
prob.driver.recording_options["record_derivatives"] = True
prob.driver.recording_options["includes"] = ["*"]
# Setup problem and add design variables, constraint, and objective
prob.model.add_design_var("height_agl", lower=10.0, upper=8000.0)
prob.model.add_design_var("wing.twist_cp", lower=-10.0, upper=15.0)
prob.model.add_constraint(point_name + ".wing_perf.CL", equals=0.5)
prob.model.add_objective(point_name + ".wing_perf.CD", scaler=1e4)
# Set up and run the optimization problem
prob.setup()
prob.run_driver()
# docs checkpoint 1
assert_near_equal(prob["aero_point_0.wing_perf.CD"][0], 0.033389699871650073, 1e-6)
assert_near_equal(prob["aero_point_0.wing_perf.CL"][0], 0.5, 1e-6)
assert_near_equal(prob["aero_point_0.CM"][1], -1.7885550372372376, 1e-6)
prob["height_agl"] = 10.0
prob.run_driver()
assert_near_equal(prob["aero_point_0.wing_perf.CD"][0], 0.029145613948518813, 1e-6)
assert_near_equal(prob["aero_point_0.wing_perf.CL"][0], 0.5, 1e-6)
assert_near_equal(prob["aero_point_0.CM"][1], -1.7719184423417516, 1e-6)
totals = prob.check_totals(
of=["aero_point_0.wing_perf.CD", "aero_point_0.wing_perf.CL"],
wrt=["wing.twist_cp", "height_agl"],
compact_print=True,
out_stream=None,
)
assert_check_totals(totals, atol=1e-2, rtol=1e-5)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_2886 | from unit_test_common import execute_csv2_command, initialize_csv2_request, ut_id, sanity_commands
from sys import argv
# lno: CV - error code identifier.
def main(gvar):
if not gvar:
gvar = {}
if len(argv) > 1:
initialize_csv2_request(gvar, selections=argv[1])
else:
initialize_csv2_request(gvar)
# 01 - 14
sanity_commands(gvar, 'cloud', 'metadata-list')
# 15
execute_csv2_command(
gvar, 1, None, 'The following command line arguments were invalid: metadata-mime-type',
['cloud', 'metadata-list', '-mmt', 'invalid-unit-test', '-g', ut_id(gvar, 'clg1'), '-su', ut_id(gvar, 'clu3')]
)
# 16
execute_csv2_command(
gvar, 0, None, None,
['cloud', 'metadata-list', '-NV', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata', expected_columns={'Group', 'Cloud', 'Metadata Filename', 'Enabled', 'Priority', 'MIME Type'}
)
# 17
execute_csv2_command(
gvar, 0, None, 'Rows: 0',
['cloud', 'metadata-list', '--cloud-name', 'valid-unit-test', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 18
execute_csv2_command(
gvar, 0, None, 'Rows: 0',
['cloud', 'metadata-list', '--metadata-name', 'valid-unit-test', '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 19
execute_csv2_command(
gvar, 0, None, 'Server: unit-test, Active User: {}, Active Group: {}'.format(ut_id(gvar, 'clu3'), ut_id(gvar, 'clg1')),
['cloud', 'metadata-list', '--cloud-name', ut_id(gvar, 'clc2'), '--metadata-name', ut_id(gvar, 'clm2'), '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 20
execute_csv2_command(
gvar, 0, None, 'Rows: 1',
['cloud', 'metadata-list', '--cloud-name', ut_id(gvar, 'clc2'), '--metadata-name', ut_id(gvar, 'clm2'), '-su', ut_id(gvar, 'clu3')],
expected_list='Clouds/Metadata'
)
# 21
execute_csv2_command(
gvar, 0, None, 'cloud metadata-list, 1. Clouds/Metadata: keys=group_name,cloud_name,metadata_name, columns=enabled,priority,mime_type',
['cloud', 'metadata-list', '--view-columns', '-su', ut_id(gvar, 'clu3')]
)
if __name__ == "__main__":
main(None)
|
the-stack_0_2887 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
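# Illustrative sketch (not part of the original module): the cache key is the
# fragment-less URL (plus "hash_name=hash" when present), hashed with sha224
# and nested two hex characters at a time. For a hypothetical sdist link:
#
#     link = pip.index.Link("https://example.com/foo-1.0.tar.gz")
#     _cache_for_link("/cache", link)
#     # -> "/cache/wheels/aa/bb/cc/<remaining 50 hex chars of the sha224>"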
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = canonicalize_name(package_name)
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in {errno.ENOENT, errno.ENOTDIR}:
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path))
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
# get the entry points and then the script names
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
"""get the string representation of EntryPoint, remove space and split
on '='"""
return str(s).replace(" ", "").split("=")
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
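# Illustrative sketch (not part of the original module): for a hypothetical
# entry_points.txt containing
#
#     [console_scripts]
#     mytool = mypkg.cli:main
#
# get_entrypoints() returns ({"mytool": "mypkg.cli:main"}, {}) -- console
# scripts first, GUI scripts second.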
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False, prefix=None):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = r"""# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
# Record pip as the installer
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((normpath(f, lib_dir), h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
installing a version only minor version ahead (e.g 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
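# Illustrative behaviour of the check above (not part of the original module),
# with VERSION_COMPATIBLE == (1, 0) as defined near the top of this file:
#
#     check_compatibility((1, 0), "pkg")   # ok, silent
#     check_compatibility((1, 9), "pkg")   # ok, but logs a warning
#     check_compatibility((2, 0), "pkg")   # raises UnsupportedWheel
#     check_compatibility(False, "pkg")    # raises UnsupportedWheel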
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
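    # Illustrative parsing sketch (not part of the original class): the
    # filename regex above splits a universal wheel name into its tags, e.g.
    #
    #     w = Wheel("pip-9.0.1-py2.py3-none-any.whl")
    #     w.name        -> "pip"
    #     w.version     -> "9.0.1"
    #     w.pyversions  -> ["py2", "py3"]
    #     w.abis        -> ["none"]
    #     w.plats       -> ["any"]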
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd, python_tag=python_tag):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
def _base_setup_args(self, req):
return [
sys.executable, "-u", '-c',
SETUPTOOLS_SHIM % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, build wheels into the wheel cache and
            replace the sdist we built from with the newly built wheel, in
            preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif autobuilding and req.editable:
pass
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
|
the-stack_0_2888 | # encoding: utf-8
import datetime
import logging
from sqlalchemy.sql import and_, or_
from sqlalchemy import orm, types, Column, Table, ForeignKey
from ckan.common import config
from ckan.model import (
meta,
core,
license as _license,
types as _types,
domain_object,
activity,
extension,
)
import ckan.lib.maintain as maintain
logger = logging.getLogger(__name__)
__all__ = ['Package', 'package_table', 'PackageMember', 'package_member_table',
'PACKAGE_NAME_MAX_LENGTH', 'PACKAGE_NAME_MIN_LENGTH',
'PACKAGE_VERSION_MAX_LENGTH',
]
PACKAGE_NAME_MAX_LENGTH = 100
PACKAGE_NAME_MIN_LENGTH = 2
PACKAGE_VERSION_MAX_LENGTH = 100
# Our Domain Object Tables
package_table = Table('package', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('name', types.Unicode(PACKAGE_NAME_MAX_LENGTH),
nullable=False, unique=True),
Column('title', types.UnicodeText, doc='remove_if_not_provided'),
Column('version', types.Unicode(PACKAGE_VERSION_MAX_LENGTH),
doc='remove_if_not_provided'),
Column('url', types.UnicodeText, doc='remove_if_not_provided'),
Column('author', types.UnicodeText, doc='remove_if_not_provided'),
Column('author_email', types.UnicodeText, doc='remove_if_not_provided'),
Column('maintainer', types.UnicodeText, doc='remove_if_not_provided'),
Column('maintainer_email', types.UnicodeText, doc='remove_if_not_provided'),
Column('notes', types.UnicodeText, doc='remove_if_not_provided'),
Column('license_id', types.UnicodeText, doc='remove_if_not_provided'),
Column('type', types.UnicodeText, default=u'dataset'),
Column('owner_org', types.UnicodeText),
Column('creator_user_id', types.UnicodeText),
Column('metadata_created', types.DateTime, default=datetime.datetime.utcnow),
Column('metadata_modified', types.DateTime, default=datetime.datetime.utcnow),
Column('private', types.Boolean, default=False),
Column('state', types.UnicodeText, default=core.State.ACTIVE),
)
package_member_table = Table(
'package_member',
meta.metadata,
Column('package_id', ForeignKey('package.id'), primary_key=True),
Column('user_id', ForeignKey('user.id'), primary_key = True),
Column('capacity', types.UnicodeText, nullable=False),
Column('modified', types.DateTime, default=datetime.datetime.utcnow),
)
## -------------------
## Mapped classes
class Package(core.StatefulObjectMixin,
domain_object.DomainObject):
text_search_fields = ['name', 'title']
def __init__(self, **kw):
from ckan import model
super(Package, self).__init__(**kw)
@classmethod
def search_by_name(cls, text_query):
text_query = text_query
return meta.Session.query(cls).filter(cls.name.contains(text_query.lower()))
@classmethod
def get(cls, reference, for_update=False):
'''Returns a package object referenced by its id or name.'''
if not reference:
return None
q = meta.Session.query(cls)
if for_update:
q = q.with_for_update()
pkg = q.get(reference)
        if pkg is None:
pkg = cls.by_name(reference, for_update=for_update)
return pkg
# Todo: Make sure package names can't be changed to look like package IDs?
@property
def resources(self):
return [resource for resource in
self.resources_all
if resource.state != 'deleted']
def related_packages(self):
return [self]
def add_resource(self, url, format=u'', description=u'', hash=u'', **kw):
from ckan.model import resource
self.resources_all.append(resource.Resource(
package_id=self.id,
url=url,
format=format,
description=description,
hash=hash,
**kw)
)
def add_tag(self, tag):
import ckan.model as model
if tag in self.get_tags(tag.vocabulary):
return
else:
package_tag = model.PackageTag(self, tag)
meta.Session.add(package_tag)
def add_tags(self, tags):
for tag in tags:
self.add_tag(tag)
def add_tag_by_name(self, tag_name, vocab=None, autoflush=True):
"""Add a tag with the given name to this package's tags.
By default the given tag_name will be searched for among the free tags
(tags which do not belong to any vocabulary) only. If the optional
argument `vocab` is given then the named vocab will be searched for the
tag name instead.
If no tag with the given name is found, one will be created. If the
optional argument vocab is given and there is no tag with the given
name in the given vocabulary, then a new tag will be created and added
to the vocabulary.
"""
from ckan.model.tag import Tag
if not tag_name:
return
# Get the named tag.
tag = Tag.by_name(tag_name, vocab=vocab, autoflush=autoflush)
if not tag:
# Tag doesn't exist yet, make a new one.
if vocab:
tag = Tag(name=tag_name, vocabulary_id=vocab.id)
else:
tag = Tag(name=tag_name)
assert tag is not None
self.add_tag(tag)
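    # Illustrative usage sketch (not part of the original model), assuming an
    # open session and hypothetical names:
    #
    #     pkg = Package.by_name(u'annakarenina')
    #     pkg.add_tag_by_name(u'russian')                   # free tag
    #     pkg.add_tag_by_name(u'novel', vocab=genre_vocab)  # vocabulary tag
    #     meta.Session.commit()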
def get_tags(self, vocab=None):
"""Return a sorted list of this package's tags
Tags are sorted by their names.
"""
import ckan.model as model
query = meta.Session.query(model.Tag)
query = query.join(model.PackageTag)
query = query.filter(model.PackageTag.tag_id == model.Tag.id)
query = query.filter(model.PackageTag.package_id == self.id)
query = query.filter(model.PackageTag.state == 'active')
if vocab:
query = query.filter(model.Tag.vocabulary_id == vocab.id)
else:
query = query.filter(model.Tag.vocabulary_id == None)
query = query.order_by(model.Tag.name)
tags = query.all()
return tags
def remove_tag(self, tag):
import ckan.model as model
query = meta.Session.query(model.PackageTag)
query = query.filter(model.PackageTag.package_id == self.id)
query = query.filter(model.PackageTag.tag_id == tag.id)
package_tag = query.one()
package_tag.delete()
meta.Session.commit()
def isopen(self):
if self.license and self.license.isopen():
return True
return False
def get_average_rating(self):
total = 0
for rating in self.ratings:
total += rating.rating
if total == 0:
return None
else:
return total / len(self.ratings)
def as_dict(self, ref_package_by='name', ref_group_by='name'):
_dict = domain_object.DomainObject.as_dict(self)
# Set 'license' in _dict to cater for old clients.
# Todo: Remove from Version 2?
_dict['license'] = self.license.title if self.license else _dict.get('license_id', '')
_dict['isopen'] = self.isopen()
tags = [tag.name for tag in self.get_tags()]
        tags.sort()  # so the ordering is deterministic
_dict['tags'] = tags
groups = [getattr(group, ref_group_by) for group in self.get_groups()]
groups.sort()
_dict['groups'] = groups
_dict['extras'] = {key: value for key, value in self.extras.items()}
_dict['ratings_average'] = self.get_average_rating()
_dict['ratings_count'] = len(self.ratings)
_dict['resources'] = [res.as_dict(core_columns_only=False) \
for res in self.resources]
site_url = config.get('ckan.site_url', None)
if site_url:
_dict['ckan_url'] = '%s/dataset/%s' % (site_url, self.name)
_dict['relationships'] = [rel.as_dict(self, ref_package_by=ref_package_by) for rel in self.get_relationships()]
_dict['metadata_modified'] = self.metadata_modified.isoformat() \
if self.metadata_modified else None
_dict['metadata_created'] = self.metadata_created.isoformat() \
if self.metadata_created else None
import ckan.lib.helpers as h
_dict['notes_rendered'] = h.render_markdown(self.notes)
_dict['type'] = self.type or u'dataset'
return _dict
def add_relationship(self, type_, related_package, comment=u''):
'''Creates a new relationship between this package and a
related_package. It leaves the caller to commit the change.
Raises KeyError if the type_ is invalid.
'''
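        # Illustrative usage (``parent_pkg`` is an assumed, pre-existing Package;
        # the caller is still responsible for committing):
        #   rel = pkg.add_relationship(u'child_of', parent_pkg, comment=u'derived')
        #   meta.Session.commit()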
from ckan.model import package_relationship
if type_ in package_relationship.PackageRelationship.get_forward_types():
subject = self
object_ = related_package
direction = "forward"
elif type_ in package_relationship.PackageRelationship.get_reverse_types():
type_ = package_relationship.PackageRelationship.reverse_to_forward_type(type_)
assert type_
subject = related_package
object_ = self
direction = "reverse"
else:
raise KeyError('Package relationship type: %r' % type_)
rels = self.get_relationships(with_package=related_package,
type=type_, active=False, direction=direction)
if rels:
rel = rels[0]
if comment:
rel.comment=comment
if rel.state == core.State.DELETED:
rel.undelete()
else:
rel = package_relationship.PackageRelationship(
subject=subject,
object=object_,
type=type_,
comment=comment)
meta.Session.add(rel)
return rel
def get_relationships(self, with_package=None, type=None, active=True,
direction='both'):
'''Returns relationships this package has.
Keeps stored type/ordering (not from pov of self).'''
assert direction in ('both', 'forward', 'reverse')
if with_package:
assert isinstance(with_package, Package)
from ckan.model.package_relationship import PackageRelationship
forward_filters = [PackageRelationship.subject==self]
reverse_filters = [PackageRelationship.object==self]
if with_package:
forward_filters.append(PackageRelationship.object==with_package)
reverse_filters.append(PackageRelationship.subject==with_package)
if active:
forward_filters.append(PackageRelationship.state==core.State.ACTIVE)
reverse_filters.append(PackageRelationship.state==core.State.ACTIVE)
if type:
forward_filters.append(PackageRelationship.type==type)
reverse_type = PackageRelationship.reverse_type(type)
reverse_filters.append(PackageRelationship.type==reverse_type)
q = meta.Session.query(PackageRelationship)
if direction == 'both':
q = q.filter(or_(
and_(*forward_filters),
and_(*reverse_filters),
))
elif direction == 'forward':
q = q.filter(and_(*forward_filters))
elif direction == 'reverse':
q = q.filter(and_(*reverse_filters))
return q.all()
def get_relationships_with(self, other_package, type=None, active=True):
return self.get_relationships(with_package=other_package,
type=type,
active=active)
def get_relationships_printable(self):
'''Returns a list of tuples describing related packages, including
non-direct relationships (such as siblings).
@return: e.g. [(annakarenina, u"is a parent"), ...]
'''
from ckan.model.package_relationship import PackageRelationship
rel_list = []
for rel in self.get_relationships():
if rel.subject == self:
type_printable = PackageRelationship.make_type_printable(rel.type)
rel_list.append((rel.object, type_printable, rel.comment))
else:
type_printable = PackageRelationship.make_type_printable(\
PackageRelationship.forward_to_reverse_type(
rel.type)
)
rel_list.append((rel.subject, type_printable, rel.comment))
# sibling types
# e.g. 'gary' is a child of 'mum', looking for 'bert' is a child of 'mum'
# i.e. for each 'child_of' type relationship ...
for rel_as_subject in self.get_relationships(direction='forward'):
if rel_as_subject.state != core.State.ACTIVE:
continue
# ... parent is the object
parent_pkg = rel_as_subject.object
# Now look for the parent's other relationships as object ...
for parent_rel_as_object in parent_pkg.get_relationships(direction='reverse'):
if parent_rel_as_object.state != core.State.ACTIVE:
continue
# and check children
child_pkg = parent_rel_as_object.subject
if (child_pkg != self and
parent_rel_as_object.type == rel_as_subject.type and
child_pkg.state == core.State.ACTIVE):
type_printable = PackageRelationship.inferred_types_printable['sibling']
rel_list.append((child_pkg, type_printable, None))
return sorted(list(set(rel_list)))
#
## Licenses are currently integrated into the domain model here.
@classmethod
def get_license_register(cls):
if not hasattr(cls, '_license_register'):
cls._license_register = _license.LicenseRegister()
return cls._license_register
@classmethod
def get_license_options(cls):
register = cls.get_license_register()
return [(l.title, l.id) for l in register.values()]
def get_license(self):
if self.license_id:
try:
license = self.get_license_register()[self.license_id]
except KeyError:
license = None
else:
license = None
return license
def set_license(self, license):
        if isinstance(license, _license.License):
            self.license_id = license.id
        elif isinstance(license, dict):
            self.license_id = license['id']
else:
msg = "Value not a license object or entity: %s" % repr(license)
raise Exception(msg)
license = property(get_license, set_license)
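    # Illustrative usage of the property above (``pkg`` is an assumed, existing
    # Package; 'cc-by' is just an example id from the license register):
    #   pkg.license = pkg.get_license_register()['cc-by']
    #   assert pkg.license_id == 'cc-by'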
@property
    @maintain.deprecated('`is_private` attribute of model.Package is ' +
'deprecated and should not be used. Use `private`')
def is_private(self):
"""
DEPRECATED in 2.1
A package is private if belongs to any private groups
"""
return self.private
def is_in_group(self, group):
return group in self.get_groups()
def get_groups(self, group_type=None, capacity=None):
import ckan.model as model
# Gets [ (group, capacity,) ...]
        # NB Python's `and` cannot combine SQLAlchemy expressions; use and_()
        # so the table_name condition is actually part of the join.
        groups = model.Session.query(model.Group, model.Member.capacity).\
            join(model.Member, and_(model.Member.group_id == model.Group.id,
                                    model.Member.table_name == 'package')).\
            join(model.Package, model.Package.id == model.Member.table_id).\
            filter(model.Member.state == 'active').\
            filter(model.Member.table_id == self.id).all()
caps = [g[1] for g in groups]
groups = [g[0] for g in groups ]
if group_type:
groups = [g for g in groups if g.type == group_type]
if capacity:
groupcaps = zip( groups,caps )
groups = [g[0] for g in groupcaps if g[1] == capacity]
return groups
@staticmethod
def get_fields(core_only=False, fields_to_ignore=None):
'''Returns a list of the properties of a package.
@param core_only - limit it to fields actually in the package table and
not those on related objects, such as tags & extras.
@param fields_to_ignore - a list of names of fields to not return if
present.
'''
# ['id', 'name', 'title', 'version', 'url', 'author', 'author_email', 'maintainer', 'maintainer_email', 'notes', 'license_id', 'state']
fields = Package.revisioned_fields()
if not core_only:
fields += ['resources', 'tags', 'groups', 'extras', 'relationships']
if fields_to_ignore:
            for field in fields_to_ignore:
                if field in fields:
                    fields.remove(field)
return fields
def activity_stream_item(self, activity_type, user_id):
import ckan.model
import ckan.logic
assert activity_type in ("new", "changed"), (
str(activity_type))
# Handle 'deleted' objects.
# When the user marks a package as deleted this comes through here as
# a 'changed' package activity. We detect this and change it to a
# 'deleted' activity.
if activity_type == 'changed' and self.state == u'deleted':
if meta.Session.query(activity.Activity).filter_by(
object_id=self.id, activity_type='deleted').all():
# A 'deleted' activity for this object has already been emitted
# FIXME: What if the object was deleted and then activated
# again?
return None
else:
# Emit a 'deleted' activity for this object.
activity_type = 'deleted'
try:
# We save the entire rendered package dict so we can support
# viewing the past packages from the activity feed.
dictized_package = ckan.logic.get_action('package_show')({
'model': ckan.model,
'session': ckan.model.Session,
'for_view': False, # avoid ckanext-multilingual translating it
'ignore_auth': True
}, {
'id': self.id,
'include_tracking': False
})
except ckan.logic.NotFound:
# This happens if this package is being purged and therefore has no
# current revision.
# TODO: Purge all related activity stream items when a model object
# is purged.
return None
actor = meta.Session.query(ckan.model.User).get(user_id)
return activity.Activity(
user_id,
self.id,
"%s package" % activity_type,
{
'package': dictized_package,
# We keep the acting user name around so that actions can be
# properly displayed even if the user is deleted in the future.
'actor': actor.name if actor else None
}
)
def set_rating(self, user_or_ip, rating):
'''Record a user's rating of this package.
The caller function is responsible for doing the commit.
If a rating is outside the range MAX_RATING - MIN_RATING then a
RatingValueException is raised.
@param user_or_ip - user object or an IP address string
'''
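        # Illustrative usage (``pkg`` and ``user`` are assumed, pre-existing
        # objects; the caller commits afterwards):
        #   pkg.set_rating(user, 4)
        #   pkg.set_rating('198.51.100.7', 3)   # anonymous rating by IP address
        #   meta.Session.commit()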
user = None
from ckan.model.user import User
from ckan.model.rating import Rating, MAX_RATING, MIN_RATING
if isinstance(user_or_ip, User):
user = user_or_ip
rating_query = meta.Session.query(Rating)\
.filter_by(package=self, user=user)
else:
ip = user_or_ip
rating_query = meta.Session.query(Rating)\
.filter_by(package=self, user_ip_address=ip)
try:
rating = float(rating)
except TypeError:
raise RatingValueException
except ValueError:
raise RatingValueException
if rating > MAX_RATING or rating < MIN_RATING:
raise RatingValueException
if rating_query.count():
rating_obj = rating_query.first()
rating_obj.rating = rating
elif user:
rating = Rating(package=self,
user=user,
rating=rating)
meta.Session.add(rating)
else:
rating = Rating(package=self,
user_ip_address=ip,
rating=rating)
meta.Session.add(rating)
@property
@maintain.deprecated()
def extras_list(self):
'''DEPRECATED in 2.9
Returns a list of the dataset's extras, as PackageExtra object
NB includes deleted ones too (state='deleted')
'''
from ckan.model.package_extra import PackageExtra
return meta.Session.query(PackageExtra) \
.filter_by(package_id=self.id) \
.all()
class PackageMember(domain_object.DomainObject):
pass
class RatingValueException(Exception):
pass
# import here to prevent circular import
from ckan.model import tag
meta.mapper(Package, package_table, properties={
# delete-orphan on cascade does NOT work!
    # Why? Answer: because of the way SQLAlchemy/our code works, there are
    # points where a PackageTag object is created *and* flushed but does not
    # yet have the package_id set (this causes us other problems ...). Some
    # time later a second commit happens in which the package_id is correctly
    # set. However, after the first commit the PackageTag has no Package and
    # delete-orphan kicks in to remove it!
'package_tags':orm.relation(tag.PackageTag, backref='package',
cascade='all, delete', #, delete-orphan',
),
},
order_by=package_table.c.name,
extension=[extension.PluginMapperExtension()],
)
meta.mapper(tag.PackageTag, tag.package_tag_table, properties={
'pkg':orm.relation(Package, backref='package_tag_all',
cascade='none',
)
},
order_by=tag.package_tag_table.c.id,
extension=[extension.PluginMapperExtension()],
)
meta.mapper(PackageMember, package_member_table)
|
the-stack_0_2889 | # Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._sharded_tensor import (
shard_parameter,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedEmbedding(ShardedTensorTestBase):
def _run_sharded_embedding(
self,
spec,
input_size,
num_embeddings,
embedding_dim,
sharded_dim=None,
max_norm=None,
norm_type=2.0,
padding_idx=None,
):
# Use same seed.
torch.manual_seed(0)
local_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
).cuda(self.rank)
sharded_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
# Copy the weights from local embedding
sharded_embedding.weight = torch.nn.Parameter(
local_embedding.weight.detach().clone()
)
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.randint(0, num_embeddings, tuple(input_size)).cuda(self.rank)
sharded_output = sharded_embedding(inp)
# If max_norm is set, we need to ensure that the renorm has been applied across
# inputs from all ranks.
if max_norm is not None:
gathered_inputs = [torch.zeros_like(inp) for _ in range(TEST_GPU_NUM)]
dist.all_gather(gathered_inputs, inp)
unique_inp = torch.unique(torch.cat(gathered_inputs))
local_embedding(unique_inp)
# Run local computation
local_output = local_embedding(inp)
# Compare local weight and shared one to ensure the renorm
# as expected.
if max_norm is not None:
sharded_weight = sharded_embedding.weight.local_shards()[0].tensor
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_embedding.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_weight_narrowed = local_embedding.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(local_weight_narrowed, sharded_weight)
# Verify
self.assertEqual(local_output, sharded_output)
# Validate for torch.nn.functional.embedding version.
local_output = torch.nn.functional.embedding(
inp,
local_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
sharded_output = torch.nn.functional.embedding(
inp,
sharded_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
self._run_sharded_embedding(spec, [5, 4], 17, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13)
self._run_sharded_embedding(spec, [8, 6, 5, 4, 7], 23, 16)
self._run_sharded_embedding(spec, [4], 15, 14)
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
spec, [4, 5, 6], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [12, 7, 16], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0, sharded_dim=1
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0, sharded_dim=1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
# Test even split.
self._run_sharded_embedding(spec, [5, 12], 16, 22)
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
spec, [5, 12], 16, 22, max_norm=2.5, sharded_dim=0
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
spec, [6, 5, 3], 26, 11, max_norm=2.0, sharded_dim=0
)
# Test uneven split.
self._run_sharded_embedding(spec, [8, 6, 5, 4], 19, 11)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
spec, [12, 16, 8], 27, 11, max_norm=2.0, sharded_dim=0
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5, sharded_dim=0)
if __name__ == "__main__":
run_tests()
|
the-stack_0_2890 | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# Copyright 2013 Alexey Kardapoltsev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from deployutils import *
import copy
import time
import re
import sys
verbose = False
remoteExec = False
remoteHost = None
log_file = "/tmp/{}-deploy.log".format(COMPANY_NAME)
log = None
def shell(args):
_call(args.cmd.split())
def copy_scripts(args):
_call(["scp", "deployutils.py", "deploy-target.py", "{}:".format(args.target)])
def publish(args):
modules = _extract_modules(args)
stage = args.env
if args.target:
stage = stages[args.target]
_log("will publish {} modules to stage {}".format(modules, stage))
if args.clean:
_clean()
for m in modules:
_publish(m, stage)
if not args.no_docs:
_publish_docs(stage)
def publish_docs(args):
stage = args.env
if args.target:
stage = stages[args.target]
_log("will publish docs to stage {}".format(stage))
if args.clean:
_clean()
_call(["sbt", "compile"])
_publish_docs(stage)
def install(args):
modules = _extract_modules(args)
_log("installing {}".format(modules))
if args.env == "prod":
        if not confirm("Do you really want to install to prod?"):
            _log("Goodbye!")
sys.exit(0)
if args.target:
if args.update:
_update_target(args.target, args.full_update)
_log("will install {} to {}".format(modules, args.target))
_install(args.target, modules)
else:
env = environments[args.env]
for server in env:
seeds = []
if is_seed(server):
seeds = list(groups["seed"])
t_modules = set.intersection(modules, server["modules"] + seeds)
if t_modules:
if args.update:
_update_target(server["host"], args.full_update)
_log("will install {} to {}".format(t_modules, server["host"]))
_install(server["host"], t_modules)
def chick(args):
publish(copy.deepcopy(args))
args.update = True
install(args)
def restart_cluster(args):
env = environments[args.env]
# stop non seed modules
for server in env:
_check_version(server["host"])
modules = [m for m in server["modules"] if not m in groups["seed"]]
if modules:
_log("will stop {} at {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a stop -m {}".format(" ".join(modules))])
# stop seed modules
for server in env:
_check_version(server["host"])
modules = [m for m in server["modules"] if m in groups["seed"]]
if modules:
_log("will stop {} at {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a stop -m {}".format(" ".join(modules))])
# start seed
for server in env:
if is_seed(server):
_log("starting seed on {}".format(server["host"]))
for s in groups["seed"]:
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a start -m {}".format(s)])
# wait for seed start up
time.sleep(3)
# start all other modules
for server in env:
modules = list(server["modules"])
_log("starting {} on {}".format(" ".join(modules), server["host"]))
_call(["ssh", "{}".format(server["host"]), "sudo ~/deploy-target.py restart -a start -m {}".format(" ".join(modules))])
def restart_module(args):
modules = _extract_modules(args)
    if not modules:
        _log("Please specify at least one module or group")
        return
_check_version(args.target)
for m in modules:
_call(["ssh", "{}".format(args.target), "sudo ~/deploy-target.py restart -a {} -m {}".format(args.action, m)])
def start(args):
if args.clean:
_clean()
modules = list(_extract_modules(args))
    if len(modules) != 1:
        _log("Exactly one module name expected")
        sys.exit(1)
_start(modules[0], args.hostType, args.hostname)
def print_log(args):
with open(log_file, 'r') as fin:
print(fin.read())
def _check_version(target):
cmd = ["ssh", target, "~/deploy-target.py version"]
std = subprocess.check_output(cmd).decode("utf-8")
t_version = int(std)
if t_version < SCRIPT_VERSION:
_log("old version of script at {}, updating...".format(target))
_call(["scp", "deployutils.py", "deploy-target.py", "{}:".format(target)])
elif t_version > SCRIPT_VERSION:
_log("target version is newer than local script")
exit(1)
def _start(module, hostType, hostname):
_log("starting module {} with hostType {} on {}".format(module, hostType, hostname))
module_name = module[8:]
_call(["sbt", "'project {}'".format(module), "'runMain some.main.Class -t {} -h {}'".format(module_name, hostType, hostname)])
def _extract_modules(args):
modules = set()
if hasattr(args, "modules"):
for m in args.modules:
modules.add(m)
if hasattr(args, "groups"):
for g in args.groups:
for m in groups[g]:
modules.add(m)
return modules
def _restart(host, modules, action):
_check_version(host)
_call(["ssh", "{}".format(host),
"sudo ~/deploy-target.py restart -a {} -m {}".format(action, " ".join(modules))])
def _install(host, modules):
if(modules):
_check_version(host)
_log("installing modules {} to {}".format(" ".join(modules), host))
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py install -m {}".format(" ".join(modules))])
def _update_target(host, is_full):
_check_version(host)
if is_full:
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py update --full"])
else:
_call(["ssh", "{}".format(host), "sudo ~/deploy-target.py update"])
def _clean():
_log("cleaning...")
_call(["sbt", "clean"])
_call(["sbt", "update"])
def _publish(module, stage):
_log("publishing module {}".format(module))
_call(["sbt", "project {}".format(module), "set debRepoStage := \"{}\"".format(stage), "publishDebs"])
_base_docs_url = "http://doc.{}/docs/{}/"
_doc_user=""
_doc_password=""
def _publish_docs(stage):
_log("publishing docs to {}".format(stage))
try:
for schema in ["v1.api.json"]:
url = _base_docs_url.format(DOMAIN, stage) + schema
            latest_schema = re.sub(r"v[\d]+", "latest", schema)
latest_url = _base_docs_url.format(DOMAIN, stage) + latest_schema
schemaPath = "schema/schemas/generated/{}".format(schema)
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", schemaPath, url])
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", schemaPath, latest_url])
#_call(["asciidoctor", "-o", "api.html", "api.ad"])
#_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", "api.html", _base_docs_url.format(stage)])
_call(["curl", "--user", "{}:{}".format(_doc_user, _doc_password), "-T", "api_changes.md", _base_docs_url.format(stage)])
except Exception as e:
_log("ERROR: {}".format(e))
_log("docs was not published!")
pass
def _call(cmd):
_log("will execute {}".format(cmd))
exit_code = subprocess.call(cmd, stdout=log, stderr=log)
if exit_code != 0:
raise Exception("Failed to execute cmd: {}".format(cmd))
def _log(msg):
print(msg)
if log:
m = msg
if not m.endswith("\n"):
m = m + "\n"
m = time.strftime('%X %x') + ": " + m
log.write(m)
def _sync_sources():
sync_cmd = ['rsync', '--delete', '--exclude=.**', '--exclude=target', '--exclude=logs', '--exclude=__pycache__', '-avzh', '.', "{}:{}".format(remoteHost, REPO_NAME)]
    exit_code = subprocess.call(sync_cmd, stdout=log, stderr=log)
    if exit_code != 0:
        raise Exception("Failed to sync sources to {}".format(remoteHost))
topParser = argparse.ArgumentParser()
topParser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help = "do not redirect output to /dev/null")
topParser.add_argument("-r", "--remote", dest="remote", choices=["build00"], help = "execute all commands at the remote host")
subParsers = topParser.add_subparsers(title = "Command categories")
cleanParser = argparse.ArgumentParser(add_help = False)
cleanParser.add_argument("-c", "--clean", dest="clean", action="store_true", help = "run `sbt clean` before building")
noDocsParser = argparse.ArgumentParser(add_help = False)
noDocsParser.add_argument("--no-docs", dest="no_docs", action="store_true", help = "skip docs publishing")
updateParser = argparse.ArgumentParser(add_help = False)
updateParser.add_argument("--no-update", dest="update", action="store_false", help = "do not run apt-get update before installing")
updateParser.add_argument("--full-update", dest="full_update", default=False, action="store_true", help = "run apt-get update from all sources before installing")
startParser = subParsers.add_parser("start", description = "start backend module on local machine", parents = [cleanParser, modulesParser])
startParser.add_argument("-t", "--hosttype", dest="hostType", default="local", help = "backend host type", choices=["local"])
startParser.add_argument("-d", "--domain", dest="hostname", default="localhost", help = "akka hostname conf")
startParser.set_defaults(func = start)
shellParser = subParsers.add_parser("shell", description = "run shell command")
shellParser.add_argument("cmd")
shellParser.set_defaults(func = shell)
installParser = subParsers.add_parser("install", description = "installing backend modules to host",
parents = [modulesParser, groupsParser, hostParser, updateParser])
installParser.add_argument("-r", "--restart", dest="restart", action="store_true", help = "restart service after installation")
installParser.set_defaults(func = install)
publishParser = subParsers.add_parser("publish", description = "publishing deb to nexus repo", parents = [modulesParser, hostParser, groupsParser, cleanParser, noDocsParser])
publishParser.set_defaults(func = publish)
chickParser = subParsers.add_parser("chick", description = "hubot chick dev",
parents = [modulesParser, groupsParser, hostParser, cleanParser, updateParser, noDocsParser])
chickParser.set_defaults(func = chick)
deployParser = subParsers.add_parser("deploy", description = "deploy helper scripts to target", parents = [hostParser])
deployParser.set_defaults(func = copy_scripts)
deployDocsParser = subParsers.add_parser("publishdocs", description = "publish docs and api scheme", parents = [hostParser, cleanParser])
deployDocsParser.set_defaults(func = publish_docs)
restartParser = subParsers.add_parser("restart", description = "restart backend module", parents = [hostParser, modulesParser, groupsParser, actionParser])
restartParser.set_defaults(func = restart_module)
restartClusterParser = subParsers.add_parser("restartcluster", description = "start, stop backend", parents = [hostParser])
restartClusterParser.set_defaults(func = restart_cluster)
logParser = subParsers.add_parser("log", description = "print last deploy log to stdout")
logParser.set_defaults(func = print_log)
logParser.set_defaults(verbose = True) # in non verbose mode logs will be cleaned up at the beginning
try:
import argcomplete
argcomplete.autocomplete(topParser)
except ImportError:
print("Try install python argcomplete :)")
pass
parsed = topParser.parse_args()
start = time.time()
try:
if parsed.verbose:
verbose = True
else:
open(log_file, 'w').close() #clean up log file
log = open(log_file, 'a')
verbose = False
if parsed.remote:
remoteExec = True
remoteHost = parsed.remote
_sync_sources()
cmd = []
for a in sys.argv:
if a != "-r" and a != remoteHost:
cmd.append(a)
cmd = ["'" + arg + "'" for arg in cmd]
cmd = ["cd", REPO_NAME, ";"] + cmd
c = ' '.join(cmd)
cmd = ["ssh", remoteHost, c]
_call(cmd)
else:
parsed.func(parsed)
except Exception as e:
_log("ERROR: {}".format(e))
end = time.time()
_log("total time: {:.0f} sec".format(end - start))
sys.exit(1)
end = time.time()
_log("total time: {:.0f} sec".format(end - start))
# vim: set tabstop=8 expandtab shiftwidth=4 softtabstop=4:
|
the-stack_0_2891 | import asyncio
import json
import os
import random
import unittest
from datetime import datetime, timedelta
import boto3
import pytest
import redislite
from mock import MagicMock, Mock, patch
from mockredis import mock_strict_redis_client
from moto import (
mock_config,
mock_dynamodb2,
mock_iam,
mock_s3,
mock_ses,
mock_sns,
mock_sqs,
mock_sts,
)
from tornado.concurrent import Future
# This must be set before loading ConsoleMe's configuration
os.environ["CONFIG_LOCATION"] = "example_config/example_config_test.yaml"
MOCK_ROLE = {
"arn": "arn:aws:iam::123456789012:role/FakeRole",
"name": "FakeRole",
"accountId": "123456789012",
"ttl": 1557325374,
"policy": {
"Path": "/",
"RoleId": "ABCDEFG",
"Arn": "arn:aws:iam::123456789012:role/FakeRole",
"CreateDate": "2019-01-15T22:55:53Z",
"AssumeRolePolicyDocument": {
"Version": "2008-10-17",
"Statement": [
{
"Sid": "2",
"Effect": "Allow",
"Principal": {"AWS": "arn:aws:iam::123456789012:role/FakeRole"},
"Action": "sts:AssumeRole",
},
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/ConsoleMeInstanceProfile"
},
"Action": "sts:AssumeRole",
},
],
},
"Tags": [],
"AttachedManagedPolicies": [
{
"PolicyName": "test1-Example.com",
"PolicyArn": "arn:aws:iam::123456789012:policy/testPolicy",
}
],
"InstanceProfileList": [],
"RolePolicyList": [
{
"PolicyName": "iam",
"PolicyDocument": {
"Statement": [
{
"Action": [
"iam:GetAccountAuthorizationDetails",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListInstanceProfilesForRole",
"iam:ListRolePolicies",
"iam:ListRoles",
"iam:ListAttachedRolePolicies",
"iam:ListRoleTags",
"s3:listallmybuckets",
"sqs:ListQueues",
"sqs:getqueueattributes",
"sns:ListTopics",
],
"Effect": "Allow",
"Resource": ["*"],
"Sid": "iam",
}
],
"Version": "2012-10-17",
},
}
],
},
"templated": "fake/file.json",
}
MOCK_REDIS_DB_PATH = "/tmp/consoleme_unit_test.rdb"
if os.path.exists(MOCK_REDIS_DB_PATH):
os.remove(MOCK_REDIS_DB_PATH)
if os.path.exists(f"{MOCK_REDIS_DB_PATH}.settings"):
os.remove(f"{MOCK_REDIS_DB_PATH}.settings")
all_roles = None
class AioTestCase(unittest.TestCase):
# noinspection PyPep8Naming
def __init__(self, methodName="runTest", loop=None):
self.loop = loop or asyncio.get_event_loop()
self._function_cache = {}
super(AioTestCase, self).__init__(methodName=methodName)
def coroutine_function_decorator(self, func):
def wrapper(*args, **kw):
return self.loop.run_until_complete(func(*args, **kw))
return wrapper
def __getattribute__(self, item):
attr = object.__getattribute__(self, item)
if asyncio.iscoroutinefunction(attr):
if item not in self._function_cache:
self._function_cache[item] = self.coroutine_function_decorator(attr)
return self._function_cache[item]
return attr
class MockBaseHandler:
async def authorization_flow(
self, user=None, console_only=True, refresh_cache=False
):
self.user = "[email protected]"
self.ip = "1.2.3.4"
self.groups = ["group1", "group2"]
self.contractor = False
self.red = mock_strict_redis_client()
class MockBaseMtlsHandler:
async def authorization_flow_user(self):
self.request_uuid = 1234
self.ip = "1.2.3.4"
self.requester = {"type": "user"}
async def authorization_flow_app(self):
self.request_uuid = 1234
self.ip = "1.2.3.4"
self.requester = {"type": "application", "name": "fakeapp"}
class MockAuth:
def __init__(
self, restricted=False, compliance_restricted=False, get_groups_val=None
):
if get_groups_val is None:
get_groups_val = []
self.restricted = restricted
self.compliance_restricted = compliance_restricted
self.get_groups_val = get_groups_val
async def get_groups(self, *kvargs):
return self.get_groups_val
class MockRedis:
def __init__(self, return_value=None):
self.return_value = return_value
def get(self, tag):
print(f"MockRedis GET called with argument {tag}")
return self.return_value
def setex(self, *args):
print(f"MockRedis SETEX called with args {args}")
def hgetall(self, *args):
print(f"MockRedis HGETALL called with args {args}")
return self.return_value
class MockRedisHandler:
def __init__(self, return_value=None):
self.return_value = return_value
async def redis(self):
redis_client = MockRedis(return_value=self.return_value)
return redis_client
mock_accountdata_redis = MagicMock(
return_value=MockRedisHandler(
return_value=json.dumps(
{"123456789012": ["awsaccount", "[email protected]"]}
)
)
)
class AWSHelper:
async def random_account_id(self):
return str(random.randrange(100000000000, 999999999999))
@pytest.fixture(scope="session")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(autouse=True, scope="session")
def sts(aws_credentials):
"""Mocked STS Fixture."""
with mock_sts():
yield boto3.client("sts", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def iam(aws_credentials):
"""Mocked IAM Fixture."""
with mock_iam():
yield boto3.client("iam", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def aws_config(aws_credentials):
"""Mocked Config Fixture."""
with mock_config():
yield boto3.client("config", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def s3(aws_credentials):
"""Mocked S3 Fixture."""
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def ses(aws_credentials):
"""Mocked SES Fixture."""
with mock_ses():
client = boto3.client("ses", region_name="us-east-1")
client.verify_email_address(EmailAddress="[email protected]")
yield client
@pytest.fixture(autouse=True, scope="session")
def sqs(aws_credentials):
"""Mocked SQS Fixture."""
with mock_sqs():
yield boto3.client("sqs", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def sns(aws_credentials):
"""Mocked S3 Fixture."""
with mock_sns():
yield boto3.client("sns", region_name="us-east-1")
@pytest.fixture(autouse=True, scope="session")
def create_default_resources(s3, iam, redis, iam_sync_roles, iamrole_table):
from asgiref.sync import async_to_sync
from consoleme.config import config
from consoleme.lib.cache import store_json_results_in_redis_and_s3
global all_roles
buckets = [config.get("cache_roles_across_accounts.all_roles_combined.s3.bucket")]
for bucket in buckets:
s3.create_bucket(Bucket=bucket)
if all_roles:
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get("cache_roles_across_accounts.all_roles_combined.s3.file"),
)
return
from consoleme.celery.celery_tasks import cache_roles_for_account
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.redis import RedisHandler
red = RedisHandler().redis_sync()
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
for account_id in accounts_d.keys():
cache_roles_for_account(account_id)
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
all_roles = red.hgetall(cache_key)
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get("cache_roles_across_accounts.all_roles_combined.s3.file"),
)
@pytest.fixture(autouse=True, scope="session")
def dynamodb(aws_credentials):
"""Mocked DynamoDB Fixture."""
with mock_dynamodb2():
# Remove the config value for the DynamoDB Server
from consoleme.config.config import CONFIG
old_value = CONFIG.config.pop("dynamodb_server", None)
yield boto3.client("dynamodb", region_name="us-east-1")
# Reset the config value:
CONFIG.config["dynamodb_server"] = old_value
@pytest.fixture(autouse=True, scope="session")
def retry():
"""Mock the retry library so that it doesn't retry."""
class MockRetry:
def __init__(self, *args, **kwargs):
pass
def call(self, f, *args, **kwargs):
return f(*args, **kwargs)
patch_retry = patch("retrying.Retrying", MockRetry)
yield patch_retry.start()
patch_retry.stop()
@pytest.fixture(autouse=True, scope="session")
def iamrole_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_iamroles_global",
AttributeDefinitions=[
{"AttributeName": "arn", "AttributeType": "S"},
{"AttributeName": "accountId", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "arn", "KeyType": "HASH"},
{"AttributeName": "accountId", "KeyType": "RANGE"},
],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
# Apply a TTL:
dynamodb.update_time_to_live(
TableName="consoleme_iamroles_global",
TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def policy_requests_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_policy_requests",
KeySchema=[{"AttributeName": "request_id", "KeyType": "HASH"}], # Partition key
AttributeDefinitions=[
{"AttributeName": "request_id", "AttributeType": "S"},
{"AttributeName": "arn", "AttributeType": "S"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "arn-request_id-index",
"KeySchema": [{"AttributeName": "arn", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 123,
"WriteCapacityUnits": 123,
},
}
],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def requests_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_requests_global",
AttributeDefinitions=[{"AttributeName": "request_id", "AttributeType": "S"}],
KeySchema=[{"AttributeName": "request_id", "KeyType": "HASH"}],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def users_table(dynamodb):
# Create the table:
dynamodb.create_table(
TableName="consoleme_users_global",
AttributeDefinitions=[{"AttributeName": "username", "AttributeType": "S"}],
KeySchema=[{"AttributeName": "username", "KeyType": "HASH"}],
ProvisionedThroughput={"ReadCapacityUnits": 1000, "WriteCapacityUnits": 1000},
)
yield dynamodb
@pytest.fixture(autouse=True, scope="session")
def dummy_requests_data(requests_table):
user = {
"request_id": {"S": "abc-def-ghi"},
"aws:rep:deleting": {"BOOL": False},
"aws:rep:updateregion": {"S": "us-west-2"},
"aws:rep:updatetime": {"N": "1547848006"},
"group": {"S": "test_group"},
"justification": {"S": "some reason"},
"last_updated": {"N": "1245678901"},
"request_time": {"N": "1234567890"},
"status": {"S": "pending"},
"updated_by": {"S": "[email protected]"},
"username": {"S": "[email protected]"},
"reviewer_commnets": {"S": "All the access!"},
}
from consoleme.lib.dynamo import BaseDynamoHandler
requests_table.put_item(
TableName="consoleme_requests_global",
Item=BaseDynamoHandler()._data_to_dynamo_replace(user),
)
yield requests_table
@pytest.fixture(autouse=True, scope="session")
def dummy_users_data(users_table):
user = {
"username": {"S": "[email protected]"},
"aws:rep:deleting": {"BOOL": False},
"aws:rep:updateregion": {"S": "us-west-2"},
"last_udpated": {"N": "1547848006"},
"requests": {"L": [{"S": "abc-def-ghi"}]},
}
from consoleme.lib.dynamo import BaseDynamoHandler
users_table.put_item(
TableName="consoleme_users_global",
Item=BaseDynamoHandler()._data_to_dynamo_replace(user),
)
yield users_table
@pytest.fixture(autouse=True, scope="session")
def iam_sync_roles(iam):
statement_policy = json.dumps(
{
"Statement": [{"Effect": "Deny", "Action": "*", "Resource": "*"}],
"Version": "2012-10-17",
}
)
assume_role_policy = json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/ConsoleMeInstanceProfile"
},
"Action": "sts:AssumeRole",
}
],
}
)
# Create the role that CloudAux will assume:
iam.create_role(RoleName="ConsoleMe", AssumeRolePolicyDocument=assume_role_policy)
# Create a generic test instance profile
iam.create_role(
RoleName="TestInstanceProfile", AssumeRolePolicyDocument=assume_role_policy
)
# Create a managed policy:
policy_one = iam.create_policy(
PolicyName="policy-one", PolicyDocument=statement_policy
)["Policy"]["Arn"]
policy_two = iam.create_policy(
PolicyName="policy-two", PolicyDocument=statement_policy
)["Policy"]["Arn"]
# Create 50 IAM roles for syncing:
for x in range(0, 10):
iam.create_role(
RoleName=f"RoleNumber{x}", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName=f"RoleNumber{x}",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.tag_role(
RoleName=f"RoleNumber{x}",
Tags=[
{"Key": "Number", "Value": f"{x}"},
{"Key": "authorized_groups", "Value": f"group{x}:group{x}@example.com"},
{
"Key": "authorized_groups_cli_only",
"Value": f"group{x}-cli:group{x}[email protected]",
},
],
)
iam.attach_role_policy(RoleName=f"RoleNumber{x}", PolicyArn=policy_one)
iam.attach_role_policy(RoleName=f"RoleNumber{x}", PolicyArn=policy_two)
# Create the dynamic user role:
iam.create_role(
RoleName="awsaccount_user", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName="awsaccount_user",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.attach_role_policy(RoleName="awsaccount_user", PolicyArn=policy_one)
# Create another dynamic user role
iam.create_role(
RoleName="cm_someuser_N", AssumeRolePolicyDocument=assume_role_policy
)
iam.put_role_policy(
RoleName="cm_someuser_N",
PolicyName="SomePolicy",
PolicyDocument=statement_policy,
)
iam.attach_role_policy(RoleName="cm_someuser_N", PolicyArn=policy_one)
iam.create_role(RoleName="rolename", AssumeRolePolicyDocument=assume_role_policy)
iam.attach_role_policy(RoleName="rolename", PolicyArn=policy_one)
yield iam
@pytest.fixture(autouse=True, scope="session")
def www_user():
return json.loads(
"""{
"Path": "/",
"RoleName": "rolename",
"RoleId": "AROAI5FHPGAEE6FRM5Q2Y",
"Arn": "arn:aws:iam::123456789012:role/rolename",
"CreateDate": "2017-10-06T22:07:23Z",
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::123456789012:saml-provider/saml"
},
"Action": "sts:AssumeRoleWithSAML",
"Condition": {
"StringEquals": {
"SAML:aud": "https://signin.aws.amazon.com/saml"
}
}
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:role/consoleme"
},
"Action": "sts:AssumeRole"
}
]
},
"InstanceProfileList": [],
"RolePolicyList": [
{
"PolicyName": "user",
"PolicyDocument": {
"Statement": [
{
"Action": [
"ec2:Describe*",
"lambda:Describe*",
"sns:List*",
"sqs:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}
}
],
"AttachedManagedPolicies": [
{
"PolicyName": "Abc",
"PolicyArn": "arn:aws:iam::123456789012:policy/Abc"
},
{
"PolicyName": "Encrypt",
"PolicyArn": "arn:aws:iam::123456789012:policy/Encrypt"
},
{
"PolicyName": "ReadOnlyAccess",
"PolicyArn": "arn:aws:iam::aws:policy/ReadOnlyAccess"
},
{
"PolicyName": "Tag",
"PolicyArn": "arn:aws:iam::123456789012:policy/Tag"
}
],
"Tags": []
}"""
)
class FakeRedis(redislite.StrictRedis):
def __init__(self, *args, **kwargs):
if kwargs.get("connection_pool"):
del kwargs["connection_pool"]
super(FakeRedis, self).__init__(
MOCK_REDIS_DB_PATH, *args, **kwargs, decode_responses=True
)
@pytest.fixture(autouse=True, scope="session")
def redis(session_mocker):
session_mocker.patch("redis.Redis", FakeRedis)
session_mocker.patch("redis.StrictRedis", FakeRedis)
session_mocker.patch("consoleme.lib.redis.redis.StrictRedis", FakeRedis)
session_mocker.patch("consoleme.lib.redis.redis.Redis", FakeRedis)
session_mocker.patch(
"consoleme.lib.redis.RedisHandler.redis_sync", return_value=FakeRedis()
)
session_mocker.patch(
"consoleme.lib.redis.RedisHandler.redis", return_value=FakeRedis()
)
return True
class MockParliament:
def __init__(self, return_value=None):
self.return_value = return_value
@property
def findings(self):
return self.return_value
class Finding:
issue = ""
detail = ""
location = {}
severity = ""
title = ""
description = ""
def __init__(
self,
issue,
detail,
location,
severity,
title,
description,
):
self.issue = issue
self.detail = detail
self.location = location
self.severity = severity
self.title = title
self.description = description
@pytest.fixture(scope="session")
def parliament(session_mocker):
session_mocker.patch(
"parliament.analyze_policy_string",
return_value=MockParliament(
return_value=[
{
"issue": "RESOURCE_MISMATCH",
"title": "No resources match for the given action",
"severity": "MEDIUM",
"description": "",
"detail": [
{"action": "s3:GetObject", "required_format": "arn:*:s3:::*/*"}
],
"location": {"line": 3, "column": 18, "filepath": "test.json"},
}
]
),
)
session_mocker.patch(
"parliament.enhance_finding",
return_value=Finding(
issue="RESOURCE_MISMATCH",
title="No resources match for the given action",
severity="MEDIUM",
description="",
detail="",
location={},
),
)
@pytest.fixture(scope="session")
def user_iam_role(iamrole_table, www_user):
from consoleme.lib.dynamo import IAMRoleDynamoHandler
ddb = IAMRoleDynamoHandler()
role_entry = {
"arn": www_user.pop("Arn"),
"name": www_user.pop("RoleName"),
"accountId": "123456789012",
"ttl": int((datetime.utcnow() + timedelta(hours=36)).timestamp()),
"policy": ddb.convert_role_to_json(www_user),
}
ddb.sync_iam_role_for_account(role_entry)
@pytest.fixture(autouse=True, scope="session")
def mock_exception_stats():
p = patch("consoleme.exceptions.exceptions.get_plugin_by_name")
yield p.start()
p.stop()
@pytest.fixture(autouse=True, scope="session")
def mock_celery_stats(mock_exception_stats):
p = patch("consoleme.celery.celery_tasks.stats")
yield p.start()
p.stop()
@pytest.fixture(scope="session")
def mock_async_http_client():
p_return_value = Mock()
p_return_value.body = "{}"
p = patch("tornado.httpclient.AsyncHTTPClient")
p.return_value.fetch.return_value = create_future(p_return_value)
yield p.start()
p.stop()
@pytest.fixture(autouse=True, scope="session")
def populate_caches(
redis,
user_iam_role,
iam_sync_roles,
dummy_users_data,
dummy_requests_data,
policy_requests_table,
iamrole_table,
create_default_resources,
s3,
sns,
sqs,
iam,
www_user,
parliament,
):
from asgiref.sync import async_to_sync
from consoleme.celery import celery_tasks as celery
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme_default_plugins.plugins.celery_tasks import (
celery_tasks as default_celery_tasks,
)
celery.cache_cloud_account_mapping()
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
default_celery_tasks.cache_application_information()
for account_id in accounts_d.keys():
celery.cache_roles_for_account(account_id)
celery.cache_s3_buckets_for_account(account_id)
celery.cache_sns_topics_for_account(account_id)
celery.cache_sqs_queues_for_account(account_id)
celery.cache_managed_policies_for_account(account_id)
# celery.cache_resources_from_aws_config_for_account(account_id) # No select_resource_config in moto yet
celery.cache_policies_table_details()
celery.cache_policy_requests()
celery.cache_credential_authorization_mapping()
class MockAioHttpResponse:
status = 200
responses = []
@classmethod
async def json(cls):
try:
return cls.responses.pop(0)
except Exception: # noqa
return []
class MockAioHttpRequest:
@classmethod
async def get(cls, *args, **kwargs):
return MockAioHttpResponse()
@classmethod
async def post(cls, *args, **kwargs):
return MockAioHttpResponse()
def create_future(ret_val=None):
future = Future()
future.set_result(ret_val)
return future
|
the-stack_0_2892 | import unittest
import cupy
from cupy import testing
class TestCArray(unittest.TestCase):
def test_size(self):
x = cupy.arange(3).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.size()', 'test_carray_size',
)(x, size=1)
self.assertEqual(int(y[0]), 3)
def test_shape(self):
x = cupy.arange(6).reshape((2, 3)).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.shape()[i]', 'test_carray_shape',
)(x, size=2)
testing.assert_array_equal(y, (2, 3))
def test_strides(self):
x = cupy.arange(6).reshape((2, 3)).astype('i')
y = cupy.ElementwiseKernel(
'raw int32 x', 'int32 y', 'y = x.strides()[i]',
'test_carray_strides',
)(x, size=2)
testing.assert_array_equal(y, (12, 4))
def test_getitem_int(self):
x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
y = cupy.empty_like(x)
y = cupy.ElementwiseKernel(
'raw T x', 'int32 y', 'y = x[i]', 'test_carray_getitem_int',
)(x, y)
testing.assert_array_equal(y, x)
def test_getitem_idx(self):
x = cupy.arange(24).reshape((2, 3, 4)).astype('i')
y = cupy.empty_like(x)
y = cupy.ElementwiseKernel(
'raw T x', 'int32 y',
'ptrdiff_t idx[] = {i / 12, i / 4 % 3, i % 4}; y = x[idx]',
'test_carray_getitem_idx',
)(x, y)
testing.assert_array_equal(y, x)
|
the-stack_0_2893 | from functools import partial
from typing import (
AsyncIterator,
Callable,
Type,
)
from async_generator import asynccontextmanager
from async_service import background_asyncio_service
from p2p.abc import ConnectionAPI
from .abc import ExchangeAPI, NormalizerAPI, ValidatorAPI
from .candidate_stream import ResponseCandidateStream
from .manager import ExchangeManager
from .typing import TResult, TRequestCommand, TResponseCommand
class BaseExchange(ExchangeAPI[TRequestCommand, TResponseCommand, TResult]):
_request_command_type: Type[TRequestCommand]
_response_command_type: Type[TResponseCommand]
_manager: ExchangeManager[TRequestCommand, TResponseCommand, TResult]
def __init__(self) -> None:
self.tracker = self.tracker_class()
@asynccontextmanager
async def run_exchange(self, connection: ConnectionAPI) -> AsyncIterator[None]:
protocol = connection.get_protocol_for_command_type(self.get_request_cmd_type())
response_stream: ResponseCandidateStream[TRequestCommand, TResponseCommand] = ResponseCandidateStream( # noqa: E501
connection,
protocol,
self.get_response_cmd_type(),
)
async with background_asyncio_service(response_stream):
self._manager = ExchangeManager(
connection,
response_stream,
)
yield
async def get_result(
self,
request: TRequestCommand,
normalizer: NormalizerAPI[TResponseCommand, TResult],
result_validator: ValidatorAPI[TResult],
payload_validator: Callable[[TRequestCommand, TResponseCommand], None],
timeout: float = None) -> TResult:
"""
This is a light convenience wrapper around the ExchangeManager's get_result() method.
It makes sure that:
- the manager service is running
- the payload validator is primed with the request payload
"""
# bind the outbound request payload to the payload validator
message_validator = partial(payload_validator, request.payload)
return await self._manager.get_result(
request,
normalizer,
result_validator.validate_result,
message_validator,
self.tracker,
timeout,
)
@classmethod
def get_response_cmd_type(cls) -> Type[TResponseCommand]:
return cls._response_command_type
@classmethod
def get_request_cmd_type(cls) -> Type[TRequestCommand]:
return cls._request_command_type
@property
def is_requesting(self) -> bool:
return self._manager.is_requesting
|
the-stack_0_2894 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Probe(object):
def __init__(self, initialDelaySeconds=None, periodSeconds=None, timeoutSeconds=None, failureThreshold=None, successThreshold=None, exec=None, httpGet=None, tcpSocket=None):
"""
        :param initialDelaySeconds: (Optional) Number of seconds after the container starts before the probe is triggered.
        :param periodSeconds: (Optional) Interval, in seconds, between probes.
        :param timeoutSeconds: (Optional) Timeout, in seconds, for each probe.
        :param failureThreshold: (Optional) Number of consecutive probe failures, after a success, for the probe to be considered failed.
        :param successThreshold: (Optional) Number of consecutive probe successes, after a failure, for the probe to be considered successful.
        :param exec: (Optional) Execute the specified command inside the container; the diagnostic is considered successful if the command exits with status code 0.
        :param httpGet: (Optional) Perform an HTTP GET request against the container's IP address on the specified port and path; the diagnostic is considered successful if the response status code is at least 200 and below 400.
        :param tcpSocket: (Optional) Perform a TCP check against the container's IP address on the specified port; the diagnostic is considered successful if the port is open.
"""
self.initialDelaySeconds = initialDelaySeconds
self.periodSeconds = periodSeconds
self.timeoutSeconds = timeoutSeconds
self.failureThreshold = failureThreshold
self.successThreshold = successThreshold
self.exec = exec
self.httpGet = httpGet
self.tcpSocket = tcpSocket
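# Illustrative construction (the field values and the httpGet dict shape are
# assumptions for demonstration only, not taken from the JD Cloud API docs):
#   probe = Probe(initialDelaySeconds=10, periodSeconds=30, timeoutSeconds=5,
#                 failureThreshold=3, successThreshold=1,
#                 httpGet={'path': '/healthz', 'port': 8080})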
|
the-stack_0_2896 | # -*- coding: utf-8 -*-
"""Analysis plugin to look up files in nsrlsvr and tag events."""
import socket
from plaso.analysis import hash_tagging
from plaso.analysis import logger
from plaso.analysis import manager
class NsrlsvrAnalyzer(hash_tagging.HashAnalyzer):
"""Analyzes file hashes by consulting an nsrlsvr instance.
Attributes:
analyses_performed (int): number of analysis batches completed by this
analyzer.
hashes_per_batch (int): maximum number of hashes to analyze at once.
seconds_spent_analyzing (int): number of seconds this analyzer has spent
performing analysis (as opposed to waiting on queues, etc.)
wait_after_analysis (int): number of seconds the analyzer will sleep for
after analyzing a batch of hashes.
"""
_RECEIVE_BUFFER_SIZE = 4096
_SOCKET_TIMEOUT = 3
SUPPORTED_HASHES = ['md5', 'sha1']
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
"""Initializes an nsrlsvr analyzer thread.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): that the analyzer will append
HashAnalysis objects this queue.
"""
super(NsrlsvrAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._host = None
self._port = None
self.hashes_per_batch = 100
def _GetSocket(self):
"""Establishes a connection to an nsrlsvr instance.
Returns:
socket._socketobject: socket connected to an nsrlsvr instance or None if
a connection cannot be established.
"""
try:
return socket.create_connection(
(self._host, self._port), self._SOCKET_TIMEOUT)
except socket.error as exception:
logger.error('Unable to connect to nsrlsvr with error: {0!s}.'.format(
exception))
def _QueryHash(self, nsrl_socket, digest):
"""Queries nsrlsvr for a specific hash.
Args:
nsrl_socket (socket._socketobject): socket of connection to nsrlsvr.
digest (str): hash to look up.
Returns:
bool: True if the hash was found, False if not or None on error.
"""
try:
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
    except UnicodeEncodeError:
logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
return False
response = None
try:
nsrl_socket.sendall(query)
response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
except socket.error as exception:
logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
exception))
if not response:
return False
# Strip end-of-line characters since they can differ per platform on which
# nsrlsvr is running.
response = response.strip()
# nsrlsvr returns "OK 1" if the hash was found or "OK 0" if not.
return response == b'OK 1'
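# Wire-protocol sketch, derived from the query and comparison above: for the
# MD5 of an empty file the client sends
#   b'QUERY d41d8cd98f00b204e9800998ecf8427e\n'
# and nsrlsvr replies b'OK 1' if the hash is in its loaded set or b'OK 0' if
# it is not, possibly followed by platform-dependent end-of-line characters.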
def Analyze(self, hashes):
"""Looks up hashes in nsrlsvr.
Args:
hashes (list[str]): hash values to look up.
Returns:
list[HashAnalysis]: analysis results, or an empty list on error.
"""
logger.debug('Opening connection to {0:s}:{1:d}'.format(
self._host, self._port))
nsrl_socket = self._GetSocket()
if not nsrl_socket:
self.SignalAbort()
return []
hash_analyses = []
for digest in hashes:
response = self._QueryHash(nsrl_socket, digest)
if response is None:
continue
hash_analysis = hash_tagging.HashAnalysis(digest, response)
hash_analyses.append(hash_analysis)
nsrl_socket.close()
logger.debug('Closed connection to {0:s}:{1:d}'.format(
self._host, self._port))
return hash_analyses
def SetHost(self, host):
"""Sets the address or hostname of the server running nsrlsvr.
Args:
host (str): IP address or hostname to query.
"""
self._host = host
def SetPort(self, port):
"""Sets the port where nsrlsvr is listening.
Args:
port (int): port to query.
"""
self._port = port
def TestConnection(self):
"""Tests the connection to nsrlsvr.
Checks if a connection can be set up and queries the server for the
MD5 of an empty file and expects a response. The value of the response
is not checked.
Returns:
bool: True if nsrlsvr instance is reachable.
"""
response = None
nsrl_socket = self._GetSocket()
if nsrl_socket:
response = self._QueryHash(
nsrl_socket, 'd41d8cd98f00b204e9800998ecf8427e')
nsrl_socket.close()
return response is not None
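# Illustrative usage sketch (hypothetical, not part of plaso): driving the
# analyzer directly, assuming an nsrlsvr instance listens on localhost:9120
# and that plain queue.Queue objects are acceptable stand-ins for the hash
# queues. The attribute names on HashAnalysis are assumed for illustration.
#
# import queue
# analyzer = NsrlsvrAnalyzer(queue.Queue(), queue.Queue())
# analyzer.SetHost('localhost')
# analyzer.SetPort(9120)
# if analyzer.TestConnection():
#     for analysis in analyzer.Analyze(['d41d8cd98f00b204e9800998ecf8427e']):
#         print(analysis.subject_hash, analysis.hash_information)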
class NsrlsvrAnalysisPlugin(hash_tagging.HashTaggingAnalysisPlugin):
"""Analysis plugin for looking up hashes in nsrlsvr."""
# The NSRL contains files of all different types, and can handle a high load
# so look up all files.
DATA_TYPES = ['fs:stat', 'fs:stat:ntfs']
NAME = 'nsrlsvr'
def __init__(self):
"""Initializes an nsrlsvr analysis plugin."""
super(NsrlsvrAnalysisPlugin, self).__init__(NsrlsvrAnalyzer)
self._label = None
def GenerateLabels(self, hash_information):
"""Generates a list of strings that will be used in the event tag.
Args:
hash_information (bool): whether the analyzer received a response from
nsrlsvr indicating that the hash was present in its loaded NSRL set.
Returns:
list[str]: strings describing the results from nsrlsvr.
"""
if hash_information:
return [self._label]
# TODO: Re-enable when tagging is removed from the analysis report.
# return ['nsrl_not_present']
return []
def SetLabel(self, label):
"""Sets the tagging label.
Args:
label (str): label to apply to events extracted from files that are
present in nsrlsvr.
"""
self._label = label
def SetHost(self, host):
"""Sets the address or hostname of the server running nsrlsvr.
Args:
host (str): IP address or hostname to query.
"""
self._analyzer.SetHost(host)
def SetPort(self, port):
"""Sets the port where nsrlsvr is listening.
Args:
port (int): port to query.
"""
self._analyzer.SetPort(port)
def TestConnection(self):
"""Tests the connection to nsrlsvr.
Returns:
bool: True if nsrlsvr instance is reachable.
"""
return self._analyzer.TestConnection()
manager.AnalysisPluginManager.RegisterPlugin(NsrlsvrAnalysisPlugin)
|
the-stack_0_2897 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime as dt
import itertools
import pydoc
import tenacity
import weakref
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import reflection
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.common import short_id
from heat.common import timeutils
from heat.engine import attributes
from heat.engine.cfn import template as cfn_tmpl
from heat.engine import clients
from heat.engine import environment
from heat.engine import event
from heat.engine import function
from heat.engine.hot import template as hot_tmpl
from heat.engine import node_data
from heat.engine import properties
from heat.engine import resources
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import status
from heat.engine import support
from heat.engine import sync_point
from heat.engine import template
from heat.objects import resource as resource_objects
from heat.objects import resource_data as resource_data_objects
from heat.objects import resource_properties_data as rpd_objects
from heat.rpc import client as rpc_client
cfg.CONF.import_opt('action_retry_limit', 'heat.common.config')
cfg.CONF.import_opt('observe_on_update', 'heat.common.config')
cfg.CONF.import_opt('error_wait_time', 'heat.common.config')
LOG = logging.getLogger(__name__)
datetime = dt.datetime
def _register_class(resource_type, resource_class):
resources.global_env().register_class(resource_type, resource_class)
# Attention developers about to move/delete this: STOP IT!!!
UpdateReplace = exception.UpdateReplace
# Attention developers about to move this: STOP IT!!!
class NoActionRequired(Exception):
"""Exception raised when a signal is ignored.
Resource subclasses should raise this exception from handle_signal() to
suppress recording of an event corresponding to the signal.
"""
def __init__(self, res_name='Unknown', reason=''):
msg = (_("The resource %(res)s could not perform "
"scaling action: %(reason)s") %
{'res': res_name, 'reason': reason})
super(Exception, self).__init__(six.text_type(msg))
class PollDelay(Exception):
"""Exception to delay polling of the resource.
This exception may be raised by a Resource subclass's check_*_complete()
methods to indicate that it need not be polled again immediately. If this
exception is raised, the check_*_complete() method will not be called
again until the nth time that the resource becomes eligible for polling.
A PollDelay period of 1 is equivalent to returning False.
"""
def __init__(self, period):
assert period >= 1
self.period = period
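# Illustrative sketch (hypothetical resource plugin code, not part of this
# module): a check_*_complete() method can ask the engine to skip polling
# cycles instead of returning False on every call. The client call shown
# below is an assumption for illustration.
#
#     def check_create_complete(self, server_id):
#         server = self.client().servers.get(server_id)
#         if server.status == 'ACTIVE':
#             return True
#         # only poll this resource again on every 4th opportunity
#         raise PollDelay(4)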
@six.python_2_unicode_compatible
class Resource(status.ResourceStatus):
BASE_ATTRIBUTES = (SHOW, ) = (attributes.SHOW_ATTR, )
LOCK_ACTIONS = (
LOCK_NONE, LOCK_ACQUIRE, LOCK_RELEASE, LOCK_RESPECT,
) = (
None, 1, -1, 0,
)
# If True, this resource must be created before it can be referenced.
strict_dependency = True
# Resource implementation set this to the subset of resource properties
# supported for handle_update, used by update_template_diff_properties
update_allowed_properties = ()
# Resource implementations set this to the name: description dictionary
# that describes the appropriate resource attributes
attributes_schema = {}
# Resource implementations set this to update policies
update_policy_schema = {}
# Default entity of resource, which is used for during resolving
# show attribute
entity = None
# Description dictionary, that describes the common attributes for all
# resources
base_attributes_schema = {
SHOW: attributes.Schema(
_("Detailed information about resource."),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.MAP
)
}
# If True, this resource may perform authenticated API requests
# throughout its lifecycle
requires_deferred_auth = False
# Limit to apply to physical_resource_name() size reduction algorithm.
# If set to None no limit will be applied.
physical_resource_name_limit = 255
support_status = support.SupportStatus()
# Default name to use for calls to self.client()
default_client_name = None
# Required service extension for this resource
required_service_extension = None
# no signal actions
no_signal_actions = (status.ResourceStatus.SUSPEND,
status.ResourceStatus.DELETE)
# Whether all other resources need a metadata_update() after
# a signal to this resource
signal_needs_metadata_updates = True
def __new__(cls, name, definition, stack):
"""Create a new Resource of the appropriate class for its type."""
assert isinstance(definition, rsrc_defn.ResourceDefinition)
if cls != Resource:
# Call is already for a subclass, so pass it through
ResourceClass = cls
else:
registry = stack.env.registry
ResourceClass = registry.get_class_to_instantiate(
definition.resource_type,
resource_name=name)
assert issubclass(ResourceClass, Resource)
return super(Resource, cls).__new__(ResourceClass)
@classmethod
def _validate_service_availability(cls, context, resource_type):
try:
(svc_available, reason) = cls.is_service_available(context)
except Exception as exc:
LOG.exception("Resource type %s unavailable",
resource_type)
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
service_name=cls.default_client_name,
reason=six.text_type(exc))
raise ex
else:
if not svc_available:
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
service_name=cls.default_client_name,
reason=reason)
LOG.info(six.text_type(ex))
raise ex
def __init__(self, name, definition, stack):
def _validate_name(res_name):
if '/' in res_name:
message = _('Resource name may not contain "/"')
raise exception.StackValidationFailed(message=message)
_validate_name(name)
self.stack = stack
self.context = stack.context
self.name = name
self.t = definition
self.reparse(client_resolve=False)
self.update_policy = self.t.update_policy(self.update_policy_schema,
self.context)
self._update_allowed_properties = self.calc_update_allowed(
self.properties)
self.attributes_schema.update(self.base_attributes_schema)
self.attributes = attributes.Attributes(self.name,
self.attributes_schema,
self._make_resolver(
weakref.ref(self)))
self.abandon_in_progress = False
self.resource_id = None
# if the stack is being deleted, assume we've already been deleted.
# or if the resource has not been created yet, and the stack was
# rollback, we set the resource to rollback
if stack.action == stack.DELETE or stack.action == stack.ROLLBACK:
self.action = stack.action
else:
self.action = self.INIT
self.status = self.COMPLETE
self.status_reason = ''
self.id = None
self.uuid = None
self._data = None
self._attr_data_id = None
self._rsrc_metadata = None
self._rsrc_prop_data_id = None
self._stored_properties_data = None
self.created_time = stack.created_time
self.updated_time = stack.updated_time
self._rpc_client = None
self.needed_by = []
self.requires = []
self.replaces = None
self.replaced_by = None
self.current_template_id = None
self.root_stack_id = None
self._calling_engine_id = None
self._atomic_key = None
self.converge = False
if not self.stack.in_convergence_check:
resource = stack.db_resource_get(name)
if resource:
self._load_data(resource)
else:
proxy = self.stack.defn[self.name]
node_data = proxy._resource_data
if node_data is not None:
self.action, self.status = proxy.state
self.id = node_data.primary_key
self.uuid = node_data.uuid
def rpc_client(self):
"""Return a client for making engine RPC calls."""
if not self._rpc_client:
self._rpc_client = rpc_client.EngineClient()
return self._rpc_client
def _load_data(self, resource):
"""Load the resource state from its DB representation."""
self.resource_id = resource.physical_resource_id
self.action = resource.action
self.status = resource.status
self.status_reason = resource.status_reason
self.id = resource.id
self.uuid = resource.uuid
try:
self._data = resource_data_objects.ResourceData.get_all(
self, resource.data)
except exception.NotFound:
self._data = {}
self.attributes.cached_attrs = resource.attr_data or None
self._attr_data_id = resource.attr_data_id
self._rsrc_metadata = resource.rsrc_metadata
self._stored_properties_data = resource.properties_data
self._rsrc_prop_data_id = resource.rsrc_prop_data_id
self.created_time = resource.created_at
self.updated_time = resource.updated_at
self.needed_by = resource.needed_by
self.requires = resource.requires
self.replaces = resource.replaces
self.replaced_by = resource.replaced_by
self.current_template_id = resource.current_template_id
self.root_stack_id = resource.root_stack_id
self._atomic_key = resource.atomic_key
@property
def external_id(self):
return self.t.external_id()
@classmethod
def getdoc(cls):
if cls.__doc__ is None:
return _('No description available')
return pydoc.getdoc(cls)
@property
def stack(self):
stack = self._stackref()
assert stack is not None, "Need a reference to the Stack object"
return stack
@stack.setter
def stack(self, stack):
self._stackref = weakref.ref(stack)
@classmethod
def load(cls, context, resource_id, current_traversal, is_update, data):
"""Load a specified resource from the database to check.
Returns a tuple of the Resource, the StackDefinition corresponding to
the resource's ResourceDefinition (i.e. the one the resource was last
updated to if it has already been created, or the one it will be
created with if it hasn't been already), and the Stack containing the
latest StackDefinition (i.e. the one that the latest traversal is
updating to).
The latter two must remain in-scope, because the Resource holds weak
references to them.
"""
from heat.engine import stack as stack_mod
db_res = resource_objects.Resource.get_obj(context, resource_id)
curr_stack = stack_mod.Stack.load(context, stack_id=db_res.stack_id,
cache_data=data)
initial_stk_defn = latest_stk_defn = curr_stack.defn
if (db_res.current_template_id != curr_stack.t.id and
(db_res.action != cls.INIT or
not is_update or
current_traversal != curr_stack.current_traversal)):
# load the definition associated with the resource's template
current_template_id = db_res.current_template_id
current_template = template.Template.load(context,
current_template_id)
initial_stk_defn = curr_stack.defn.clone_with_new_template(
current_template,
curr_stack.identifier())
curr_stack.defn = initial_stk_defn
res_defn = initial_stk_defn.resource_definition(db_res.name)
res_type = initial_stk_defn.env.registry.get_class_to_instantiate(
res_defn.resource_type, resource_name=db_res.name)
# If the resource type has changed and the new one is a valid
# substitution, use that as the class to instantiate.
if is_update and (latest_stk_defn is not initial_stk_defn):
try:
new_res_defn = latest_stk_defn.resource_definition(db_res.name)
except KeyError:
pass
else:
new_registry = latest_stk_defn.env.registry
new_res_type = new_registry.get_class_to_instantiate(
new_res_defn.resource_type, resource_name=db_res.name)
if res_type.check_is_substituted(new_res_type):
res_type = new_res_type
# Load only the resource in question; don't load all resources
# by invoking stack.resources. Maintain light-weight stack.
resource = res_type(db_res.name, res_defn, curr_stack)
resource._load_data(db_res)
curr_stack.defn = latest_stk_defn
return resource, initial_stk_defn, curr_stack
def make_replacement(self, new_tmpl_id):
"""Create a replacement resource in the database.
Returns the DB ID of the new resource, or None if the new resource
cannot be created (generally because the template ID does not exist).
Raises UpdateInProgress if another traversal has already locked the
current resource.
"""
# 1. create the replacement with "replaces" = self.id
# Don't set physical_resource_id so that a create is triggered.
rs = {'stack_id': self.stack.id,
'name': self.name,
'rsrc_prop_data_id': None,
'needed_by': self.needed_by,
'requires': self.requires,
'replaces': self.id,
'action': self.INIT,
'status': self.COMPLETE,
'current_template_id': new_tmpl_id,
'stack_name': self.stack.name,
'root_stack_id': self.root_stack_id}
update_data = {'status': self.COMPLETE}
# Retry in case a signal has updated the atomic_key
attempts = max(cfg.CONF.client_retry_limit, 0) + 1
def prepare_attempt(fn, attempt):
if attempt > 1:
res_obj = resource_objects.Resource.get_obj(
self.context, self.id)
if (res_obj.engine_id is not None or
res_obj.updated_at != self.updated_time):
raise exception.UpdateInProgress(resource_name=self.name)
self._atomic_key = res_obj.atomic_key
@tenacity.retry(
stop=tenacity.stop_after_attempt(attempts),
retry=tenacity.retry_if_exception_type(
exception.UpdateInProgress),
before=prepare_attempt,
wait=tenacity.wait_random(max=2),
reraise=True)
def create_replacement():
return resource_objects.Resource.replacement(self.context,
self.id,
update_data,
rs,
self._atomic_key)
new_rs = create_replacement()
if new_rs is None:
return None
self._incr_atomic_key(self._atomic_key)
self.replaced_by = new_rs.id
return new_rs.id
def reparse(self, client_resolve=True):
"""Reparse the resource properties.
Optional translate flag for property translation and
client_resolve flag for resolving properties by doing
client lookup.
"""
self.properties = self.t.properties(self.properties_schema,
self.context)
self.translate_properties(self.properties, client_resolve)
def calc_update_allowed(self, props):
update_allowed_set = set(self.update_allowed_properties)
for (psk, psv) in six.iteritems(props.props):
if psv.update_allowed():
update_allowed_set.add(psk)
return update_allowed_set
def __eq__(self, other):
"""Allow == comparison of two resources."""
# For the purposes of comparison, we declare two resource objects
# equal if their names and resolved templates are the same
if isinstance(other, Resource):
return ((self.name == other.name) and
(self.t.freeze() == other.t.freeze()))
return NotImplemented
def __ne__(self, other):
"""Allow != comparison of two resources."""
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return id(self)
def metadata_get(self, refresh=False):
if refresh:
self._rsrc_metadata = None
if self.id is None or self.action == self.INIT:
return self.t.metadata()
if self._rsrc_metadata is not None:
return self._rsrc_metadata
rs = resource_objects.Resource.get_obj(self.stack.context, self.id,
refresh=True,
fields=('rsrc_metadata', ))
self._rsrc_metadata = rs.rsrc_metadata
return rs.rsrc_metadata
@resource_objects.retry_on_conflict
def metadata_set(self, metadata, merge_metadata=None):
"""Write new metadata to the database.
The caller may optionally provide a merge_metadata() function, which
takes two arguments - the metadata passed to metadata_set() and the
current metadata of the resource - and returns the merged metadata to
write. If merge_metadata is not provided, the metadata passed to
metadata_set() is written verbatim, overwriting any existing metadata.
If a race condition is detected, the write will be retried with the new
result of merge_metadata() (if it is supplied) or the verbatim data (if
it is not).
"""
if self.id is None or self.action == self.INIT:
raise exception.ResourceNotAvailable(resource_name=self.name)
refresh = merge_metadata is not None
db_res = resource_objects.Resource.get_obj(
self.stack.context, self.id, refresh=refresh,
fields=('name', 'rsrc_metadata', 'atomic_key', 'engine_id',
'action', 'status'))
if db_res.action == self.DELETE:
self._db_res_is_deleted = True
LOG.debug("resource %(name)s, id: %(id)s is DELETE_%(st)s, "
"not setting metadata",
{'name': self.name, 'id': self.id, 'st': db_res.status})
raise exception.ResourceNotAvailable(resource_name=self.name)
LOG.debug('Setting metadata for %s', six.text_type(self))
if refresh:
metadata = merge_metadata(metadata, db_res.rsrc_metadata)
if db_res.update_metadata(metadata):
self._incr_atomic_key(db_res.atomic_key)
self._rsrc_metadata = metadata
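# Illustrative sketch of the optional merge_metadata callback described in
# the docstring above (hypothetical caller code, not part of this class):
#
#     def merge_scaling_flag(new_metadata, current_metadata):
#         merged = dict(current_metadata or {})
#         merged.update(new_metadata)   # only override the keys we own
#         return merged
#
#     resource.metadata_set({'scaling_in_progress': False},
#                           merge_metadata=merge_scaling_flag)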
def handle_metadata_reset(self):
"""Default implementation; should be overridden by resources.
Scaling policy and scaling group resources override this method to reset
their metadata, because it might otherwise hang in a wrong state
('scaling_in_progress' stuck at True) if the engine restarts while
scaling.
"""
pass
@classmethod
def set_needed_by(cls, db_rsrc, needed_by, expected_engine_id=None):
if db_rsrc:
db_rsrc.select_and_update(
{'needed_by': needed_by},
atomic_key=db_rsrc.atomic_key,
expected_engine_id=expected_engine_id
)
@classmethod
def set_requires(cls, db_rsrc, requires):
if db_rsrc:
db_rsrc.update_and_save(
{'requires': requires}
)
def _break_if_required(self, action, hook):
"""Block the resource until the hook is cleared if there is one."""
if self.stack.env.registry.matches_hook(self.name, hook):
self.trigger_hook(hook)
self._add_event(self.action, self.status,
_("%(a)s paused until Hook %(h)s is cleared")
% {'a': action, 'h': hook})
LOG.info('Reached hook on %s', self)
while self.has_hook(hook):
try:
yield
except BaseException as exc:
self.clear_hook(hook)
self._add_event(
self.action, self.status,
"Failure occurred while waiting.")
if (isinstance(exc, AssertionError) or
not isinstance(exc, Exception)):
raise
def has_nested(self):
"""Return True if the resource has an existing nested stack.
For most resource types, this will always return False. StackResource
subclasses return True when appropriate. Resource subclasses that may
return True must also provide a nested_identifier() method to return
the identifier of the nested stack, and a nested() method to return a
Stack object for the nested stack.
"""
return False
def get_nested_parameters_stack(self):
"""Return the nested stack for schema validation.
Regular resources don't have such a thing.
"""
return
def has_hook(self, hook):
# Clear the cache to make sure the data is up to date:
self._data = None
return self.data().get(hook) == "True"
def trigger_hook(self, hook):
self.data_set(hook, "True")
def clear_hook(self, hook):
self.data_delete(hook)
def type(self):
return self.t.resource_type
def has_interface(self, resource_type):
"""Check if resource is mapped to resource_type or is "resource_type".
Check to see if this resource is either mapped to resource_type
or is a "resource_type".
"""
if self.type() == resource_type:
return True
try:
ri = self.stack.env.get_resource_info(self.type(),
self.name)
except exception.EntityNotFound:
return False
else:
return ri.name == resource_type
def identifier(self):
"""Return an identifier for this resource."""
return identifier.ResourceIdentifier(resource_name=self.name,
**self.stack.identifier())
def frozen_definition(self):
"""Return a frozen ResourceDefinition with stored property values.
The returned definition will contain the property values read from the
database, and will have all intrinsic functions resolved (note that
this makes it useless for calculating dependencies).
"""
if self._stored_properties_data is not None:
args = {'properties': self._stored_properties_data}
else:
args = {}
return self.t.freeze(**args)
@contextlib.contextmanager
def frozen_properties(self):
"""Context manager to use the frozen property values from the database.
The live property values are always substituted back when the context
ends.
"""
live_props = self.properties
props = self.frozen_definition().properties(self.properties_schema,
self.context)
try:
self.properties = props
yield props
finally:
self.properties = live_props
def update_template_diff(self, after, before):
"""Returns the difference between the before and after json snippets.
If something has been removed in after which exists in before we set it
to None.
"""
return after - before
def update_template_diff_properties(self, after_props, before_props):
"""Return changed Properties between the before and after properties.
If any property having immutable as True is updated, raises
NotSupported error.
If any properties have changed which are not in
update_allowed_properties, raises UpdateReplace.
"""
update_allowed_set = self.calc_update_allowed(after_props)
immutable_set = set()
for (psk, psv) in six.iteritems(after_props.props):
if psv.immutable():
immutable_set.add(psk)
def prop_changed(key):
try:
before = before_props.get(key)
except (TypeError, ValueError) as exc:
# We shouldn't get here usually, but there is a known issue
# with template resources and new parameters in non-convergence
# stacks (see bug 1543685). The error should be harmless
# because we're on the before properties, which have presumably
# already been validated.
LOG.warning('Ignoring error in old property value '
'%(prop_name)s: %(msg)s',
{'prop_name': key, 'msg': six.text_type(exc)})
return True
return before != after_props.get(key)
# Create a set of keys which differ (or are missing/added)
changed_properties_set = set(k for k in after_props if prop_changed(k))
# Create a list of updated properties offending property immutability
update_replace_forbidden = [k for k in changed_properties_set
if k in immutable_set]
if update_replace_forbidden:
msg = _("Update to properties %(props)s of %(name)s (%(res)s)"
) % {'props': ", ".join(sorted(update_replace_forbidden)),
'res': self.type(), 'name': self.name}
raise exception.NotSupported(feature=msg)
if changed_properties_set and self.needs_replace_with_prop_diff(
changed_properties_set,
after_props,
before_props):
raise UpdateReplace(self)
if not changed_properties_set.issubset(update_allowed_set):
raise UpdateReplace(self.name)
return dict((k, after_props.get(k)) for k in changed_properties_set)
def __str__(self):
class_name = reflection.get_class_name(self, fully_qualified=False)
if self.stack.id is not None:
if self.resource_id is not None:
text = '%s "%s" [%s] %s' % (class_name, self.name,
self.resource_id,
six.text_type(self.stack))
else:
text = '%s "%s" %s' % (class_name, self.name,
six.text_type(self.stack))
else:
text = '%s "%s"' % (class_name, self.name)
return six.text_type(text)
def add_explicit_dependencies(self, deps):
"""Add all dependencies explicitly specified in the template.
The deps parameter is a Dependencies object to which dependency pairs
are added.
"""
for dep in self.t.dependencies(self.stack):
deps += (self, dep)
deps += (self, None)
def add_dependencies(self, deps):
"""Add implicit dependencies specific to the resource type.
Some resource types may have implicit dependencies on other resources
in the same stack that are not linked by a property value (that would
be set using get_resource or get_attr for example, thus creating an
explicit dependency). Such dependencies are opaque to the user and
should be avoided wherever possible, however in some circumstances they
are required due to magic in the underlying API.
The deps parameter is a Dependencies object to which dependency pairs
may be added.
"""
return
def required_by(self):
"""List of resources that require this one as a dependency.
Returns a list of names of resources that depend on this resource
directly.
"""
try:
reqd_by = self.stack.dependencies.required_by(self)
except KeyError:
if self.stack.convergence:
# for convergence, fall back to building from needed_by
needed_by_ids = self.needed_by or set()
reqd_by = [r for r in self.stack.resources.values()
if r.id in needed_by_ids]
else:
LOG.error('Getting required_by list for Resource not in '
'dependency graph.')
return []
return [r.name for r in reqd_by]
def client(self, name=None, version=None):
client_name = name or self.default_client_name
assert client_name, "Must specify client name"
return self.stack.clients.client(client_name, version)
def client_plugin(self, name=None):
client_name = name or self.default_client_name
assert client_name, "Must specify client name"
return self.stack.clients.client_plugin(client_name)
@classmethod
def is_service_available(cls, context):
# NOTE(kanagaraj-manickam): return True for resources that do not have an
# endpoint, such as RandomString and other OS::Heat resources, as they are
# implemented within the engine.
if cls.default_client_name is None:
return (True, None)
client_plugin = clients.Clients(context).client_plugin(
cls.default_client_name)
if not client_plugin:
raise exception.ClientNotAvailable(
client_name=cls.default_client_name)
service_types = client_plugin.service_types
if not service_types:
return (True, None)
# NOTE(kanagaraj-manickam): if at least one of the service_types exists
# in the keystone catalog, the service is considered available.
for service_type in service_types:
endpoint_exists = client_plugin.does_endpoint_exist(
service_type=service_type,
service_name=cls.default_client_name)
if endpoint_exists:
req_extension = cls.required_service_extension
is_ext_available = (
not req_extension or client_plugin.has_extension(
req_extension))
if is_ext_available:
return (True, None)
else:
reason = _('Required extension {0} in {1} service '
'is not available.')
reason = reason.format(req_extension,
cls.default_client_name)
else:
reason = _('{0} {1} endpoint is not in service catalog.')
reason = reason.format(cls.default_client_name, service_type)
return (False, reason)
def keystone(self):
return self.client('keystone')
def nova(self):
return self.client('nova')
def swift(self):
return self.client('swift')
def neutron(self):
return self.client('neutron')
def cinder(self):
return self.client('cinder')
def trove(self):
return self.client('trove')
def ceilometer(self):
return self.client('ceilometer')
def heat(self):
return self.client('heat')
def glance(self):
return self.client('glance')
def _incr_atomic_key(self, last_key):
if last_key is None:
self._atomic_key = 1
else:
self._atomic_key = last_key + 1
def _should_lock_on_action(self, action):
"""Return whether we should take a resource-level lock for an action.
In the legacy path, we always took a lock at the Stack level and never
at the Resource level. In convergence, we lock at the Resource level
for most operations. However, there are currently some exceptions:
the SUSPEND, RESUME, SNAPSHOT, and CHECK actions, and stack abandon.
"""
return (self.stack.convergence and
not self.abandon_in_progress and
action in {self.ADOPT,
self.CREATE,
self.UPDATE,
self.ROLLBACK,
self.DELETE})
@contextlib.contextmanager
def _action_recorder(self, action, expected_exceptions=tuple()):
"""Return a context manager to record the progress of an action.
Upon entering the context manager, the state is set to IN_PROGRESS.
Upon exiting, the state will be set to COMPLETE if no exception was
raised, or FAILED otherwise. Non-exit exceptions will be translated
to ResourceFailure exceptions.
Expected exceptions are re-raised, with the Resource moved to the
COMPLETE state.
"""
attempts = 1
first_iter = [True] # work around no nonlocal in py27
if self.stack.convergence:
if self._should_lock_on_action(action):
lock_acquire = self.LOCK_ACQUIRE
lock_release = self.LOCK_RELEASE
else:
lock_acquire = lock_release = self.LOCK_RESPECT
if action != self.CREATE:
attempts += max(cfg.CONF.client_retry_limit, 0)
else:
lock_acquire = lock_release = self.LOCK_NONE
# retry for convergence DELETE or UPDATE if we get the usual
# lock-acquire exception of exception.UpdateInProgress
@tenacity.retry(
stop=tenacity.stop_after_attempt(attempts),
retry=tenacity.retry_if_exception_type(
exception.UpdateInProgress),
wait=tenacity.wait_random(max=2),
reraise=True)
def set_in_progress():
if not first_iter[0]:
res_obj = resource_objects.Resource.get_obj(
self.context, self.id)
self._atomic_key = res_obj.atomic_key
else:
first_iter[0] = False
self.state_set(action, self.IN_PROGRESS, lock=lock_acquire)
try:
set_in_progress()
yield
except exception.UpdateInProgress as ex:
with excutils.save_and_reraise_exception():
LOG.info('Update in progress for %s', self.name)
except expected_exceptions as ex:
with excutils.save_and_reraise_exception():
self.state_set(action, self.COMPLETE, six.text_type(ex),
lock=lock_release)
LOG.debug('%s', six.text_type(ex))
except Exception as ex:
LOG.info('%(action)s: %(info)s',
{"action": action,
"info": six.text_type(self)},
exc_info=True)
failure = exception.ResourceFailure(ex, self, action)
self.state_set(action, self.FAILED, six.text_type(failure),
lock=lock_release)
raise failure
except BaseException as exc:
with excutils.save_and_reraise_exception():
try:
reason = six.text_type(exc)
msg = '%s aborted' % action
if reason:
msg += ' (%s)' % reason
self.state_set(action, self.FAILED, msg,
lock=lock_release)
except Exception:
LOG.exception('Error marking resource as failed')
else:
self.state_set(action, self.COMPLETE, lock=lock_release)
def action_handler_task(self, action, args=None, action_prefix=None):
"""A task to call the Resource subclass's handler methods for action.
Calls the handle_<ACTION>() method for the given action and then calls
the check_<ACTION>_complete() method with the result in a loop until it
returns True. If the methods are not provided, the call is omitted.
Any args provided are passed to the handler.
If a prefix is supplied, the handler method handle_<PREFIX>_<ACTION>()
is called instead.
"""
args = args or []
handler_action = action.lower()
check = getattr(self, 'check_%s_complete' % handler_action, None)
if action_prefix:
handler_action = '%s_%s' % (action_prefix.lower(), handler_action)
handler = getattr(self, 'handle_%s' % handler_action, None)
if callable(handler):
handler_data = handler(*args)
yield
if callable(check):
try:
while True:
try:
done = check(handler_data)
except PollDelay as delay:
yield delay.period
else:
if done:
break
else:
yield
except Exception:
raise
except: # noqa
with excutils.save_and_reraise_exception():
canceller = getattr(
self,
'handle_%s_cancel' % handler_action,
None
)
if callable(canceller):
try:
canceller(handler_data)
except Exception:
LOG.exception(
'Error cancelling resource %s',
action
)
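# Illustrative sketch of the handler/check naming convention consumed above
# (hypothetical plugin code, not part of this class). For the CREATE action
# the task looks up handle_create() and check_create_complete(); the client
# calls shown are assumptions for illustration only.
#
#     class MyResource(Resource):
#         def handle_create(self):
#             thing = self.client().things.create(
#                 name=self.physical_resource_name())
#             self.resource_id_set(thing.id)
#             return thing.id                     # passed to the check method
#
#         def check_create_complete(self, thing_id):
#             return self.client().things.get(thing_id).status == 'READY'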
@scheduler.wrappertask
def _do_action(self, action, pre_func=None, resource_data=None):
"""Perform a transition to a new state via a specified action.
Action should be e.g. self.CREATE, self.UPDATE, etc.; we set
status based on this, the transition is handled by calling the
corresponding handle_* and check_*_complete functions
Note pre_func is an optional function reference which will
be called before the handle_<action> function
If the resource does not declare a check_$action_complete function,
we declare COMPLETE status as soon as the handle_$action call has
finished, and if no handle_$action function is declared, then we do
nothing, which is useful e.g. if the resource requires no action for a given
state transition
"""
assert action in self.ACTIONS, 'Invalid action %s' % action
with self._action_recorder(action):
if callable(pre_func):
pre_func()
handler_args = [resource_data] if resource_data is not None else []
yield self.action_handler_task(action, args=handler_args)
def _update_stored_properties(self):
old_props = self._stored_properties_data
self._stored_properties_data = function.resolve(self.properties.data)
if self._stored_properties_data != old_props:
self._rsrc_prop_data_id = None
self.attributes.reset_resolved_values()
def referenced_attrs(self, stk_defn=None,
in_resources=True, in_outputs=True,
load_all=False):
"""Return the set of all attributes referenced in the template.
This enables the resource to calculate which of its attributes will
be used. By default, attributes referenced in either other resources
or outputs will be included. Either can be excluded by setting the
`in_resources` or `in_outputs` parameters to False. To limit to a
subset of outputs, pass an iterable of the output names to examine
for the `in_outputs` parameter.
The set of referenced attributes is calculated from the
StackDefinition object provided, or from the stack's current
definition if none is passed.
"""
if stk_defn is None:
stk_defn = self.stack.defn
def get_dep_attrs(source):
return set(itertools.chain.from_iterable(s.dep_attrs(self.name,
load_all)
for s in source))
refd_attrs = set()
if in_resources:
enabled_resources = stk_defn.enabled_rsrc_names()
refd_attrs |= get_dep_attrs(stk_defn.resource_definition(r_name)
for r_name in enabled_resources)
subset_outputs = isinstance(in_outputs, collections.Iterable)
if subset_outputs or in_outputs:
if not subset_outputs:
in_outputs = stk_defn.enabled_output_names()
refd_attrs |= get_dep_attrs(stk_defn.output_definition(op_name)
for op_name in in_outputs)
if attributes.ALL_ATTRIBUTES in refd_attrs:
refd_attrs.remove(attributes.ALL_ATTRIBUTES)
refd_attrs |= (set(self.attributes) - {self.SHOW})
return refd_attrs
def node_data(self, stk_defn=None, for_resources=True, for_outputs=False):
"""Return a NodeData object representing the resource.
The NodeData object returned contains basic data about the resource,
including its name, ID and state, as well as its reference ID and any
attribute values that are used.
By default, those attribute values that are referenced by other
resources are included. These can be ignored by setting the
for_resources parameter to False. If the for_outputs parameter is
True, those attribute values that are referenced by stack outputs are
included. If the for_outputs parameter is an iterable of output names,
only those attribute values referenced by the specified stack outputs
are included.
The set of referenced attributes is calculated from the
StackDefinition object provided, or from the stack's current
definition if none is passed.
After calling this method, the resource's attribute cache is
populated with any cacheable attribute values referenced by stack
outputs, even if they are not also referenced by other resources.
"""
def get_attrs(attrs, cacheable_only=False):
for attr in attrs:
path = (attr,) if isinstance(attr, six.string_types) else attr
if (cacheable_only and
(self.attributes.get_cache_mode(path[0]) ==
attributes.Schema.CACHE_NONE)):
continue
if self.action == self.INIT:
if (path[0] in self.attributes or
(type(self).get_attribute != Resource.get_attribute or
type(self).FnGetAtt != Resource.FnGetAtt)):
# TODO(ricolin) make better placeholder values here
yield attr, None
else:
try:
yield attr, self.FnGetAtt(*path)
except exception.InvalidTemplateAttribute as ita:
# Attribute doesn't exist, so don't store it. Whatever
# tries to access it will get another
# InvalidTemplateAttribute exception at that point
LOG.info('%s', ita)
except Exception as exc:
# Store the exception that occurred. It will be
# re-raised when something tries to access it, or when
# we try to serialise the NodeData.
yield attr, exc
load_all = not self.stack.in_convergence_check
dep_attrs = self.referenced_attrs(stk_defn,
in_resources=for_resources,
in_outputs=for_outputs,
load_all=load_all)
# Ensure all attributes referenced in outputs get cached
if for_outputs is False and self.stack.convergence:
out_attrs = self.referenced_attrs(stk_defn, in_resources=False,
load_all=load_all)
for e in get_attrs(out_attrs - dep_attrs, cacheable_only=True):
pass
# Calculate attribute values *before* reference ID, to potentially
# save an extra RPC call in TemplateResource
attribute_values = dict(get_attrs(dep_attrs))
return node_data.NodeData(self.id, self.name, self.uuid,
self.FnGetRefId(), attribute_values,
self.action, self.status)
def preview(self):
"""Default implementation of Resource.preview.
This method should be overridden by child classes for specific
behavior.
"""
return self
def create_convergence(self, template_id, resource_data, engine_id,
timeout, progress_callback=None):
"""Creates the resource by invoking the scheduler TaskRunner."""
self._calling_engine_id = engine_id
self.requires = list(
set(data.primary_key for data in resource_data.values()
if data is not None)
)
self.current_template_id = template_id
if self.stack.adopt_stack_data is None:
runner = scheduler.TaskRunner(self.create)
else:
adopt_data = self.stack._adopt_kwargs(self)
runner = scheduler.TaskRunner(self.adopt, **adopt_data)
runner(timeout=timeout, progress_callback=progress_callback)
def validate_external(self):
if self.external_id is not None:
try:
self.resource_id = self.external_id
self._show_resource()
except Exception as ex:
if self.client_plugin().is_not_found(ex):
error_message = (_("Invalid external resource: Resource "
"%(external_id)s (%(type)s) can not "
"be found.") %
{'external_id': self.external_id,
'type': self.type()})
raise exception.StackValidationFailed(
message="%s" % error_message)
raise
@scheduler.wrappertask
def create(self):
"""Create the resource.
Subclasses should provide a handle_create() method to customise
creation.
"""
action = self.CREATE
if (self.action, self.status) != (self.INIT, self.COMPLETE):
exc = exception.Error(_('State %s invalid for create')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
if self.external_id is not None:
yield self._do_action(self.ADOPT,
resource_data={
'resource_id': self.external_id})
self.check()
return
# This method can be called when we replace a resource, too. In that
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.CREATE:
yield self._break_if_required(
self.CREATE, environment.HOOK_PRE_CREATE)
LOG.info('creating %s', self)
# Re-resolve the template, since if the resource Ref's
# the StackId pseudo parameter, it will change after
# the parser.Stack is stored (which is after the resources
# are __init__'d, but before they are create()'d). We also
# do client lookups for RESOLVE translation rules here.
self.reparse()
self._update_stored_properties()
count = {self.CREATE: 0, self.DELETE: 0}
retry_limit = max(cfg.CONF.action_retry_limit, 0)
first_failure = None
while (count[self.CREATE] <= retry_limit and
count[self.DELETE] <= retry_limit):
pre_func = None
if count[action] > 0:
delay = timeutils.retry_backoff_delay(count[action],
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
yield waiter.as_task(timeout=delay)
elif action == self.CREATE:
# Only validate properties in first create call.
pre_func = self.properties.validate
try:
yield self._do_action(action, pre_func)
if action == self.CREATE:
first_failure = None
break
else:
action = self.CREATE
except exception.ResourceFailure as failure:
if isinstance(failure.exc, exception.StackValidationFailed):
path = [self.t.name]
path.extend(failure.exc.path)
raise exception.ResourceFailure(
exception_or_error=exception.StackValidationFailed(
error=failure.exc.error,
path=path,
message=failure.exc.error_message
),
resource=failure.resource,
action=failure.action
)
if not isinstance(failure.exc, exception.ResourceInError):
raise failure
count[action] += 1
if action == self.CREATE:
action = self.DELETE
count[action] = 0
if first_failure is None:
# Save the first exception
first_failure = failure
if first_failure:
raise first_failure
if self.stack.action == self.stack.CREATE:
yield self._break_if_required(
self.CREATE, environment.HOOK_POST_CREATE)
@staticmethod
def pause():
try:
while True:
yield
except scheduler.Timeout:
return
def prepare_abandon(self):
self.abandon_in_progress = True
return {
'name': self.name,
'resource_id': self.resource_id,
'type': self.type(),
'action': self.action,
'status': self.status,
'metadata': self.metadata_get(),
'resource_data': self.data()
}
def adopt(self, resource_data):
"""Adopt the existing resource.
Resource subclasses can provide a handle_adopt() method to customise
adopt.
"""
self._update_stored_properties()
return self._do_action(self.ADOPT, resource_data=resource_data)
def handle_adopt(self, resource_data=None):
resource_id, data, metadata = self._get_resource_info(resource_data)
if not resource_id:
exc = Exception(_('Resource ID was not provided.'))
failure = exception.ResourceFailure(exc, self)
raise failure
# set resource id
self.resource_id_set(resource_id)
# save the resource data
if data and isinstance(data, dict):
for key, value in six.iteritems(data):
self.data_set(key, value)
# save the resource metadata
self.metadata_set(metadata)
def translation_rules(self, properties):
"""Return specified rules for resource."""
return []
def translate_properties(self, properties,
client_resolve=True):
"""Set resource specific rules for properties translation.
The properties parameter is a properties object and the
optional client_resolve flag is to specify whether to
do 'RESOLVE' translation with client lookup.
"""
rules = self.translation_rules(properties) or []
properties.update_translation(rules, client_resolve=client_resolve)
def cancel_grace_period(self):
canceller = getattr(self,
'handle_%s_cancel' % self.action.lower(),
None)
if callable(canceller):
return None
return cfg.CONF.error_wait_time
def _get_resource_info(self, resource_data):
if not resource_data:
return None, None, None
return (resource_data.get('resource_id'),
resource_data.get('resource_data'),
resource_data.get('metadata'))
def needs_replace(self, after_props):
"""Mandatory replace based on certain properties."""
return False
def needs_replace_with_prop_diff(self, changed_properties_set,
after_props, before_props):
"""Needs replace based on prop_diff."""
return False
def needs_replace_with_tmpl_diff(self, tmpl_diff):
"""Needs replace based on tmpl_diff."""
return False
def needs_replace_failed(self):
"""Needs replace if resource is in *_FAILED."""
return True
def _needs_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=True):
if self.status == self.FAILED:
# always replace when a resource is in CHECK_FAILED
if self.action == self.CHECK or self.needs_replace_failed():
raise UpdateReplace(self)
if self.state == (self.DELETE, self.COMPLETE):
raise UpdateReplace(self)
if (check_init_complete and
self.state == (self.INIT, self.COMPLETE)):
raise UpdateReplace(self)
if self.needs_replace(after_props):
raise UpdateReplace(self)
if before != after.freeze():
return True
try:
return before_props != after_props
except ValueError:
return True
def _check_for_convergence_replace(self, restricted_actions):
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, self, self.UPDATE)
self._add_event(self.UPDATE, self.FAILED, six.text_type(ex))
raise failure
else:
raise UpdateReplace(self.name)
def update_convergence(self, template_id, resource_data, engine_id,
timeout, new_stack, progress_callback=None):
"""Update the resource synchronously.
Persist the resource's current_template_id to template_id and
resource's requires to list of the required resource ids from the given
resource_data and existing resource's requires, then updates the
resource by invoking the scheduler TaskRunner.
"""
def update_templ_id_and_requires(persist=True):
self.current_template_id = template_id
self.requires = list(
set(data.primary_key for data in resource_data.values()
if data is not None)
)
if not persist:
return
self.store(lock=self.LOCK_RESPECT)
self._calling_engine_id = engine_id
# Check that the resource type matches. If the type has changed by a
# legitimate substitution, the load()ed resource will already be of
# the new type.
registry = new_stack.env.registry
new_res_def = new_stack.defn.resource_definition(self.name)
new_res_type = registry.get_class_to_instantiate(
new_res_def.resource_type, resource_name=self.name)
if type(self) is not new_res_type:
restrictions = registry.get_rsrc_restricted_actions(self.name)
self._check_for_convergence_replace(restrictions)
action_rollback = self.stack.action == self.stack.ROLLBACK
status_in_progress = self.stack.status == self.stack.IN_PROGRESS
if action_rollback and status_in_progress and self.replaced_by:
try:
self.restore_prev_rsrc(convergence=True)
except Exception as e:
failure = exception.ResourceFailure(e, self, self.action)
self.state_set(self.UPDATE, self.FAILED,
six.text_type(failure))
raise failure
runner = scheduler.TaskRunner(
self.update, new_res_def,
update_templ_func=update_templ_id_and_requires)
try:
runner(timeout=timeout, progress_callback=progress_callback)
except UpdateReplace:
raise
except exception.UpdateInProgress:
raise
except BaseException:
with excutils.save_and_reraise_exception():
update_templ_id_and_requires(persist=True)
def preview_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=False):
"""Simulates update without actually updating the resource.
Raises UpdateReplace, if replacement is required or returns True,
if in-place update is required.
"""
if self._needs_update(after, before, after_props, before_props,
prev_resource, check_init_complete):
tmpl_diff = self.update_template_diff(after.freeze(), before)
if tmpl_diff and self.needs_replace_with_tmpl_diff(tmpl_diff):
raise UpdateReplace(self)
self.update_template_diff_properties(after_props, before_props)
return True
else:
return False
def _check_restricted_actions(self, actions, after, before,
after_props, before_props,
prev_resource):
"""Checks for restricted actions.
Raises ResourceActionRestricted, if the resource requires update
or replace and the required action is restricted.
Else, Raises UpdateReplace, if replacement is required or returns
True, if in-place update is required.
"""
try:
if self.preview_update(after, before, after_props, before_props,
prev_resource, check_init_complete=True):
if 'update' in actions:
raise exception.ResourceActionRestricted(action='update')
return True
except UpdateReplace:
if 'replace' in actions:
raise exception.ResourceActionRestricted(action='replace')
raise
return False
def _prepare_update_props(self, after, before):
before_props = before.properties(self.properties_schema,
self.context)
# Regenerate the schema, else validation would fail
self.regenerate_info_schema(after)
after.set_translation_rules(self.translation_rules(self.properties))
after_props = after.properties(self.properties_schema,
self.context)
self.translate_properties(after_props)
self.translate_properties(before_props)
if (cfg.CONF.observe_on_update or self.converge) and before_props:
if not self.resource_id:
raise UpdateReplace(self)
try:
resource_reality = self.get_live_state(before_props)
if resource_reality:
self._update_properties_with_live_state(before_props,
resource_reality)
except exception.EntityNotFound:
raise UpdateReplace(self)
except Exception as ex:
LOG.warning("Resource cannot be updated with it's "
"live state in case of next "
"error: %s", ex)
return after_props, before_props
def _prepare_update_replace_handler(self, action):
"""Return the handler method for preparing to replace a resource.
This may be either restore_prev_rsrc() (in the case of a legacy
rollback) or, more typically, prepare_for_replace().
If the plugin has not overridden the method, then None is returned in
place of the default method (which is empty anyway).
"""
if (self.stack.action == 'ROLLBACK' and
self.stack.status == 'IN_PROGRESS' and
not self.stack.convergence):
# handle case, when it's rollback and we should restore
# old resource
if self.restore_prev_rsrc != Resource.restore_prev_rsrc:
return self.restore_prev_rsrc
else:
if self.prepare_for_replace != Resource.prepare_for_replace:
return self.prepare_for_replace
return None
def _prepare_update_replace(self, action):
handler = self._prepare_update_replace_handler(action)
if handler is None:
return
try:
handler()
except Exception as e:
# if any exception happen, we should set the resource to
# FAILED, then raise ResourceFailure
failure = exception.ResourceFailure(e, self, action)
self.state_set(action, self.FAILED, six.text_type(failure))
raise failure
@classmethod
def check_is_substituted(cls, new_res_type):
support_status = getattr(cls, 'support_status', None)
if support_status:
is_substituted = support_status.is_substituted(new_res_type)
return is_substituted
return False
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None,
update_templ_func=None):
"""Return a task to update the resource.
Subclasses should provide a handle_update() method to customise update,
the base-class handle_update will fail by default.
"""
action = self.UPDATE
assert isinstance(after, rsrc_defn.ResourceDefinition)
if before is None:
before = self.frozen_definition()
after_external_id = after.external_id()
if self.external_id != after_external_id:
msg = _("Update to property %(prop)s of %(name)s (%(res)s)"
) % {'prop': hot_tmpl.HOTemplate20161014.RES_EXTERNAL_ID,
'res': self.type(), 'name': self.name}
exc = exception.NotSupported(feature=msg)
raise exception.ResourceFailure(exc, self, action)
elif after_external_id is not None:
LOG.debug("Skip update on external resource.")
return
after_props, before_props = self._prepare_update_props(after, before)
yield self._break_if_required(
self.UPDATE, environment.HOOK_PRE_UPDATE)
try:
registry = self.stack.env.registry
restr_actions = registry.get_rsrc_restricted_actions(self.name)
if restr_actions:
needs_update = self._check_restricted_actions(restr_actions,
after, before,
after_props,
before_props,
prev_resource)
else:
needs_update = self._needs_update(after, before,
after_props, before_props,
prev_resource)
except UpdateReplace:
with excutils.save_and_reraise_exception():
if self._prepare_update_replace_handler(action) is not None:
with self.lock(self._calling_engine_id):
self._prepare_update_replace(action)
except exception.ResourceActionRestricted as ae:
failure = exception.ResourceFailure(ae, self, action)
self._add_event(action, self.FAILED, six.text_type(ae))
raise failure
if not needs_update:
if update_templ_func is not None:
update_templ_func(persist=True)
if self.status == self.FAILED:
status_reason = _('Update status to COMPLETE for '
'FAILED resource that requires '
'neither update nor replace.')
lock = (self.LOCK_RESPECT if self.stack.convergence
else self.LOCK_NONE)
self.state_set(self.action, self.COMPLETE,
status_reason, lock=lock)
return
if not self.stack.convergence:
if (self.action, self.status) in (
(self.CREATE, self.IN_PROGRESS),
(self.UPDATE, self.IN_PROGRESS),
(self.ADOPT, self.IN_PROGRESS)):
exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action)
LOG.info('updating %s', self)
self.updated_time = datetime.utcnow()
with self._action_recorder(action, UpdateReplace):
after_props.validate()
self.properties = before_props
tmpl_diff = self.update_template_diff(after.freeze(), before)
try:
if tmpl_diff and self.needs_replace_with_tmpl_diff(tmpl_diff):
raise UpdateReplace(self)
prop_diff = self.update_template_diff_properties(after_props,
before_props)
yield self.action_handler_task(action,
args=[after, tmpl_diff,
prop_diff])
except UpdateReplace:
with excutils.save_and_reraise_exception():
self._prepare_update_replace(action)
self.t = after
self.reparse()
self._update_stored_properties()
if update_templ_func is not None:
# template/requires will be persisted by _action_recorder()
update_templ_func(persist=False)
yield self._break_if_required(
self.UPDATE, environment.HOOK_POST_UPDATE)
def prepare_for_replace(self):
"""Prepare resource for replacing.
Some resources require additional actions before being replaced.
If a resource needs to be changed before replacement, this method should
be implemented in the resource class.
"""
pass
def restore_prev_rsrc(self, convergence=False):
"""Restore resource after rollback.
Some resources require additional actions after rollback.
If a resource needs to be changed during rollback, this method should
be implemented in the resource class.
"""
pass
def check(self):
"""Checks that the physical resource is in its expected state.
Gets the current status of the physical resource and updates the
database accordingly. If check is not supported by the resource,
default action is to fail and revert the resource's status to its
original state with the added message that check was not performed.
"""
action = self.CHECK
LOG.info('Checking %s', self)
if hasattr(self, 'handle_%s' % action.lower()):
if self.state == (self.INIT, self.COMPLETE):
reason = _('Can not check %s, resource not '
'created yet.') % self.name
self.state_set(action, self.FAILED, reason)
exc = Exception(_('Resource %s not created yet.') % self.name)
failure = exception.ResourceFailure(exc, self, action)
raise failure
with self.frozen_properties():
return self._do_action(action)
else:
reason = '%s not supported for %s' % (action, self.type())
self.state_set(action, self.COMPLETE, reason)
def _verify_check_conditions(self, checks):
def valid(check):
if isinstance(check['expected'], list):
return check['current'] in check['expected']
else:
return check['current'] == check['expected']
msg = _("'%(attr)s': expected '%(expected)s', got '%(current)s'")
invalid_checks = [
msg % check
for check in checks
if not valid(check)
]
if invalid_checks:
raise exception.Error('; '.join(invalid_checks))
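# Illustrative sketch of the expected 'checks' structure (hypothetical
# values; handle_check() implementations typically build a list like this):
#
#     self._verify_check_conditions([
#         {'attr': 'status', 'expected': 'ACTIVE', 'current': server.status},
#         {'attr': 'locked', 'expected': [False, None], 'current': server.locked},
#     ])
#
# An entry fails when 'current' differs from 'expected' (or, when 'expected'
# is a list, is not contained in it); all failures are reported in one Error.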
def suspend(self):
"""Return a task to suspend the resource.
Subclasses should provide a handle_suspend() method to implement
suspend.
"""
action = self.SUSPEND
# Don't try to suspend the resource unless it's in a stable state
# or if the previous suspend failed
if (self.action == self.DELETE or
(self.action != self.SUSPEND and
self.status != self.COMPLETE)):
exc = exception.Error(_('State %s invalid for suspend')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('suspending %s', self)
with self.frozen_properties():
return self._do_action(action)
def resume(self):
"""Return a task to resume the resource.
Subclasses should provide a handle_resume() method to implement resume.
"""
action = self.RESUME
# Allow resume a resource if it's SUSPEND_COMPLETE
# or RESUME_FAILED or RESUME_COMPLETE. Recommend to check
# the real state of physical resource in handle_resume()
if self.state not in ((self.SUSPEND, self.COMPLETE),
(self.RESUME, self.FAILED),
(self.RESUME, self.COMPLETE)):
exc = exception.Error(_('State %s invalid for resume')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('resuming %s', self)
with self.frozen_properties():
return self._do_action(action)
def snapshot(self):
"""Snapshot the resource and return the created data, if any."""
LOG.info('snapshotting %s', self)
with self.frozen_properties():
return self._do_action(self.SNAPSHOT)
@scheduler.wrappertask
def delete_snapshot(self, data):
yield self.action_handler_task('delete_snapshot', args=[data])
def physical_resource_name(self):
if self.id is None or self.action == self.INIT:
return None
name = '%s-%s-%s' % (self.stack.name.rstrip('*'),
self.name,
short_id.get_id(self.uuid))
if self.physical_resource_name_limit:
name = self.reduce_physical_resource_name(
name, self.physical_resource_name_limit)
return name
@staticmethod
def reduce_physical_resource_name(name, limit):
"""Reduce length of physical resource name to a limit.
The reduced name will consist of the following:
* the first 2 characters of the name
* a hyphen
* the end of the name, truncated on the left to bring
the name length within the limit
:param name: The name to reduce the length of
:param limit: The max length limit
:returns: A name whose length is less than or equal to the limit
"""
if len(name) <= limit:
return name
if limit < 4:
raise ValueError(_('limit cannot be less than 4'))
postfix_length = limit - 3
return name[0:2] + '-' + name[-postfix_length:]
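# Illustrative sketch (hypothetical name and limit, not part of the original
# module): the helper keeps the first two characters, a hyphen, and the
# right-hand tail of the name so the result fits within the limit.
#
#   Resource.reduce_physical_resource_name('teststack-myresource-abcd1234', 16)
#   # -> 'te-urce-abcd1234'   (2 chars + '-' + the last 13 chars == 16)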
def validate(self):
"""Validate the resource.
This may be overridden by resource plugins to add extra
validation logic specific to the resource implementation.
"""
LOG.info('Validating %s', self)
return self.validate_template()
def validate_template(self):
"""Validate structural/syntax aspects of the resource definition.
Resource plugins should not override this, because this interface
is expected to be called pre-create so things normally valid
in an overridden validate() such as accessing properties
may not work.
"""
self._validate_service_availability(
self.stack.context,
self.t.resource_type
)
try:
self.t.validate()
self.validate_deletion_policy(self.t.deletion_policy())
self.t.update_policy(self.update_policy_schema,
self.context).validate()
validate = self.properties.validate(
with_value=self.stack.strict_validate)
except exception.StackValidationFailed as ex:
path = [self.stack.t.RESOURCES, self.t.name]
if ex.path:
path.append(self.stack.t.get_section_name(ex.path[0]))
path.extend(ex.path[1:])
raise exception.StackValidationFailed(
error=ex.error,
path=path,
message=ex.error_message)
return validate
@classmethod
def validate_deletion_policy(cls, policy):
path = rsrc_defn.DELETION_POLICY
if policy not in rsrc_defn.ResourceDefinition.DELETION_POLICIES:
msg = _('Invalid deletion policy "%s"') % policy
raise exception.StackValidationFailed(message=msg, path=path)
if policy == rsrc_defn.ResourceDefinition.SNAPSHOT:
if not callable(getattr(cls, 'handle_snapshot_delete', None)):
msg = _('"%s" deletion policy not supported') % policy
raise exception.StackValidationFailed(message=msg, path=path)
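# Illustrative sketch (assuming the usual 'Delete'/'Retain'/'Snapshot'
# deletion policy values; SomeResource is hypothetical and this is not part
# of the original module): unknown policies fail validation, and 'Snapshot'
# additionally requires a handle_snapshot_delete() implementation on the
# plugin class.
#
#   SomeResource.validate_deletion_policy('Retain')   # passes
#   SomeResource.validate_deletion_policy('Destroy')  # StackValidationFailed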
def _update_replacement_data(self, template_id):
# Update the replacement resource's needed_by and replaces
# fields. Make sure that the replacement belongs to the given
# template and there is no engine working on it.
if self.replaced_by is None:
return
try:
db_res = resource_objects.Resource.get_obj(
self.context, self.replaced_by,
fields=('current_template_id', 'atomic_key'))
except exception.NotFound:
LOG.info("Could not find replacement of resource %(name)s "
"with id %(id)s while updating needed_by.",
{'name': self.name, 'id': self.replaced_by})
return
if (db_res.current_template_id == template_id):
# Following update failure is ignorable; another
# update might have locked/updated the resource.
if db_res.select_and_update(
{'needed_by': self.needed_by,
'replaces': None},
atomic_key=db_res.atomic_key,
expected_engine_id=None):
self._incr_atomic_key(self._atomic_key)
def delete_convergence(self, template_id, input_data, engine_id, timeout,
progress_callback=None):
"""Destroys the resource if it doesn't belong to given template.
The given template is supposed to be the current template being
provisioned.
Also, since this resource is visited as part of the clean-up phase,
the needed_by should be updated. If this resource was replaced by a
more recent resource, then delete it and update the replacement
resource's needed_by and replaces fields.
"""
self._calling_engine_id = engine_id
self.needed_by = list(set(v for v in input_data.values()
if v is not None))
if self.current_template_id != template_id:
# just delete the resources in INIT state
if self.action == self.INIT:
try:
resource_objects.Resource.delete(self.context, self.id)
except exception.NotFound:
pass
else:
runner = scheduler.TaskRunner(self.delete)
runner(timeout=timeout,
progress_callback=progress_callback)
self._update_replacement_data(template_id)
def handle_delete(self):
"""Default implementation; should be overridden by resources."""
if self.entity and self.resource_id is not None:
try:
obj = getattr(self.client(), self.entity)
obj.delete(self.resource_id)
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
return self.resource_id
@scheduler.wrappertask
def delete(self):
"""A task to delete the resource.
Subclasses should provide a handle_delete() method to customise
deletion.
"""
@excutils.exception_filter
def should_retry(exc):
if count >= retry_limit:
return False
if self.default_client_name:
return (self.client_plugin().is_conflict(exc) or
isinstance(exc, exception.PhysicalResourceExists))
return isinstance(exc, exception.PhysicalResourceExists)
action = self.DELETE
if (self.action, self.status) == (self.DELETE, self.COMPLETE):
return
# No need to delete if the resource has never been created
if self.action == self.INIT:
return
initial_state = self.state
# This method can be called when we replace a resource, too. In that
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.DELETE:
yield self._break_if_required(
self.DELETE, environment.HOOK_PRE_DELETE)
LOG.info('deleting %s', self)
if self._stored_properties_data is not None:
# On delete we can't rely on re-resolving the properties
# so use the stored frozen_definition instead
self.properties = self.frozen_definition().properties(
self.properties_schema, self.context)
self.translate_properties(self.properties)
with self._action_recorder(action):
if self.abandon_in_progress:
deletion_policy = self.t.RETAIN
else:
deletion_policy = self.t.deletion_policy()
if deletion_policy != self.t.RETAIN:
if deletion_policy == self.t.SNAPSHOT:
action_args = [[initial_state], 'snapshot']
else:
action_args = []
count = -1
retry_limit = max(cfg.CONF.action_retry_limit, 0)
while True:
count += 1
LOG.info('delete %(name)s attempt %(attempt)d' %
{'name': six.text_type(self), 'attempt': count+1})
if count:
delay = timeutils.retry_backoff_delay(count,
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
yield waiter.as_task(timeout=delay)
with excutils.exception_filter(should_retry):
yield self.action_handler_task(action,
*action_args)
break
if self.stack.action == self.stack.DELETE:
yield self._break_if_required(
self.DELETE, environment.HOOK_POST_DELETE)
@scheduler.wrappertask
def destroy(self):
"""A task to delete the resource and remove it from the database."""
yield self.delete()
if self.id is None:
return
try:
resource_objects.Resource.delete(self.context, self.id)
except exception.NotFound:
# Don't fail on delete if the db entry has
# not been created yet.
pass
self.id = None
def resource_id_set(self, inst):
self.resource_id = inst
if self.id is not None:
try:
resource_objects.Resource.update_by_id(
self.context,
self.id,
{'physical_resource_id': self.resource_id})
except Exception as ex:
LOG.warning('db error %s', ex)
def store(self, set_metadata=False, lock=LOCK_NONE):
"""Create the resource in the database.
If self.id is set, we update the existing resource record.
"""
if not self.root_stack_id:
self.root_stack_id = self.stack.root_stack_id()
rs = {'action': self.action,
'status': self.status,
'status_reason': six.text_type(self.status_reason),
'stack_id': self.stack.id,
'physical_resource_id': self.resource_id,
'name': self.name,
'rsrc_prop_data_id':
self._create_or_replace_rsrc_prop_data(),
'needed_by': self.needed_by,
'requires': self.requires,
'replaces': self.replaces,
'replaced_by': self.replaced_by,
'current_template_id': self.current_template_id,
'root_stack_id': self.root_stack_id,
'updated_at': self.updated_time,
'properties_data': None}
if set_metadata:
metadata = self.t.metadata()
rs['rsrc_metadata'] = metadata
self._rsrc_metadata = metadata
if self.id is not None:
if (lock == self.LOCK_NONE or
(lock in {self.LOCK_ACQUIRE, self.LOCK_RELEASE} and
self._calling_engine_id is None)):
resource_objects.Resource.update_by_id(
self.context, self.id, rs)
if lock != self.LOCK_NONE:
LOG.error('No calling_engine_id in store() %s',
six.text_type(rs))
else:
self._store_with_lock(rs, lock)
else:
new_rs = resource_objects.Resource.create(self.context, rs)
self.id = new_rs.id
self.uuid = new_rs.uuid
self.created_time = new_rs.created_at
def _store_with_lock(self, rs, lock):
if lock == self.LOCK_ACQUIRE:
rs['engine_id'] = self._calling_engine_id
expected_engine_id = None
elif lock == self.LOCK_RESPECT:
expected_engine_id = None
elif lock == self.LOCK_RELEASE:
expected_engine_id = self._calling_engine_id
rs['engine_id'] = None
else:
assert False, "Invalid lock action: %s" % lock
if resource_objects.Resource.select_and_update_by_id(
self.context, self.id, rs, expected_engine_id,
self._atomic_key):
self._incr_atomic_key(self._atomic_key)
else:
LOG.info('Resource %s is locked or does not exist',
six.text_type(self))
LOG.debug('Resource id:%(resource_id)s locked or does not exist. '
'Expected atomic_key:%(atomic_key)s, '
'accessing from engine_id:%(engine_id)s',
{'resource_id': self.id,
'atomic_key': self._atomic_key,
'engine_id': self._calling_engine_id})
raise exception.UpdateInProgress(self.name)
def _add_event(self, action, status, reason):
"""Add a state change event to the database."""
physical_res_id = self.resource_id or self.physical_resource_name()
ev = event.Event(self.context, self.stack, action, status, reason,
physical_res_id, self._rsrc_prop_data_id,
self._stored_properties_data, self.name, self.type())
ev.store()
self.stack.dispatch_event(ev)
@contextlib.contextmanager
def lock(self, engine_id):
self._calling_engine_id = engine_id
try:
if engine_id is not None:
self._store_with_lock({}, self.LOCK_ACQUIRE)
yield
except exception.UpdateInProgress:
raise
except BaseException:
with excutils.save_and_reraise_exception():
if engine_id is not None:
self._store_with_lock({}, self.LOCK_RELEASE)
else:
if engine_id is not None:
self._store_with_lock({}, self.LOCK_RELEASE)
def _resolve_any_attribute(self, attr):
"""Method for resolving any attribute, including base attributes.
This method uses the basic _resolve_attribute method for resolving
plugin-specific attributes. Base attributes will be resolved with the
corresponding method, which should be defined in each resource
class.
:param attr: the name of the attribute to resolve
:returns: the resolved attribute value
"""
if attr in self.base_attributes_schema:
# check resource_id, because usually it is required for getting
# information about resource
if not self.resource_id:
return None
try:
return getattr(self, '_{0}_resource'.format(attr))()
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
else:
try:
return self._resolve_attribute(attr)
except Exception as ex:
if self.default_client_name is not None:
self.client_plugin().ignore_not_found(ex)
return None
raise
def _show_resource(self):
"""Default implementation; should be overridden by resources.
:returns: the map of resource information or None
"""
if self.entity:
try:
obj = getattr(self.client(), self.entity)
resource = obj.get(self.resource_id)
if isinstance(resource, dict):
return resource
else:
return resource.to_dict()
except AttributeError as ex:
LOG.warning("Resolving 'show' attribute has failed : %s",
ex)
return None
def get_live_resource_data(self):
"""Default implementation; can be overridden by resources.
Get the resource data, handling exceptions raised while fetching it.
"""
try:
resource_data = self._show_resource()
except Exception as ex:
if (self.default_client_name is not None and
self.client_plugin().is_not_found(ex)):
raise exception.EntityNotFound(
entity='Resource', name=self.name)
raise
return resource_data
def parse_live_resource_data(self, resource_properties, resource_data):
"""Default implementation; can be overridden by resources.
Parse resource data so it can be used to update properties with the
live state.
:param resource_properties: properties of stored resource plugin.
:param resource_data: data from current live state of a resource.
"""
resource_result = {}
for key in self._update_allowed_properties:
if key in resource_data:
if key == 'name' and resource_properties.get(key) is None:
# We use `physical_resource_name` for the name property in some
# resources when no name is provided during create, so we
# shouldn't copy name from resource_data if the stored
# property is None (that is most likely one of the cases where
# `physical_resource_name` is being used).
continue
resource_result[key] = resource_data.get(key)
return resource_result
def get_live_state(self, resource_properties):
"""Default implementation; should be overridden by resources.
:param resource_properties: resource's object of Properties class.
:returns: dict of resource's real state of properties.
"""
resource_data = self.get_live_resource_data()
if resource_data is None:
return {}
return self.parse_live_resource_data(resource_properties,
resource_data)
def _update_properties_with_live_state(self, resource_properties,
live_properties):
"""Update resource properties data with live state properties.
Note that live_properties can contain None values, which covers the
case where a property is set to some value but the live state has no
such property (i.e. the live value is None); during the update the
property should then be updated with None.
"""
for key in resource_properties:
if key in live_properties:
if resource_properties.get(key) != live_properties.get(key):
resource_properties.data.update(
{key: live_properties.get(key)})
def _resolve_attribute(self, name):
"""Default implementation of resolving resource's attributes.
Should be overridden by resources, that expose attributes.
:param name: The attribute to resolve
:returns: the resource attribute named key
"""
# By default, no attributes resolve
pass
def regenerate_info_schema(self, definition):
"""Default implementation; should be overridden by resources.
Should be overridden by resources that require a schema refresh
during update, e.g. TemplateResource.
:param definition: Resource Definition
"""
# By default, do not regenerate
pass
def state_reset(self):
"""Reset state to (INIT, COMPLETE)."""
self.action = self.INIT
self.status = self.COMPLETE
def state_set(self, action, status, reason="state changed",
lock=LOCK_NONE):
if action not in self.ACTIONS:
raise ValueError(_("Invalid action %s") % action)
if status not in self.STATUSES:
raise ValueError(_("Invalid status %s") % status)
old_state = (self.action, self.status)
new_state = (action, status)
set_metadata = self.action == self.INIT
self.action = action
self.status = status
self.status_reason = reason
self.store(set_metadata, lock=lock)
if new_state != old_state:
self._add_event(action, status, reason)
if status != self.COMPLETE:
self.clear_stored_attributes()
@property
def state(self):
"""Returns state, tuple of action, status."""
return (self.action, self.status)
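# Illustrative sketch (hypothetical usage, not part of the original
# module): callers compare state as an (action, status) tuple, e.g.
#
#   if res.state == (res.CREATE, res.COMPLETE):
#       ...  # resource finished creating successfully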
def store_attributes(self):
assert self.id is not None
if self.status != self.COMPLETE or self.action in (self.INIT,
self.DELETE):
return
if not self.attributes.has_new_cached_attrs():
return
try:
attr_data_id = resource_objects.Resource.store_attributes(
self.context, self.id, self._atomic_key,
self.attributes.cached_attrs, self._attr_data_id)
if attr_data_id is not None:
self._attr_data_id = attr_data_id
except Exception as ex:
LOG.error('store_attributes rsrc %(name)s %(id)s DB error %(ex)s',
{'name': self.name, 'id': self.id, 'ex': ex})
def clear_stored_attributes(self):
if self._attr_data_id:
resource_objects.Resource.attr_data_delete(
self.context, self.id, self._attr_data_id)
self.attributes.reset_resolved_values()
def get_reference_id(self):
"""Default implementation for function get_resource.
This may be overridden by resource plugins to add extra
logic specific to the resource implementation.
"""
if self.resource_id is not None:
return six.text_type(self.resource_id)
else:
return six.text_type(self.name)
def FnGetRefId(self):
"""For the intrinsic function Ref.
:results: the id or name of the resource.
"""
return self.get_reference_id()
def physical_resource_name_or_FnGetRefId(self):
res_name = self.physical_resource_name()
if res_name is not None:
return six.text_type(res_name)
else:
return Resource.get_reference_id(self)
def get_attribute(self, key, *path):
"""Default implementation for function get_attr and Fn::GetAtt.
This may be overridden by resource plugins to add extra
logic specific to the resource implementation.
"""
try:
attribute = self.attributes[key]
except KeyError:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
return attributes.select_from_attribute(attribute, path)
def FnGetAtt(self, key, *path):
"""For the intrinsic function Fn::GetAtt.
:param key: the attribute key.
:param path: a list of path components to select from the attribute.
:returns: the attribute value.
"""
cache_custom = ((self.attributes.get_cache_mode(key) !=
attributes.Schema.CACHE_NONE) and
(type(self).get_attribute != Resource.get_attribute))
if cache_custom:
if path:
full_key = sync_point.str_pack_tuple((key,) + path)
else:
full_key = key
if full_key in self.attributes.cached_attrs:
return self.attributes.cached_attrs[full_key]
attr_val = self.get_attribute(key, *path)
if cache_custom:
self.attributes.set_cached_attr(full_key, attr_val)
return attr_val
def _signal_check_action(self):
if self.action in self.no_signal_actions:
self._add_event(self.action, self.status,
'Cannot signal resource during %s' % self.action)
msg = _('Signal resource during %s') % self.action
raise exception.NotSupported(feature=msg)
def _signal_check_hook(self, details):
if details and 'unset_hook' in details:
hook = details['unset_hook']
if not environment.valid_hook_type(hook):
msg = (_('Invalid hook type "%(hook)s" for %(resource)s') %
{'hook': hook, 'resource': six.text_type(self)})
raise exception.InvalidBreakPointHook(message=msg)
if not self.has_hook(hook):
msg = (_('The "%(hook)s" hook is not defined '
'on %(resource)s') %
{'hook': hook, 'resource': six.text_type(self)})
raise exception.InvalidBreakPointHook(message=msg)
def _unset_hook(self, details):
# Clear the hook without interfering with resources'
# `handle_signal` callbacks:
hook = details['unset_hook']
self.clear_hook(hook)
LOG.info('Clearing %(hook)s hook on %(resource)s',
{'hook': hook, 'resource': six.text_type(self)})
self._add_event(self.action, self.status,
"Hook %s is cleared" % hook)
def _handle_signal(self, details):
if not callable(getattr(self, 'handle_signal', None)):
raise exception.ResourceActionNotSupported(action='signal')
def get_string_details():
if details is None:
return 'No signal details provided'
if isinstance(details, six.string_types):
return details
if isinstance(details, dict):
if all(k in details for k in ('previous', 'current',
'reason')):
# this is from Ceilometer.
auto = '%(previous)s to %(current)s (%(reason)s)' % details
return 'alarm state changed from %s' % auto
return 'Unknown'
try:
signal_result = self.handle_signal(details)
if signal_result:
reason_string = "Signal: %s" % signal_result
else:
reason_string = get_string_details()
self._add_event('SIGNAL', self.status, reason_string)
except NoActionRequired:
# Don't log an event as it just spams the user.
pass
except Exception as ex:
if hasattr(self, '_db_res_is_deleted'):
# No spam required
return
LOG.info('signal %(name)s : %(msg)s',
{'name': six.text_type(self),
'msg': six.text_type(ex)},
exc_info=True)
failure = exception.ResourceFailure(ex, self)
raise failure
def signal(self, details=None, need_check=True):
"""Signal the resource.
Returns True if the metadata for all resources in the stack needs to
be regenerated as a result of the signal, False if it should not be.
Subclasses should provide a handle_signal() method to implement the
signal. The base class raises an exception if no handler is implemented.
"""
if need_check:
self._signal_check_hook(details)
if details and 'unset_hook' in details:
self._unset_hook(details)
return False
if need_check:
self._signal_check_action()
with self.frozen_properties():
self._handle_signal(details)
return self.signal_needs_metadata_updates
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
raise UpdateReplace(self.name)
def metadata_update(self, new_metadata=None):
"""No-op for resources which don't explicitly override this method."""
if new_metadata:
LOG.warning("Resource %s does not implement metadata update",
self.name)
@classmethod
def resource_to_template(cls, resource_type, template_type='cfn'):
"""Generate a provider template that mirrors the resource.
:param resource_type: The resource type to be displayed in the template
:param template_type: the template type to generate, cfn or hot.
:returns: A template where the resource's properties_schema is mapped
as parameters, and the resource's attributes_schema is mapped as
outputs
"""
props_schema = {}
for name, schema_dict in cls.properties_schema.items():
schema = properties.Schema.from_legacy(schema_dict)
if schema.support_status.status != support.HIDDEN:
props_schema[name] = schema
params, props = (properties.Properties.
schema_to_parameters_and_properties(props_schema,
template_type))
resource_name = cls.__name__
outputs = attributes.Attributes.as_outputs(resource_name, cls,
template_type)
description = 'Initial template of %s' % resource_name
return cls.build_template_dict(resource_name, resource_type,
template_type, params, props,
outputs, description)
@staticmethod
def build_template_dict(res_name, res_type, tmpl_type,
params, props, outputs, description):
if tmpl_type == 'hot':
tmpl_dict = {
hot_tmpl.HOTemplate20161014.VERSION: '2016-10-14',
hot_tmpl.HOTemplate20161014.DESCRIPTION: description,
hot_tmpl.HOTemplate20161014.PARAMETERS: params,
hot_tmpl.HOTemplate20161014.OUTPUTS: outputs,
hot_tmpl.HOTemplate20161014.RESOURCES: {
res_name: {
hot_tmpl.HOTemplate20161014.RES_TYPE: res_type,
hot_tmpl.HOTemplate20161014.RES_PROPERTIES: props}}}
else:
tmpl_dict = {
cfn_tmpl.CfnTemplate.ALTERNATE_VERSION: '2012-12-12',
cfn_tmpl.CfnTemplate.DESCRIPTION: description,
cfn_tmpl.CfnTemplate.PARAMETERS: params,
cfn_tmpl.CfnTemplate.RESOURCES: {
res_name: {
cfn_tmpl.CfnTemplate.RES_TYPE: res_type,
cfn_tmpl.CfnTemplate.RES_PROPERTIES: props}
},
cfn_tmpl.CfnTemplate.OUTPUTS: outputs}
return tmpl_dict
def data(self):
"""Return the resource data for this resource.
Use methods data_set and data_delete to modify the resource data
for this resource.
:returns: a dict representing the resource data for this resource.
"""
if self._data is None and self.id is not None:
try:
self._data = resource_data_objects.ResourceData.get_all(self)
except exception.NotFound:
pass
return self._data or {}
def data_set(self, key, value, redact=False):
"""Set a key in the resource data."""
resource_data_objects.ResourceData.set(self, key, value, redact)
# force fetch all resource data from the database again
self._data = None
def data_delete(self, key):
"""Remove a key from the resource data.
:returns: True if the key existed to delete.
"""
try:
resource_data_objects.ResourceData.delete(self, key)
except exception.NotFound:
return False
else:
# force fetch all resource data from the database again
self._data = None
return True
def _create_or_replace_rsrc_prop_data(self):
if self._rsrc_prop_data_id is not None:
return self._rsrc_prop_data_id
if not self._stored_properties_data:
return None
self._rsrc_prop_data_id = \
rpd_objects.ResourcePropertiesData(self.context).create(
self.context, self._stored_properties_data).id
return self._rsrc_prop_data_id
def is_using_neutron(self):
try:
sess_client = self.client('neutron').httpclient
if not sess_client.get_endpoint():
return False
except Exception:
return False
return True
@staticmethod
def _make_resolver(ref):
"""Return an attribute resolution method.
This builds a resolver without a strong reference to this resource, to
break a possible cycle.
"""
def resolve(attr):
res = ref()
if res is None:
raise RuntimeError("Resource collected")
return res._resolve_any_attribute(attr)
return resolve
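# Illustrative sketch (hypothetical usage, not part of the original
# module): the resolver holds no strong reference to the resource, so
# attribute resolution raises once the resource has been garbage
# collected.
#
#   resolve = Resource._make_resolver(weakref.ref(res))
#   resolve('show')   # delegates to res._resolve_any_attribute('show')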
|
the-stack_0_2899 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import json
import logging
import random
import re
import threading
from builtins import next
from builtins import object
from future.utils import itervalues
from google import protobuf
import apache_beam as beam
from apache_beam import coders
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import coder_impl
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.metrics import monitoring_infos
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import common
from apache_beam.runners import pipeline_context
from apache_beam.runners.dataflow import dataflow_runner
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import operations
from apache_beam.runners.worker import statesampler
from apache_beam.transforms import sideinputs
from apache_beam.transforms import userstate
from apache_beam.utils import counters
from apache_beam.utils import proto_utils
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
# This module is experimental. No backwards-compatibility guarantees.
DATA_INPUT_URN = 'urn:org.apache.beam:source:runner:0.1'
DATA_OUTPUT_URN = 'urn:org.apache.beam:sink:runner:0.1'
IDENTITY_DOFN_URN = 'urn:org.apache.beam:dofn:identity:0.1'
# TODO(vikasrk): Fix this once runner sends appropriate common_urns.
OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN = 'urn:beam:dofn:javasdk:0.1'
OLD_DATAFLOW_RUNNER_HARNESS_READ_URN = 'urn:org.apache.beam:source:java:0.1'
class RunnerIOOperation(operations.Operation):
"""Common baseclass for runner harness IO operations."""
def __init__(self, name_context, step_name, consumers, counter_factory,
state_sampler, windowed_coder, target, data_channel):
super(RunnerIOOperation, self).__init__(
name_context, None, counter_factory, state_sampler)
self.windowed_coder = windowed_coder
self.windowed_coder_impl = windowed_coder.get_impl()
# target represents the consumer for the bytes in the data plane for a
# DataInputOperation or a producer of these bytes for a DataOutputOperation.
self.target = target
self.data_channel = data_channel
for _, consumer_ops in consumers.items():
for consumer in consumer_ops:
self.add_receiver(consumer, 0)
class DataOutputOperation(RunnerIOOperation):
"""A sink-like operation that gathers outputs to be sent back to the runner.
"""
def set_output_stream(self, output_stream):
self.output_stream = output_stream
def process(self, windowed_value):
self.windowed_coder_impl.encode_to_stream(
windowed_value, self.output_stream, True)
self.output_stream.maybe_flush()
def finish(self):
self.output_stream.close()
super(DataOutputOperation, self).finish()
class DataInputOperation(RunnerIOOperation):
"""A source-like operation that gathers input from the runner.
"""
def __init__(self, operation_name, step_name, consumers, counter_factory,
state_sampler, windowed_coder, input_target, data_channel):
super(DataInputOperation, self).__init__(
operation_name, step_name, consumers, counter_factory, state_sampler,
windowed_coder, target=input_target, data_channel=data_channel)
# We must do this manually as we don't have a spec or spec.output_coders.
self.receivers = [
operations.ConsumerSet.create(
self.counter_factory, self.name_context.step_name, 0,
next(iter(itervalues(consumers))), self.windowed_coder)]
self.splitting_lock = threading.Lock()
def start(self):
super(DataInputOperation, self).start()
self.index = -1
self.stop = float('inf')
def process(self, windowed_value):
self.output(windowed_value)
def process_encoded(self, encoded_windowed_values):
input_stream = coder_impl.create_InputStream(encoded_windowed_values)
while input_stream.size() > 0:
with self.splitting_lock:
if self.index == self.stop - 1:
return
self.index += 1
decoded_value = self.windowed_coder_impl.decode_from_stream(
input_stream, True)
self.output(decoded_value)
def try_split(self, fraction_of_remainder, total_buffer_size):
with self.splitting_lock:
if total_buffer_size < self.index + 1:
total_buffer_size = self.index + 1
elif self.stop and total_buffer_size > self.stop:
total_buffer_size = self.stop
if self.index == -1:
# We are "finished" with the (non-existent) previous element.
current_element_progress = 1
else:
current_element_progress_object = (
self.receivers[0].current_element_progress())
if current_element_progress_object is None:
current_element_progress = 0.5
else:
current_element_progress = (
current_element_progress_object.fraction_completed)
# Now figure out where to split.
# The units here (except for keep_of_element_remainder) are all in
# terms of number of (possibly fractional) elements.
remainder = total_buffer_size - self.index - current_element_progress
keep = remainder * fraction_of_remainder
if current_element_progress < 1:
keep_of_element_remainder = keep / (1 - current_element_progress)
# If it's less than what's left of the current element,
# try splitting at the current element.
if keep_of_element_remainder < 1:
split = self.receivers[0].try_split(keep_of_element_remainder)
if split:
element_primary, element_residual = split
self.stop = self.index + 1
return self.index - 1, element_primary, element_residual, self.stop
# Otherwise, split at the closest element boundary.
# pylint: disable=round-builtin
stop_index = (
self.index + max(1, int(round(current_element_progress + keep))))
if stop_index < self.stop:
self.stop = stop_index
return self.stop - 1, None, None, self.stop
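# Illustrative walk-through of try_split() (hypothetical numbers, not part
# of the original module): with total_buffer_size=10, index=3 and the
# current element half processed (progress 0.5), the remainder is
# 10 - 3 - 0.5 = 6.5 elements. A fraction_of_remainder of 0.5 keeps
# 3.25 of them; that is more than what is left of the current element,
# so the split falls on an element boundary:
# stop_index = 3 + max(1, round(0.5 + 3.25)) = 7, and the call returns
# (6, None, None, 7).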
class _StateBackedIterable(object):
def __init__(self, state_handler, state_key, coder_or_impl):
self._state_handler = state_handler
self._state_key = state_key
if isinstance(coder_or_impl, coders.Coder):
self._coder_impl = coder_or_impl.get_impl()
else:
self._coder_impl = coder_or_impl
def __iter__(self):
data, continuation_token = self._state_handler.blocking_get(self._state_key)
while True:
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
yield self._coder_impl.decode_from_stream(input_stream, True)
if not continuation_token:
break
else:
data, continuation_token = self._state_handler.blocking_get(
self._state_key, continuation_token)
def __reduce__(self):
return list, (list(self),)
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(
_StateBackedIterable)
class StateBackedSideInputMap(object):
def __init__(self, state_handler, transform_id, tag, side_input_data, coder):
self._state_handler = state_handler
self._transform_id = transform_id
self._tag = tag
self._side_input_data = side_input_data
self._element_coder = coder.wrapped_value_coder
self._target_window_coder = coder.window_coder
# TODO(robertwb): Limit the cache size.
self._cache = {}
def __getitem__(self, window):
target_window = self._side_input_data.window_mapping_fn(window)
if target_window not in self._cache:
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=self._transform_id,
side_input_id=self._tag,
window=self._target_window_coder.encode(target_window),
key=b''))
state_handler = self._state_handler
access_pattern = self._side_input_data.access_pattern
if access_pattern == common_urns.side_inputs.ITERABLE.urn:
raw_view = _StateBackedIterable(
state_handler, state_key, self._element_coder)
elif (access_pattern == common_urns.side_inputs.MULTIMAP.urn or
access_pattern ==
dataflow_runner._DataflowSideInput.DATAFLOW_MULTIMAP_URN):
cache = {}
key_coder_impl = self._element_coder.key_coder().get_impl()
value_coder = self._element_coder.value_coder()
class MultiMap(object):
def __getitem__(self, key):
if key not in cache:
keyed_state_key = beam_fn_api_pb2.StateKey()
keyed_state_key.CopyFrom(state_key)
keyed_state_key.multimap_side_input.key = (
key_coder_impl.encode_nested(key))
cache[key] = _StateBackedIterable(
state_handler, keyed_state_key, value_coder)
return cache[key]
def __reduce__(self):
# TODO(robertwb): Figure out how to support this.
raise TypeError(common_urns.side_inputs.MULTIMAP.urn)
raw_view = MultiMap()
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern)
self._cache[target_window] = self._side_input_data.view_fn(raw_view)
return self._cache[target_window]
def is_globally_windowed(self):
return (self._side_input_data.window_mapping_fn
== sideinputs._global_window_mapping_fn)
def reset(self):
# TODO(BEAM-5428): Cross-bundle caching respecting cache tokens.
self._cache = {}
class CombiningValueRuntimeState(userstate.RuntimeState):
def __init__(self, underlying_bag_state, combinefn):
self._combinefn = combinefn
self._underlying_bag_state = underlying_bag_state
def _read_accumulator(self, rewrite=True):
merged_accumulator = self._combinefn.merge_accumulators(
self._underlying_bag_state.read())
if rewrite:
self._underlying_bag_state.clear()
self._underlying_bag_state.add(merged_accumulator)
return merged_accumulator
def read(self):
return self._combinefn.extract_output(self._read_accumulator())
def add(self, value):
# Prefer blind writes, but don't let them grow unboundedly.
# This should be tuned to be much lower, but for now exercise
# both paths well.
if random.random() < 0.5:
accumulator = self._read_accumulator(False)
self._underlying_bag_state.clear()
else:
accumulator = self._combinefn.create_accumulator()
self._underlying_bag_state.add(
self._combinefn.add_input(accumulator, value))
def clear(self):
self._underlying_bag_state.clear()
def _commit(self):
self._underlying_bag_state._commit()
class _ConcatIterable(object):
"""An iterable that is the concatination of two iterables.
Unlike itertools.chain, this allows reiteration.
"""
def __init__(self, first, second):
self.first = first
self.second = second
def __iter__(self):
for elem in self.first:
yield elem
for elem in self.second:
yield elem
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(_ConcatIterable)
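# Illustrative sketch (hypothetical values, not part of the original
# module): unlike itertools.chain, a _ConcatIterable may be iterated more
# than once.
#
#   both = _ConcatIterable([1, 2], [3])
#   list(both)   # -> [1, 2, 3]
#   list(both)   # -> [1, 2, 3] again; reiteration is supported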
# TODO(BEAM-5428): Implement cross-bundle state caching.
class SynchronousBagRuntimeState(userstate.RuntimeState):
def __init__(self, state_handler, state_key, value_coder):
self._state_handler = state_handler
self._state_key = state_key
self._value_coder = value_coder
self._cleared = False
self._added_elements = []
def read(self):
return _ConcatIterable(
[] if self._cleared else _StateBackedIterable(
self._state_handler, self._state_key, self._value_coder),
self._added_elements)
def add(self, value):
self._added_elements.append(value)
def clear(self):
self._cleared = True
self._added_elements = []
def _commit(self):
if self._cleared:
self._state_handler.blocking_clear(self._state_key)
if self._added_elements:
value_coder_impl = self._value_coder.get_impl()
out = coder_impl.create_OutputStream()
for element in self._added_elements:
value_coder_impl.encode_to_stream(element, out, True)
self._state_handler.blocking_append(self._state_key, out.get())
class OutputTimer(object):
def __init__(self, key, window, receiver):
self._key = key
self._window = window
self._receiver = receiver
def set(self, ts):
ts = timestamp.Timestamp.of(ts)
self._receiver.receive(
windowed_value.WindowedValue(
(self._key, dict(timestamp=ts)), ts, (self._window,)))
def clear(self, timestamp):
self._receiver.receive((self._key, dict(clear=True)))
class FnApiUserStateContext(userstate.UserStateContext):
def __init__(
self, state_handler, transform_id, key_coder, window_coder, timer_specs):
self._state_handler = state_handler
self._transform_id = transform_id
self._key_coder = key_coder
self._window_coder = window_coder
self._timer_specs = timer_specs
self._timer_receivers = None
self._all_states = {}
def update_timer_receivers(self, receivers):
self._timer_receivers = {}
for tag in self._timer_specs:
self._timer_receivers[tag] = receivers.pop(tag)
def get_timer(self, timer_spec, key, window):
return OutputTimer(
key, window, self._timer_receivers[timer_spec.name])
def get_state(self, *args):
state_handle = self._all_states.get(args)
if state_handle is None:
state_handle = self._all_states[args] = self._create_state(*args)
return state_handle
def _create_state(self, state_spec, key, window):
if isinstance(state_spec,
(userstate.BagStateSpec, userstate.CombiningValueStateSpec)):
bag_state = SynchronousBagRuntimeState(
self._state_handler,
state_key=beam_fn_api_pb2.StateKey(
bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
ptransform_id=self._transform_id,
user_state_id=state_spec.name,
window=self._window_coder.encode(window),
key=self._key_coder.encode(key))),
value_coder=state_spec.coder)
if isinstance(state_spec, userstate.BagStateSpec):
return bag_state
else:
return CombiningValueRuntimeState(bag_state, state_spec.combine_fn)
else:
raise NotImplementedError(state_spec)
def commit(self):
for state in self._all_states.values():
state._commit()
def reset(self):
# TODO(BEAM-5428): Implement cross-bundle state caching.
self._all_states = {}
def memoize(func):
cache = {}
missing = object()
def wrapper(*args):
result = cache.get(args, missing)
if result is missing:
result = cache[args] = func(*args)
return result
return wrapper
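# Illustrative sketch (hypothetical usage, not part of the original
# module): memoize caches results by positional arguments only, so a
# second call with the same arguments skips the wrapped function.
#
#   @memoize
#   def lookup(coder_id):
#       return expensive_build(coder_id)   # hypothetical; runs once per coder_id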
def only_element(iterable):
element, = iterable
return element
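# Illustrative sketch (hypothetical values): only_element unpacks an
# iterable that must contain exactly one item.
#
#   only_element(['a'])       # -> 'a'
#   only_element(['a', 'b'])  # ValueError (too many values to unpack)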
class BundleProcessor(object):
"""A class for processing bundles of elements."""
def __init__(
self, process_bundle_descriptor, state_handler, data_channel_factory):
self.process_bundle_descriptor = process_bundle_descriptor
self.state_handler = state_handler
self.data_channel_factory = data_channel_factory
# TODO(robertwb): Figure out the correct prefix to use for output counters
# from StateSampler.
self.counter_factory = counters.CounterFactory()
self.state_sampler = statesampler.StateSampler(
'fnapi-step-%s' % self.process_bundle_descriptor.id,
self.counter_factory)
self.ops = self.create_execution_tree(self.process_bundle_descriptor)
for op in self.ops.values():
op.setup()
self.splitting_lock = threading.Lock()
def create_execution_tree(self, descriptor):
transform_factory = BeamTransformFactory(
descriptor, self.data_channel_factory, self.counter_factory,
self.state_sampler, self.state_handler)
def is_side_input(transform_proto, tag):
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
return tag in proto_utils.parse_Bytes(
transform_proto.spec.payload,
beam_runner_api_pb2.ParDoPayload).side_inputs
pcoll_consumers = collections.defaultdict(list)
for transform_id, transform_proto in descriptor.transforms.items():
for tag, pcoll_id in transform_proto.inputs.items():
if not is_side_input(transform_proto, tag):
pcoll_consumers[pcoll_id].append(transform_id)
@memoize
def get_operation(transform_id):
transform_consumers = {
tag: [get_operation(op) for op in pcoll_consumers[pcoll_id]]
for tag, pcoll_id
in descriptor.transforms[transform_id].outputs.items()
}
return transform_factory.create_operation(
transform_id, transform_consumers)
# Operations must be started (hence returned) in order.
@memoize
def topological_height(transform_id):
return 1 + max(
[0] +
[topological_height(consumer)
for pcoll in descriptor.transforms[transform_id].outputs.values()
for consumer in pcoll_consumers[pcoll]])
return collections.OrderedDict([
(transform_id, get_operation(transform_id))
for transform_id in sorted(
descriptor.transforms, key=topological_height, reverse=True)])
def reset(self):
self.counter_factory.reset()
self.state_sampler.reset()
# Side input caches.
for op in self.ops.values():
op.reset()
def process_bundle(self, instruction_id):
expected_inputs = []
for op in self.ops.values():
if isinstance(op, DataOutputOperation):
# TODO(robertwb): Is there a better way to pass the instruction id to
# the operation?
op.set_output_stream(op.data_channel.output_stream(
instruction_id, op.target))
elif isinstance(op, DataInputOperation):
# We must wait until we receive "end of stream" for each of these ops.
expected_inputs.append(op)
try:
execution_context = ExecutionContext()
self.state_sampler.start()
# Start all operations.
for op in reversed(self.ops.values()):
logging.debug('start %s', op)
op.execution_context = execution_context
op.start()
# Inject inputs from data plane.
data_channels = collections.defaultdict(list)
input_op_by_target = {}
for input_op in expected_inputs:
data_channels[input_op.data_channel].append(input_op.target)
# ignores input name
input_op_by_target[
input_op.target.primitive_transform_reference] = input_op
for data_channel, expected_targets in data_channels.items():
for data in data_channel.input_elements(
instruction_id, expected_targets):
input_op_by_target[
data.target.primitive_transform_reference
].process_encoded(data.data)
# Finish all operations.
for op in self.ops.values():
logging.debug('finish %s', op)
op.finish()
return ([self.delayed_bundle_application(op, residual)
for op, residual in execution_context.delayed_applications],
self.requires_finalization())
finally:
# Ensure any in-flight split attempts complete.
with self.splitting_lock:
pass
self.state_sampler.stop_if_still_running()
def finalize_bundle(self):
for op in self.ops.values():
op.finalize_bundle()
return beam_fn_api_pb2.FinalizeBundleResponse()
def requires_finalization(self):
return any(op.needs_finalization() for op in self.ops.values())
def try_split(self, bundle_split_request):
split_response = beam_fn_api_pb2.ProcessBundleSplitResponse()
with self.splitting_lock:
for op in self.ops.values():
if isinstance(op, DataInputOperation):
desired_split = bundle_split_request.desired_splits.get(
op.target.primitive_transform_reference)
if desired_split:
split = op.try_split(desired_split.fraction_of_remainder,
desired_split.estimated_input_elements)
if split:
(primary_end, element_primary, element_residual, residual_start,
) = split
if element_primary:
split_response.primary_roots.add().CopyFrom(
self.delayed_bundle_application(
*element_primary).application)
if element_residual:
split_response.residual_roots.add().CopyFrom(
self.delayed_bundle_application(*element_residual))
split_response.channel_splits.extend([
beam_fn_api_pb2.ProcessBundleSplitResponse.ChannelSplit(
ptransform_id=op.target.primitive_transform_reference,
input_id=op.target.name,
last_primary_element=primary_end,
first_residual_element=residual_start)])
return split_response
def delayed_bundle_application(self, op, deferred_remainder):
ptransform_id, main_input_tag, main_input_coder, outputs = op.input_info
# TODO(SDF): For non-root nodes, need main_input_coder + residual_coder.
element_and_restriction, watermark = deferred_remainder
if watermark:
proto_watermark = protobuf.Timestamp()
proto_watermark.FromMicroseconds(watermark.micros)
output_watermarks = {output: proto_watermark for output in outputs}
else:
output_watermarks = None
return beam_fn_api_pb2.DelayedBundleApplication(
application=beam_fn_api_pb2.BundleApplication(
ptransform_id=ptransform_id,
input_id=main_input_tag,
output_watermarks=output_watermarks,
element=main_input_coder.get_impl().encode_nested(
element_and_restriction)))
def metrics(self):
# DEPRECATED
return beam_fn_api_pb2.Metrics(
# TODO(robertwb): Rename to progress?
ptransforms={
transform_id:
self._fix_output_tags(transform_id, op.progress_metrics())
for transform_id, op in self.ops.items()})
def _fix_output_tags(self, transform_id, metrics):
# DEPRECATED
actual_output_tags = list(
self.process_bundle_descriptor.transforms[transform_id].outputs.keys())
# Outputs are still referred to by index, not by name, in many Operations.
# However, if there is exactly one output, we can fix up the name here.
def fix_only_output_tag(actual_output_tag, mapping):
if len(mapping) == 1:
fake_output_tag, count = only_element(list(mapping.items()))
if fake_output_tag != actual_output_tag:
del mapping[fake_output_tag]
mapping[actual_output_tag] = count
if len(actual_output_tags) == 1:
fix_only_output_tag(
actual_output_tags[0],
metrics.processed_elements.measured.output_element_counts)
fix_only_output_tag(
actual_output_tags[0],
metrics.active_elements.measured.output_element_counts)
return metrics
def monitoring_infos(self):
"""Returns the list of MonitoringInfos collected processing this bundle."""
# Construct a new dict first to remove duplicates.
all_monitoring_infos_dict = {}
for transform_id, op in self.ops.items():
for mi in op.monitoring_infos(transform_id).values():
fixed_mi = self._fix_output_tags_monitoring_info(transform_id, mi)
all_monitoring_infos_dict[monitoring_infos.to_key(fixed_mi)] = fixed_mi
infos_list = list(all_monitoring_infos_dict.values())
def inject_pcollection_into_element_count(monitoring_info):
"""
If the provided metric is an element count metric, find the relevant
transform output info in the current process_bundle_descriptor and add
a PCOLLECTION_LABEL label carrying the pcollection id to the monitoring
info.
"""
if monitoring_info.urn == monitoring_infos.ELEMENT_COUNT_URN:
if not monitoring_infos.PTRANSFORM_LABEL in monitoring_info.labels:
return
ptransform_label = monitoring_info.labels[
monitoring_infos.PTRANSFORM_LABEL]
if not monitoring_infos.TAG_LABEL in monitoring_info.labels:
return
tag_label = monitoring_info.labels[monitoring_infos.TAG_LABEL]
if not ptransform_label in self.process_bundle_descriptor.transforms:
return
if not tag_label in self.process_bundle_descriptor.transforms[
ptransform_label].outputs:
return
pcollection_name = (self.process_bundle_descriptor
.transforms[ptransform_label].outputs[tag_label])
monitoring_info.labels[
monitoring_infos.PCOLLECTION_LABEL] = pcollection_name
# Cleaning up labels that are not in specification.
monitoring_info.labels.pop(monitoring_infos.PTRANSFORM_LABEL)
monitoring_info.labels.pop(monitoring_infos.TAG_LABEL)
for mi in infos_list:
inject_pcollection_into_element_count(mi)
return infos_list
def _fix_output_tags_monitoring_info(self, transform_id, monitoring_info):
actual_output_tags = list(
self.process_bundle_descriptor.transforms[transform_id].outputs.keys())
if ('TAG' in monitoring_info.labels and
monitoring_info.labels['TAG'] == 'ONLY_OUTPUT'):
if len(actual_output_tags) == 1:
monitoring_info.labels['TAG'] = actual_output_tags[0]
return monitoring_info
class ExecutionContext(object):
def __init__(self):
self.delayed_applications = []
class BeamTransformFactory(object):
"""Factory for turning transform_protos into executable operations."""
def __init__(self, descriptor, data_channel_factory, counter_factory,
state_sampler, state_handler):
self.descriptor = descriptor
self.data_channel_factory = data_channel_factory
self.counter_factory = counter_factory
self.state_sampler = state_sampler
self.state_handler = state_handler
self.context = pipeline_context.PipelineContext(
descriptor,
iterable_state_read=lambda token, element_coder_impl:
_StateBackedIterable(
state_handler,
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
element_coder_impl))
_known_urns = {}
@classmethod
def register_urn(cls, urn, parameter_type):
def wrapper(func):
cls._known_urns[urn] = func, parameter_type
return func
return wrapper
def create_operation(self, transform_id, consumers):
transform_proto = self.descriptor.transforms[transform_id]
if not transform_proto.unique_name:
logging.warning("No unique name set for transform %s", transform_id)
transform_proto.unique_name = transform_id
creator, parameter_type = self._known_urns[transform_proto.spec.urn]
payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, parameter_type)
return creator(self, transform_id, transform_proto, payload, consumers)
def get_coder(self, coder_id):
if coder_id not in self.descriptor.coders:
raise KeyError("No such coder: %s" % coder_id)
coder_proto = self.descriptor.coders[coder_id]
if coder_proto.spec.spec.urn:
return self.context.coders.get_by_id(coder_id)
else:
# No URN, assume cloud object encoding json bytes.
return operation_specs.get_coder_from_spec(
json.loads(coder_proto.spec.spec.payload.decode('utf-8')))
def get_windowed_coder(self, pcoll_id):
coder = self.get_coder(self.descriptor.pcollections[pcoll_id].coder_id)
# TODO(robertwb): Remove this condition once all runners are consistent.
if not isinstance(coder, WindowedValueCoder):
windowing_strategy = self.descriptor.windowing_strategies[
self.descriptor.pcollections[pcoll_id].windowing_strategy_id]
return WindowedValueCoder(
coder, self.get_coder(windowing_strategy.window_coder_id))
else:
return coder
def get_output_coders(self, transform_proto):
return {
tag: self.get_windowed_coder(pcoll_id)
for tag, pcoll_id in transform_proto.outputs.items()
}
def get_only_output_coder(self, transform_proto):
return only_element(self.get_output_coders(transform_proto).values())
def get_input_coders(self, transform_proto):
return {
tag: self.get_windowed_coder(pcoll_id)
for tag, pcoll_id in transform_proto.inputs.items()
}
def get_only_input_coder(self, transform_proto):
return only_element(list(self.get_input_coders(transform_proto).values()))
# TODO(robertwb): Update all operations to take these in the constructor.
@staticmethod
def augment_oldstyle_op(op, step_name, consumers, tag_list=None):
op.step_name = step_name
for tag, op_consumers in consumers.items():
for consumer in op_consumers:
op.add_receiver(consumer, tag_list.index(tag) if tag_list else 0)
return op
class TimerConsumer(operations.Operation):
def __init__(self, timer_tag, do_op):
self._timer_tag = timer_tag
self._do_op = do_op
def process(self, windowed_value):
self._do_op.process_timer(self._timer_tag, windowed_value)
@BeamTransformFactory.register_urn(
DATA_INPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
# Timers are the one special case where we don't want to call the
# (unlabeled) operation.process() method, which we detect here.
# TODO(robertwb): Consider generalizing if there are any more cases.
output_pcoll = only_element(transform_proto.outputs.values())
output_consumers = only_element(consumers.values())
if (len(output_consumers) == 1
and isinstance(only_element(output_consumers), operations.DoOperation)):
do_op = only_element(output_consumers)
for tag, pcoll_id in do_op.timer_inputs.items():
if pcoll_id == output_pcoll:
output_consumers[:] = [TimerConsumer(tag, do_op)]
break
target = beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id,
name=only_element(list(transform_proto.outputs.keys())))
if grpc_port.coder_id:
output_coder = factory.get_coder(grpc_port.coder_id)
else:
logging.error(
'Missing required coder_id on grpc_port for %s; '
'using deprecated fallback.',
transform_id)
output_coder = factory.get_only_output_coder(transform_proto)
return DataInputOperation(
transform_proto.unique_name,
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
input_target=target,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(
DATA_OUTPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
target = beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id,
name=only_element(list(transform_proto.inputs.keys())))
if grpc_port.coder_id:
output_coder = factory.get_coder(grpc_port.coder_id)
else:
logging.error(
'Missing required coder_id on grpc_port for %s; '
'using deprecated fallback.',
transform_id)
output_coder = factory.get_only_input_coder(transform_proto)
return DataOutputOperation(
transform_proto.unique_name,
transform_proto.unique_name,
consumers,
factory.counter_factory,
factory.state_sampler,
output_coder,
target=target,
data_channel=factory.data_channel_factory.create_data_channel(grpc_port))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_READ_URN, None)
def create(factory, transform_id, transform_proto, parameter, consumers):
# The Dataflow runner harness strips the base64 encoding.
source = pickler.loads(base64.b64encode(parameter))
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[factory.get_only_output_coder(transform_proto)])
return factory.augment_oldstyle_op(
operations.ReadOperation(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.deprecated_primitives.READ.urn, beam_runner_api_pb2.ReadPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
source = iobase.SourceBase.from_runner_api(parameter.source, factory.context)
spec = operation_specs.WorkerRead(
iobase.SourceBundle(1.0, source, None, None),
[WindowedValueCoder(source.default_output_coder())])
return factory.augment_oldstyle_op(
operations.ReadOperation(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
python_urns.IMPULSE_READ_TRANSFORM, beam_runner_api_pb2.ReadPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
return operations.ImpulseReadOperation(
transform_proto.unique_name,
factory.counter_factory,
factory.state_sampler,
consumers,
iobase.SourceBase.from_runner_api(
parameter.source, factory.context),
factory.get_only_output_coder(transform_proto))
@BeamTransformFactory.register_urn(OLD_DATAFLOW_RUNNER_HARNESS_PARDO_URN, None)
def create(factory, transform_id, transform_proto, serialized_fn, consumers):
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,
beam_runner_api_pb2.ParDoPayload)
def create(*args):
class PairWithRestriction(beam.DoFn):
def __init__(self, fn, restriction_provider):
self.restriction_provider = restriction_provider
# An unused window is requested to force explosion of multi-window
# WindowedValues.
def process(
self, element, _unused_window=beam.DoFn.WindowParam, *args, **kwargs):
# TODO(SDF): Do we want to allow mutation of the element?
# (E.g. it could be nice to shift bulky description to the portion
# that can be distributed.)
yield element, self.restriction_provider.initial_restriction(element)
return _create_sdf_operation(PairWithRestriction, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create(*args):
class SplitAndSizeRestrictions(beam.DoFn):
def __init__(self, fn, restriction_provider):
self.restriction_provider = restriction_provider
def process(self, element_restriction, *args, **kwargs):
element, restriction = element_restriction
for part in self.restriction_provider.split(element, restriction):
yield ((element, part),
self.restriction_provider.restriction_size(element, part))
return _create_sdf_operation(SplitAndSizeRestrictions, *args)
@BeamTransformFactory.register_urn(
common_urns.sdf_components.PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
beam_runner_api_pb2.ParDoPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
assert parameter.do_fn.spec.urn == python_urns.PICKLED_DOFN_INFO
serialized_fn = parameter.do_fn.spec.payload
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter,
operation_cls=operations.SdfProcessSizedElements)
def _create_sdf_operation(
proxy_dofn,
factory, transform_id, transform_proto, parameter, consumers):
dofn_data = pickler.loads(parameter.do_fn.spec.payload)
dofn = dofn_data[0]
restriction_provider = common.DoFnSignature(dofn).get_restriction_provider()
serialized_fn = pickler.dumps(
(proxy_dofn(dofn, restriction_provider),) + dofn_data[1:])
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter)
@BeamTransformFactory.register_urn(
common_urns.primitives.PAR_DO.urn, beam_runner_api_pb2.ParDoPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
assert parameter.do_fn.spec.urn == python_urns.PICKLED_DOFN_INFO
serialized_fn = parameter.do_fn.spec.payload
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, parameter)
def _create_pardo_operation(
factory, transform_id, transform_proto, consumers,
serialized_fn, pardo_proto=None, operation_cls=operations.DoOperation):
if pardo_proto and pardo_proto.side_inputs:
input_tags_to_coders = factory.get_input_coders(transform_proto)
tagged_side_inputs = [
(tag, beam.pvalue.SideInputData.from_runner_api(si, factory.context))
for tag, si in pardo_proto.side_inputs.items()]
tagged_side_inputs.sort(
key=lambda tag_si: int(re.match('side([0-9]+)(-.*)?$',
tag_si[0]).group(1)))
side_input_maps = [
StateBackedSideInputMap(
factory.state_handler,
transform_id,
tag,
si,
input_tags_to_coders[tag])
for tag, si in tagged_side_inputs]
else:
side_input_maps = []
output_tags = list(transform_proto.outputs.keys())
  # Hack to match the 'out' prefix injected by the Dataflow runner.
def mutate_tag(tag):
if 'None' in output_tags:
if tag == 'None':
return 'out'
else:
return 'out_' + tag
else:
return tag
dofn_data = pickler.loads(serialized_fn)
if not dofn_data[-1]:
# Windowing not set.
if pardo_proto:
other_input_tags = set.union(
set(pardo_proto.side_inputs), set(pardo_proto.timer_specs))
else:
other_input_tags = ()
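    # The main input is the only input whose tag is neither a side input nor a timer.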
pcoll_id, = [pcoll for tag, pcoll in transform_proto.inputs.items()
if tag not in other_input_tags]
windowing = factory.context.windowing_strategies.get_by_id(
factory.descriptor.pcollections[pcoll_id].windowing_strategy_id)
serialized_fn = pickler.dumps(dofn_data[:-1] + (windowing,))
if pardo_proto and (pardo_proto.timer_specs or pardo_proto.state_specs
or pardo_proto.splittable):
main_input_coder = None
timer_inputs = {}
for tag, pcoll_id in transform_proto.inputs.items():
if tag in pardo_proto.timer_specs:
timer_inputs[tag] = pcoll_id
elif tag in pardo_proto.side_inputs:
pass
else:
# Must be the main input
assert main_input_coder is None
main_input_tag = tag
main_input_coder = factory.get_windowed_coder(pcoll_id)
assert main_input_coder is not None
if pardo_proto.timer_specs or pardo_proto.state_specs:
user_state_context = FnApiUserStateContext(
factory.state_handler,
transform_id,
main_input_coder.key_coder(),
main_input_coder.window_coder,
timer_specs=pardo_proto.timer_specs)
else:
user_state_context = None
else:
user_state_context = None
timer_inputs = None
output_coders = factory.get_output_coders(transform_proto)
spec = operation_specs.WorkerDoFn(
serialized_fn=serialized_fn,
output_tags=[mutate_tag(tag) for tag in output_tags],
input=None,
side_inputs=None, # Fn API uses proto definitions and the Fn State API
output_coders=[output_coders[tag] for tag in output_tags])
result = factory.augment_oldstyle_op(
operation_cls(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler,
side_input_maps,
user_state_context,
timer_inputs=timer_inputs),
transform_proto.unique_name,
consumers,
output_tags)
if pardo_proto and pardo_proto.splittable:
result.input_info = (
transform_id, main_input_tag, main_input_coder,
transform_proto.outputs.keys())
return result
def _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers, dofn):
serialized_fn = pickler.dumps((dofn, (), {}, [], None))
return _create_pardo_operation(
factory, transform_id, transform_proto, consumers, serialized_fn)
@BeamTransformFactory.register_urn(
common_urns.primitives.ASSIGN_WINDOWS.urn,
beam_runner_api_pb2.WindowingStrategy)
def create(factory, transform_id, transform_proto, parameter, consumers):
class WindowIntoDoFn(beam.DoFn):
def __init__(self, windowing):
self.windowing = windowing
def process(self, element, timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam):
new_windows = self.windowing.windowfn.assign(
WindowFn.AssignContext(timestamp, element=element, window=window))
yield WindowedValue(element, timestamp, new_windows)
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.window import WindowFn, WindowedValue
windowing = Windowing.from_runner_api(parameter, factory.context)
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers,
WindowIntoDoFn(windowing))
@BeamTransformFactory.register_urn(IDENTITY_DOFN_URN, None)
def create(factory, transform_id, transform_proto, unused_parameter, consumers):
return factory.augment_oldstyle_op(
operations.FlattenOperation(
transform_proto.unique_name,
operation_specs.WorkerFlatten(
None, [factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PGBKCV.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
# TODO: Combine side inputs.
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.PGBKCVOperation(
transform_proto.unique_name,
operation_specs.WorkerPartialGroupByKey(
serialized_combine_fn,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_MERGE_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'merge')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_EXTRACT_OUTPUTS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'extract')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_PRECOMBINE.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.PGBKCVOperation(
transform_proto.unique_name,
operation_specs.WorkerPartialGroupByKey(
serialized_combine_fn,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_MERGE_ACCUMULATORS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'merge')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_PER_KEY_EXTRACT_OUTPUTS.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'extract')
@BeamTransformFactory.register_urn(
common_urns.combine_components.COMBINE_GROUPED_VALUES.urn,
beam_runner_api_pb2.CombinePayload)
def create(factory, transform_id, transform_proto, payload, consumers):
return _create_combine_phase_operation(
factory, transform_proto, payload, consumers, 'all')
def _create_combine_phase_operation(
factory, transform_proto, payload, consumers, phase):
serialized_combine_fn = pickler.dumps(
(beam.CombineFn.from_runner_api(payload.combine_fn, factory.context),
[], {}))
return factory.augment_oldstyle_op(
operations.CombineOperation(
transform_proto.unique_name,
operation_specs.WorkerCombineFn(
serialized_combine_fn,
phase,
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(common_urns.primitives.FLATTEN.urn, None)
def create(factory, transform_id, transform_proto, unused_parameter, consumers):
return factory.augment_oldstyle_op(
operations.FlattenOperation(
transform_proto.unique_name,
operation_specs.WorkerFlatten(
None,
[factory.get_only_output_coder(transform_proto)]),
factory.counter_factory,
factory.state_sampler),
transform_proto.unique_name,
consumers)
@BeamTransformFactory.register_urn(
common_urns.primitives.MAP_WINDOWS.urn,
beam_runner_api_pb2.SdkFunctionSpec)
def create(factory, transform_id, transform_proto, mapping_fn_spec, consumers):
assert mapping_fn_spec.spec.urn == python_urns.PICKLED_WINDOW_MAPPING_FN
window_mapping_fn = pickler.loads(mapping_fn_spec.spec.payload)
class MapWindows(beam.DoFn):
def process(self, element):
key, window = element
return [(key, window_mapping_fn(window))]
return _create_simple_pardo_operation(
factory, transform_id, transform_proto, consumers,
MapWindows())
|
the-stack_0_2900 | '''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
[email protected] | [email protected] | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import argparse
import unittest
import sys
import time
import statistics
import collections
import threading
import random
import pathlib
import tempfile
import shutil
import logging
from loopa.utils import await_coroutine_threadsafe
from hypergolix.utils import ApiID
from hypergolix.objproxy import Obj
from hypergolix.comms import WSConnection
from hypergolix.comms import WSBeatingConn
from hypergolix.service import RemotePersistenceServer
from hypergolix.app import HypergolixCore
from hypergolix.accounting import Account
from hypergolix.embed import HGXLink
from golix._getlow import GIDC
from hypergolix.persistence import _GidcLite
# ###############################################
# Fixtures
# ###############################################
from trashtest._fixtures.identities import TEST_AGENT1
from trashtest._fixtures.identities import TEST_READER1
from trashtest._fixtures.identities import TEST_AGENT2
from trashtest._fixtures.identities import TEST_READER2
gidc1 = TEST_READER1.packed
gidclite1 = _GidcLite.from_golix(GIDC.unpack(TEST_READER1.packed))
gidc2 = TEST_READER2.packed
gidclite2 = _GidcLite.from_golix(GIDC.unpack(TEST_READER2.packed))
logger = logging.getLogger(__name__)
# ###############################################
# Testing
# ###############################################
class TestAppNoRestore(unittest.TestCase):
''' Test a fake application with no account restoration, just with
a parrot between two identities.
'''
@classmethod
def setUpClass(cls):
''' Make a fake application, yo.
'''
# Set up the SERVER
###########################################
cls.server_cachedir = tempfile.mkdtemp()
cls.server = RemotePersistenceServer(
cache_dir = cls.server_cachedir,
host = '127.0.0.1',
port = 6022,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'pserver'}
)
# Set up the FIRST CLIENT
###########################################
cls.hgxcore1_cachedir = tempfile.mkdtemp()
cls.hgxcore1 = HypergolixCore(
cache_dir = cls.hgxcore1_cachedir,
ipc_port = 6023,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'hgxcore1'}
)
cls.hgxcore1.add_remote(
connection_cls = WSBeatingConn,
host = '127.0.0.1',
port = 6022,
tls = False
)
cls.root_secret_1 = TEST_AGENT1.new_secret()
cls.account1 = Account(
user_id = TEST_AGENT1,
root_secret = cls.root_secret_1,
hgxcore = cls.hgxcore1
)
cls.hgxcore1.account = cls.account1
cls.hgxlink1 = HGXLink(
ipc_port = 6023,
autostart = False,
# debug = True,
threaded = True,
thread_kwargs = {'name': 'hgxlink1'}
)
# Set up the SECOND CLIENT
###########################################
cls.hgxcore2_cachedir = tempfile.mkdtemp()
cls.hgxcore2 = HypergolixCore(
cache_dir = cls.hgxcore2_cachedir,
ipc_port = 6024,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'hgxcore2'}
)
cls.hgxcore2.add_remote(
connection_cls = WSBeatingConn,
host = '127.0.0.1',
port = 6022,
tls = False
)
cls.root_secret_2 = TEST_AGENT2.new_secret()
cls.account2 = Account(
user_id = TEST_AGENT2,
root_secret = cls.root_secret_2,
hgxcore = cls.hgxcore2
)
cls.hgxcore2.account = cls.account2
cls.hgxlink2 = HGXLink(
ipc_port = 6024,
autostart = False,
# debug = True,
threaded = True,
thread_kwargs = {'name': 'hgxlink2'}
)
# START THE WHOLE SHEBANG
###########################################
# Start the server and wait until it's ready to serve connections
cls.server.start()
await_coroutine_threadsafe(
coro = cls.server.await_init(),
loop = cls.server._loop
)
# Start the first core and wait until it's ready to serve connections
cls.hgxcore1.start()
await_coroutine_threadsafe(
coro = cls.hgxcore1.await_init(),
loop = cls.hgxcore1._loop
)
# Start the second core and wait until it's ready to serve connections
cls.hgxcore2.start()
await_coroutine_threadsafe(
coro = cls.hgxcore2.await_init(),
loop = cls.hgxcore2._loop
)
# These don't need to wait though.
cls.hgxlink1.start()
cls.hgxlink2.start()
@classmethod
def tearDownClass(cls):
''' Kill errything and then remove the caches.
'''
try:
cls.hgxlink2.stop_threadsafe(timeout=.5)
cls.hgxlink1.stop_threadsafe(timeout=.5)
cls.hgxcore2.stop_threadsafe(timeout=.5)
cls.hgxcore1.stop_threadsafe(timeout=.5)
cls.server.stop_threadsafe(timeout=.5)
finally:
shutil.rmtree(cls.hgxcore2_cachedir)
shutil.rmtree(cls.hgxcore1_cachedir)
shutil.rmtree(cls.server_cachedir)
def setUp(self):
''' Do some housekeeping.
'''
self.iterations = 10
self.timeout = 10
self.request_api = ApiID(bytes(63) + b'\x01')
self.response_api = ApiID(bytes(63) + b'\x02')
self.incoming1 = collections.deque()
self.incoming2 = collections.deque()
self.cache2 = collections.deque()
self.returnflag1 = threading.Event()
self.updateflags = collections.deque()
# Set up the timing recorder
self.timers = collections.deque()
async def roundtrip_notifier(self, mirror_obj):
''' This gets called when we get an update for a response.
'''
end_time = time.monotonic()
ii = int.from_bytes(mirror_obj.state[:1], 'big')
self.timers[ii].appendleft(end_time)
self.updateflags[ii].set()
def share_handler(self, ghid, origin, api_id):
''' This handles all shares. It's defined to be used STRICTLY in
one direction.
'''
# The request handler. Requests are only received by hgxlink2.
if api_id == self.request_api:
# Get the object itself
obj = self.hgxlink2.get_threadsafe(
cls = Obj,
ghid = ghid
)
# Construct a mirror object
mirror = self.hgxlink2.new_threadsafe(
cls = Obj,
state = obj.state,
api_id = self.response_api,
dynamic = True,
private = False
)
# Create an update callback
async def state_mirror(source_obj, mirror_obj=mirror):
mirror_obj.state = source_obj.state
await mirror_obj.push()
# Set the update callback and then share the mirror
obj.callback = state_mirror
self.incoming2.appendleft(obj)
self.cache2.appendleft(mirror)
mirror.share_threadsafe(origin)
# The response handler. Responses are only received by hgxlink1.
elif api_id == self.response_api:
# Get the object itself
mirror = self.hgxlink1.get_threadsafe(
cls = Obj,
ghid = ghid
)
mirror.callback = self.roundtrip_notifier
self.incoming1.appendleft(mirror)
self.returnflag1.set()
else:
raise ValueError('Bad api.')
def test_whoami(self):
''' Super simple whoami test to make sure it's working.
'''
# First make sure everything is correctly started up.
await_coroutine_threadsafe(
coro = self.hgxcore1.await_startup(),
loop = self.hgxcore1._loop
)
await_coroutine_threadsafe(
coro = self.hgxcore2.await_startup(),
loop = self.hgxcore2._loop
)
whoami = await_coroutine_threadsafe(
coro = self.hgxlink1._ipc_manager.get_whoami(timeout=5),
loop = self.hgxlink1._loop
)
self.assertEqual(whoami, self.hgxlink1.whoami)
self.assertEqual(whoami, TEST_AGENT1.ghid)
whoami2 = await_coroutine_threadsafe(
coro = self.hgxlink2._ipc_manager.get_whoami(timeout=5),
loop = self.hgxlink2._loop
)
self.assertEqual(whoami2, self.hgxlink2.whoami)
self.assertEqual(whoami2, TEST_AGENT2.ghid)
def test_roundtrip(self):
''' Bidirectional communication test.
'''
# First make sure everything is correctly started up.
await_coroutine_threadsafe(
coro = self.hgxcore1.await_startup(),
loop = self.hgxcore1._loop
)
await_coroutine_threadsafe(
coro = self.hgxcore2.await_startup(),
loop = self.hgxcore2._loop
)
# First we need to wrap the share handler appropriately
handler1 = self.hgxlink1.wrap_threadsafe(self.share_handler)
handler2 = self.hgxlink2.wrap_threadsafe(self.share_handler)
# Then we need to actually register it with the respective links
self.hgxlink1.register_share_handler_threadsafe(
self.response_api,
handler1
)
self.hgxlink2.register_share_handler_threadsafe(
self.request_api,
handler2
)
        # Now let's make the actual request, then share it
state = bytes([random.randint(0, 255) for i in range(0, 25)])
request = self.hgxlink1.new_threadsafe(
cls = Obj,
state = state,
api_id = self.request_api,
dynamic = True,
private = False
)
request.share_threadsafe(self.hgxlink2.whoami)
# Wait for a response. First make sure one comes, then that it matches
self.assertTrue(self.returnflag1.wait(30))
mirror = self.incoming1.pop()
self.assertEqual(request.state, mirror.state)
# Notify that we're starting the actual tests
logger.info(
'\n\n########################################################\n' +
'######### Handshakes complete! Starting tests. #########\n' +
'########################################################\n'
)
for ii in range(self.iterations):
with self.subTest(i=ii):
logger.info(
'\n' +
'################ Starting mirror cycle. ################'
)
# Prep the object with an update
state = ii.to_bytes(1, 'big') + \
bytes([random.randint(0, 255) for i in range(0, 25)])
request.state = state
# Clear the update flag and zero out the timer
self.updateflags.append(threading.Event())
self.timers.append(collections.deque([0, 0], maxlen=2))
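                # Each per-iteration deque holds [end_time, start_time]; maxlen=2 keeps just that pair.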
self.timers[ii].appendleft(time.monotonic())
# Call an update, wait for the response, and record the time
request.push_threadsafe()
success = self.updateflags[ii].wait(self.timeout)
# Check for success
self.assertTrue(success)
self.assertEqual(mirror.state, state)
times = [end - start for end, start in self.timers]
        # Get a newline for tidiness when being run within the whole test suite
print('')
print('Max time: ', max(times))
print('Min time: ', min(times))
print('Mean time:', statistics.mean(times))
print('Med time: ', statistics.median(times))
# ###############################################
# Operations
# ###############################################
if __name__ == "__main__":
from hypergolix import logutils
logutils.autoconfig(loglevel='debug')
# from hypergolix.utils import TraceLogger
# with TraceLogger(interval=30):
# unittest.main()
unittest.main()
|
the-stack_0_2902 | N = int(input())
ans = ''
for _ in range(N):
p, q, r = input().split()
if p == 'BEGINNING':
ans += r[0]
elif p == 'MIDDLE':
ans += r[len(r)//2]
else:
ans += r[-1]
print(ans)
|
the-stack_0_2903 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
axes1 = fig.add_subplot(1, 1, 1)
line, = axes1.plot(np.random.rand(10))
def update(data):
line.set_ydata(data)
return line,
def data_gen():
while True:
yield np.random.rand(10)
ani = animation.FuncAnimation(fig, update, data_gen, interval=1000)
plt.show()
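# To also write the animation to disk, an ffmpeg-backed save call could be used
# (assumes ffmpeg is installed and on PATH):
# ani.save("random_lines.mp4", writer="ffmpeg", fps=1)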
|
the-stack_0_2904 | """The tests for the device tracker component."""
from datetime import datetime, timedelta
import json
import logging
import os
import pytest
from homeassistant.components import zone
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.device_tracker import const, legacy
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_PLATFORM,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, call, patch
from tests.common import (
assert_setup_component,
async_fire_time_changed,
mock_registry,
mock_restore_cache,
patch_yaml_files,
)
from tests.components.device_tracker import common
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
_LOGGER = logging.getLogger(__name__)
@pytest.fixture(name="yaml_devices")
def mock_yaml_devices(hass):
"""Get a path for storing yaml devices."""
yaml_devices = hass.config.path(legacy.YAML_DEVICES)
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
yield yaml_devices
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_is_on(hass):
"""Test is_on method."""
entity_id = f"{const.DOMAIN}.test"
hass.states.async_set(entity_id, STATE_HOME)
assert device_tracker.is_on(hass, entity_id)
hass.states.async_set(entity_id, STATE_NOT_HOME)
assert not device_tracker.is_on(hass, entity_id)
async def test_reading_broken_yaml_config(hass):
"""Test when known devices contains invalid data."""
files = {
"empty.yaml": "",
"nodict.yaml": "100",
"badkey.yaml": "@:\n name: Device",
"noname.yaml": "my_device:\n",
"allok.yaml": "My Device:\n name: Device",
"oneok.yaml": ("My Device!:\n name: Device\nbad_device:\n nme: Device"),
}
args = {"hass": hass, "consider_home": timedelta(seconds=60)}
with patch_yaml_files(files):
assert await legacy.async_load_config("empty.yaml", **args) == []
assert await legacy.async_load_config("nodict.yaml", **args) == []
assert await legacy.async_load_config("noname.yaml", **args) == []
assert await legacy.async_load_config("badkey.yaml", **args) == []
res = await legacy.async_load_config("allok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
res = await legacy.async_load_config("oneok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
async def test_reading_yaml_config(hass, yaml_devices):
"""Test the rendering of the YAML configuration."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
icon="mdi:kettle",
)
await hass.async_add_executor_job(
legacy.update_config, yaml_devices, dev_id, device
)
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
config = (await legacy.async_load_config(yaml_devices, hass, device.consider_home))[
0
]
assert device.dev_id == config.dev_id
assert device.track == config.track
assert device.mac == config.mac
assert device.config_picture == config.config_picture
assert device.consider_home == config.consider_home
assert device.icon == config.icon
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_duplicate_mac_dev_id(mock_warning, hass):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "your_device", "AB:01", "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device MAC" in args[0], "Duplicate MAC warning expected"
mock_warning.reset_mock()
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "my_device", None, "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device IDs" in args[0], "Duplicate device IDs warning expected"
async def test_setup_without_yaml_file(hass):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
async def test_gravatar(hass):
"""Test the Gravatar generation."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
gravatar="[email protected]",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
async def test_gravatar_and_picture(hass):
"""Test that Gravatar overrides picture."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
gravatar="[email protected]",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
@patch("homeassistant.components.device_tracker.legacy.DeviceTracker.see")
@patch("homeassistant.components.demo.device_tracker.setup_scanner", autospec=True)
async def test_discover_platform(mock_demo_setup_scanner, mock_see, hass):
"""Test discovery of device_tracker demo platform."""
await discovery.async_load_platform(
hass, device_tracker.DOMAIN, "demo", {"test_key": "test_val"}, {"bla": {}}
)
await hass.async_block_till_done()
assert device_tracker.DOMAIN in hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
hass,
{},
mock_see,
{"test_key": "test_val"},
)
async def test_update_stale(hass, mock_device_tracker_conf):
"""Test stalled update."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
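    # scan_time is 60 seconds after register_time, past the 59-second consider_home window,
    # so the device is expected to flip from home to not_home after the stale scan.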
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
assert STATE_HOME == hass.states.get("device_tracker.dev1").state
scanner.leave_home("DEV1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
assert STATE_NOT_HOME == hass.states.get("device_tracker.dev1").state
async def test_entity_attributes(hass, mock_device_tracker_conf):
"""Test the entity attributes."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = f"{const.DOMAIN}.{dev_id}"
friendly_name = "Paulus"
picture = "http://placehold.it/200x200"
icon = "mdi:kettle"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
None,
friendly_name,
picture,
icon=icon,
)
devices.append(device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
attrs = hass.states.get(entity_id).attributes
assert friendly_name == attrs.get(ATTR_FRIENDLY_NAME)
assert icon == attrs.get(ATTR_ICON)
assert picture == attrs.get(ATTR_ENTITY_PICTURE)
@patch("homeassistant.components.device_tracker.legacy." "DeviceTracker.async_see")
async def test_see_service(mock_see, hass):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"attributes": {"test": "test"},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
mock_see.reset_mock()
params["dev_id"] += chr(233) # e' acute accent from icloud
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
async def test_see_service_guard_config_entry(hass, mock_device_tracker_conf):
"""Test the guard if the device is registered in the entity registry."""
mock_entry = Mock()
dev_id = "test"
entity_id = f"{const.DOMAIN}.{dev_id}"
mock_registry(hass, {entity_id: mock_entry})
devices = mock_device_tracker_conf
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {"dev_id": dev_id, "gps": [0.3, 0.8]}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert not devices
async def test_new_device_event_fired(hass, mock_device_tracker_conf):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Record that our event got called."""
test_events.append(event)
hass.bus.async_listen("device_tracker_new_device", listener)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_1", host_name="hello")
await hass.async_block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
"entity_id": "device_tracker.hello",
"host_name": "hello",
"mac": "MAC_1",
}
async def test_duplicate_yaml_keys(hass, mock_device_tracker_conf):
"""Test that the device tracker will not generate invalid YAML."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_2", host_name="hello")
await hass.async_block_till_done()
assert len(devices) == 2
assert devices[0].dev_id != devices[1].dev_id
async def test_invalid_dev_id(hass, mock_device_tracker_conf):
"""Test that the device tracker will not allow invalid dev ids."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, dev_id="hello-world")
await hass.async_block_till_done()
assert not devices
async def test_see_state(hass, yaml_devices):
"""Test device tracker see records state correctly."""
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"mac": "AA:BB:CC:DD:EE:FF",
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"gps_accuracy": 1,
"battery": 100,
"attributes": {"test": "test", "number": 1},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
config = await legacy.async_load_config(yaml_devices, hass, timedelta(seconds=0))
assert len(config) == 1
state = hass.states.get("device_tracker.example_com")
attrs = state.attributes
assert state.state == "Work"
assert state.object_id == "example_com"
assert state.name == "example.com"
assert attrs["friendly_name"] == "example.com"
assert attrs["battery"] == 100
assert attrs["latitude"] == 0.3
assert attrs["longitude"] == 0.8
assert attrs["test"] == "test"
assert attrs["gps_accuracy"] == 1
assert attrs["source_type"] == "gps"
assert attrs["number"] == 1
async def test_see_passive_zone_state(hass, mock_device_tracker_conf):
"""Test that the device tracker sets gps for passive trackers."""
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
"name": "Home",
"latitude": 1,
"longitude": 2,
"radius": 250,
"passive": False,
}
await async_setup_component(hass, zone.DOMAIN, {"zone": zone_info})
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") == 1
assert attrs.get("longitude") == 2
assert attrs.get("gps_accuracy") == 0
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
scanner.leave_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_NOT_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") is None
assert attrs.get("longitude") is None
assert attrs.get("gps_accuracy") is None
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_see_failures(mock_warning, hass, mock_device_tracker_conf):
"""Test that the device tracker see failures."""
devices = mock_device_tracker_conf
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), 0, {}, [])
# MAC is not a string (but added)
await tracker.async_see(mac=567, host_name="Number MAC")
# No device id or MAC(not added)
with pytest.raises(HomeAssistantError):
await tracker.async_see()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
await tracker.async_see(mac="mac_1_bad_gps", gps=1)
await tracker.async_see(mac="mac_2_bad_gps", gps=[1])
await tracker.async_see(mac="mac_3_bad_gps", gps="gps")
await hass.async_block_till_done()
assert mock_warning.call_count == 3
assert len(devices) == 4
async def test_async_added_to_hass(hass):
"""Test restoring state."""
attr = {
ATTR_LONGITUDE: 18,
ATTR_LATITUDE: -33,
const.ATTR_SOURCE_TYPE: "gps",
ATTR_GPS_ACCURACY: 2,
const.ATTR_BATTERY: 100,
}
mock_restore_cache(hass, [State("device_tracker.jk", "home", attr)])
path = hass.config.path(legacy.YAML_DEVICES)
files = {path: "jk:\n name: JK Phone\n track: True"}
with patch_yaml_files(files):
assert await async_setup_component(hass, device_tracker.DOMAIN, {})
state = hass.states.get("device_tracker.jk")
assert state
assert state.state == "home"
for key, val in attr.items():
atr = state.attributes.get(key)
assert atr == val, f"{key}={atr} expected: {val}"
async def test_bad_platform(hass):
"""Test bad platform."""
config = {"device_tracker": [{"platform": "bad_platform"}]}
with assert_setup_component(0, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, config)
async def test_adding_unknown_device_to_config(mock_device_tracker_conf, hass):
"""Test the adding of unknown devices to configuration file."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
await async_setup_component(
hass, device_tracker.DOMAIN, {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
device = mock_device_tracker_conf[0]
assert device.dev_id == "dev1"
assert device.track
async def test_picture_and_icon_on_see_discovery(mock_device_tracker_conf, hass):
"""Test that picture and icon are set in initial see."""
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), False, {}, [])
await tracker.async_see(dev_id=11, picture="pic_url", icon="mdi:icon")
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].icon == "mdi:icon"
assert mock_device_tracker_conf[0].entity_picture == "pic_url"
async def test_backward_compatibility_for_track_new(mock_device_tracker_conf, hass):
"""Test backward compatibility for track new."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), False, {device_tracker.CONF_TRACK_NEW: True}, []
)
await tracker.async_see(dev_id=13)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
async def test_old_style_track_new_is_skipped(mock_device_tracker_conf, hass):
"""Test old style config is skipped."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), None, {device_tracker.CONF_TRACK_NEW: False}, []
)
await tracker.async_see(dev_id=14)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
def test_see_schema_allowing_ios_calls():
"""Test SEE service schema allows extra keys.
Temp work around because the iOS app sends incorrect data.
"""
device_tracker.SERVICE_SEE_PAYLOAD_SCHEMA(
{
"dev_id": "Test",
"battery": 35,
"battery_status": "Not Charging",
"gps": [10.0, 10.0],
"gps_accuracy": 300,
"hostname": "beer",
}
)
|
the-stack_0_2905 | # cta_apply2(test_images{l},model_orient,'padding',0,'precision','single')
import numpy as np
import scipy.ndimage
class Cta_apply2():
def __init__(self, image, model, padding=0, precision='complex64'):
self.precision=precision # 'double'
self.verbosity=0
self.padding=padding
self.image=image
self.model=model
self.shape=image.shape
# if padding>0
# padded_shape=cta_fft_bestshape(shape+padding)
# original_shape=shape
# img=zeros(padded_shape)
# img(1:shape(1),1:shape(2))=image
# image=img
# end
# shape=size(image)
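        # 5x5 complex derivative stencil: the real part differentiates along x and the
        # imaginary part along y; it is conjugated below before use.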
self.complex_derivatives=np.array([[0,0,(-1j)/8,0,0],
[0,0,1j,0,0],
[(-1)/8,1 ,0,-1, (1)/8],
[0,0,-1j,0,0],
[0,0,(1j)/8,0,0]])
self.complex_derivatives=np.conj(self.complex_derivatives)
def cta_apply2(self):
chog=Cta_chog(self.image, self.model['L'], self.precision, self.model['chog_options']).cta_chog()
#model[4] = model.chog_options
# chog=Cta_chog(image, self.L, self.precision, self.chog_options).cta_chog()
num_products=self.model['products'].shape[0] #size(model.products,1)
        H=np.zeros(self.shape, dtype=self.precision) # zero array with the same shape as the test image
for vf in range(len(self.model['v_sigma'])): #=1:numel(model.v_sigma),
L= self.model['products'][0, -2] #model.products(1,end-1)
# print('L: ' + str(L))
H_tmp=np.zeros(self.shape,self.precision)
for p in range(num_products):
product=self.model['products'][p,:] #model.products(p,:)
if product[2] != 0:
A=np.conj(np.squeeze(chog[int(product[0]-1)]['data'][int(product[1])]))
else:
A=np.squeeze(chog[int(product[0]-1)]['data'][int(product[1])])
                if product[3]==-1: # case with no second (or later) window function
if self.verbosity>1:
print('(%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[1]))
tmp=A
else:
if product[5] != 0:
B=np.conj(np.squeeze(chog[int(product[3]-1)]['data'][int(product[4])]))
else:
B=np.squeeze(chog[int(product[3]-1)]['data'][int(product[4])])
                    if product[6]==-1: # case with no third (or later) window function
if self.verbosity>1:
                            print('(%d) [%d]%d x (%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[3],(-1)**product[5],product[4],product[9]))
tmp=A*B
else:
if product[8] != 0:
C=np.conj(np.squeeze(chog[int(product[6]-1)]['data'][int(product[7])]))
else:
C=np.squeeze(chog[int(product[6]-1)]['data'][int(product[7])])
if self.verbosity>1:
                            print('(%d) [%d]%d x (%d) [%d]%d x (%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[3],(-1)**product[5],product[4],product[6],(-1)**product[8],product[7],product[9]))
tmp=A*B*C
l=product[-2]
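                # Filter the running accumulator with the complex derivative kernel until its
                # order counter L matches this product's order l, so the new term can be added.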
while l<L:
L=L-1
# H_tmp=imfilter(H_tmp,complex_derivatives,model.filter_mode)
H_tmp_x_real=scipy.ndimage.correlate(np.real(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_real=scipy.ndimage.correlate(np.real(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_x_imag=scipy.ndimage.correlate(np.imag(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_imag=scipy.ndimage.correlate(np.imag(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp= H_tmp_x_real+ 1j*H_tmp_y_real + 1j*H_tmp_x_imag - H_tmp_y_imag
H_tmp = H_tmp + self.model['alpha'][vf][p]*tmp
while L>self.model['output_order']: #(L>model.output_order)
L=L-1
# H_tmp=imfilter(H_tmp,complex_derivatives,model.filter_mode)
            H_tmp_x_real=scipy.ndimage.correlate(np.real(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_real=scipy.ndimage.correlate(np.real(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_x_imag=scipy.ndimage.correlate(np.imag(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_imag=scipy.ndimage.correlate(np.imag(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp= H_tmp_x_real+ 1j*H_tmp_y_real + 1j*H_tmp_x_imag - H_tmp_y_imag
# ft_kernel=fftn(cta_fspecial('gauss',model.v_sigma(vf),shape,false,precision))
ft_kernel=np.fft.fftn(Cta_fspecial(self.shape, 'gauss', self.model['v_sigma'][vf], False, self.precision).cta_fspecial())
if self.model['output_order']==0:
H=H+np.real(np.fft.ifftn(np.fft.fftn(H_tmp)*ft_kernel))
else:
H=H+np.fft.ifftn(np.fft.fftn(H_tmp)*ft_kernel)
if self.model['output_order']>0: #(model.output_order>0)
# H=abs(H).*(H./abs(H)).^(1/model.output_order)
H=np.abs(H)*(H/np.abs(H))**(1/self.model['output_order'])
# if padding>0
# H=H(1:original_shape(1),1:original_shape(2))
Mask=np.zeros(H.shape)
border=int(np.ceil(np.max(self.model['v_sigma']))) # ceil(max(model.v_sigma))
Mask[border-1:Mask.shape[0]-border+1, border-1:Mask.shape[1]-border+1]=1
H[Mask==0]=0
return H
|
the-stack_0_2906 | ##script for finding the overlap in the top 100 most significant genes in each cancer and plotting results
##load necessary modules
import pylab as plt
import numpy as np
import math
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
##I did not write this function, from http://depts.washington.edu/clawpack/clawpack-4.6.3/python/pyclaw/plotters/colormaps.py
##-------------------------
def make_colormap(colors):
##-------------------------
"""
Define a new color map based on values specified in the dictionary
colors, where colors[z] is the color that value z should be mapped to,
with linear interpolation between the given values of z.
The z values (dictionary keys) are real numbers and the values
colors[z] can be either an RGB list, e.g. [1,0,0] for red, or an
html hex string, e.g. "#ff0000" for red.
"""
from matplotlib.colors import LinearSegmentedColormap, ColorConverter
from numpy import sort
z = sort(colors.keys())
n = len(z)
z1 = min(z)
zn = max(z)
x0 = (z - z1) / (zn - z1)
CC = ColorConverter()
R = []
G = []
B = []
for i in range(n):
#i'th color at level z[i]:
Ci = colors[z[i]]
if type(Ci) == str:
# a hex string of form '#ff0000' for example (for red)
RGB = CC.to_rgb(Ci)
else:
# assume it's an RGB triple already:
RGB = Ci
R.append(RGB[0])
G.append(RGB[1])
B.append(RGB[2])
cmap_dict = {}
cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]
cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]
cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]
mymap = LinearSegmentedColormap('mymap',cmap_dict)
return mymap
def compare3(first,second):
if float(first[-1])>float(second[-1]):
return 1
elif float(first[-1])<float(second[-1]):
return -1
else:
return 0
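##The repeated per-cancer blocks below could also be generated with a helper like this
##(illustrative sketch only, not used by this script; assumes the same BASE_DIR layout):
#def top100_gene_dict(cancer):
#    f = open(os.path.join(BASE_DIR, 'cox_regression', cancer, 'coeffs_pvalues.txt'))
#    rows = [line.strip().split() for line in f]
#    rows.sort(cmp=compare3)
#    return dict((row[0], '') for row in rows[:100])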
##get the 100 most significant genes for each cancer
f=open(os.path.join(BASE_DIR,'cox_regression','BLCA','coeffs_pvalues.txt'))
BLCA=[i.strip().split() for i in f]
BLCA.sort(cmp=compare3)
BLCA_dict_100={}
for i in BLCA[:100]:
BLCA_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','coeffs_pvalues.txt'))
LGG=[i.strip().split() for i in f]
LGG.sort(cmp=compare3)
LGG_dict_100={}
for i in LGG[:100]:
LGG_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','BRCA','coeffs_pvalues.txt'))
BRCA=[i.strip().split() for i in f]
BRCA.sort(cmp=compare3)
BRCA_dict_100={}
for i in BRCA[:100]:
BRCA_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_pvalues.txt'))
CESC=[i.strip().split() for i in f]
CESC.sort(cmp=compare3)
CESC_dict_100={}
for i in CESC[:100]:
CESC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','COAD','coeffs_pvalues.txt'))
COAD=[i.strip().split() for i in f]
COAD.sort(cmp=compare3)
COAD_dict_100={}
for i in COAD[:100]:
COAD_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','GBM','coeffs_pvalues.txt'))
GBM=[i.strip().split() for i in f]
GBM.sort(cmp=compare3)
GBM_dict_100={}
for i in GBM[:100]:
GBM_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','HNSC','coeffs_pvalues.txt'))
HNSC=[i.strip().split() for i in f]
HNSC.sort(cmp=compare3)
HNSC_dict_100={}
for i in HNSC[:100]:
HNSC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','coeffs_pvalues.txt'))
KIRC=[i.strip().split() for i in f]
KIRC.sort(cmp=compare3)
KIRC_dict_100={}
for i in KIRC[:100]:
KIRC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','KIRP','coeffs_pvalues.txt'))
KIRP=[i.strip().split() for i in f]
KIRP.sort(cmp=compare3)
KIRP_dict_100={}
for i in KIRP[:100]:
KIRP_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LAML','coeffs_pvalues.txt'))
LAML=[i.strip().split() for i in f]
LAML.sort(cmp=compare3)
LAML_dict_100={}
for i in LAML[:100]:
LAML_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','coeffs_pvalues.txt'))
LIHC=[i.strip().split() for i in f]
LIHC.sort(cmp=compare3)
LIHC_dict_100={}
for i in LIHC[:100]:
LIHC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LUAD','coeffs_pvalues.txt'))
LUAD=[i.strip().split() for i in f]
LUAD.sort(cmp=compare3)
LUAD_dict_100={}
for i in LUAD[:100]:
LUAD_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LUSC','coeffs_pvalues.txt'))
LUSC=[i.strip().split() for i in f]
LUSC.sort(cmp=compare3)
LUSC_dict_100={}
for i in LUSC[:100]:
LUSC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','SKCM','coeffs_pvalues.txt'))
SKCM=[i.strip().split() for i in f]
SKCM.sort(cmp=compare3)
SKCM_dict_100={}
for i in SKCM[:100]:
SKCM_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','OV','coeffs_pvalues.txt'))
OV=[i.strip().split() for i in f]
OV.sort(cmp=compare3)
OV_dict_100={}
for i in OV[:100]:
OV_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','STAD','coeffs_pvalues.txt'))
STAD=[i.strip().split() for i in f]
STAD.sort(cmp=compare3)
STAD_dict_100={}
for i in STAD[:100]:
STAD_dict_100[i[0]]=''
all_cancers=[BLCA_dict_100,BRCA_dict_100,CESC_dict_100,COAD_dict_100,\
GBM_dict_100,HNSC_dict_100,KIRC_dict_100,KIRP_dict_100,LAML_dict_100,\
LGG_dict_100,LIHC_dict_100,LUAD_dict_100,LUSC_dict_100,OV_dict_100,\
SKCM_dict_100,STAD_dict_100]
final_array=[]
for i in all_cancers[::-1]:
temp=[]
for j in all_cancers[::-1]:
##compute overlap
temp.append(len([k for k in j if k in i]))
final_array.append(temp)
##create a custom colormap
blue_yellow_red = make_colormap({0:'w',.05:'#85A3E0',.1:'#3366CC',.2:'#00FF00',.3:'#FFFF66',0.4:'#FF9966', 1:'#CC3300'})
##plot
Z=np.array(final_array)
mask=np.tri(Z.shape[0],k=-1)
Z= np.ma.array(Z, mask=mask)
fig = plt.figure()
fig.subplots_adjust(bottom=.15)
fig.subplots_adjust(left=.15)
ax = fig.add_subplot(111)
figure=ax.imshow(Z,cmap=blue_yellow_red,interpolation="nearest")
cbar=fig.colorbar(figure,pad=.02)
cbar.ax.tick_params(labelsize=40)
cbar.set_label('number of genes', rotation=270,fontsize=80,labelpad=25)
ax.set_yticks([i for i in range(0,16)])
ax.set_yticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1])
ax.tick_params(axis='y',labelsize=40)
ax.set_xticks([i for i in range(0,16)])
ax.set_xticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1],rotation=90)
ax.tick_params(axis='x',labelsize=40)
ax.tick_params(axis='x',length=0,width=0)
ax.tick_params(axis='y',length=0,width=0)
ax.invert_yaxis()
ax.invert_xaxis()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.show()
|
the-stack_0_2907 | # Original Code: https://github.com/nrsyed/computer-vision/blob/master/multithread/CountsPerSec.py
# Modified for use in PyPotter
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from datetime import datetime
class CountsPerSec:
"""
Class that tracks the number of occurrences ("counts") of an
arbitrary event and returns the frequency in occurrences
(counts) per second. The caller must increment the count.
"""
def __init__(self):
self._SmoothingFactor = 90
self._timeList = []
def countsPerSec(self):
self._timeList.append(datetime.now())
if (len(self._timeList) > self._SmoothingFactor):
self._timeList.pop(0)
elapsed_time = (self._timeList[-1] - self._timeList[0]).total_seconds()
if (elapsed_time > 0):
return len(self._timeList) / elapsed_time
        return 0
 |
the-stack_0_2908 | """
Shows the 20 most important amino acids; can be used to learn them.
"""
from setuptools import setup, find_packages
dependencies = ["pyqt5", "pandas"]
opt_dependencies = []
setup(
name="amino-acids-tutor",
version="1.0",
author="Luka Jeromel",
author_email="[email protected]",
description="Shows desired Amino Acid",
long_description=__doc__,
packages=find_packages(exclude=["tests"]),
# modules=["amino_acids"],
install_requires=dependencies,
install_extas=opt_dependencies,
entry_points={
# "console_scripts": ["luka-led-display=led_display.__main__:main"],
"gui_scripts": ["amino-acids=amino_acids.__main__:main"],
},
)
|
the-stack_0_2909 | from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_vowels_post_expected_code():
response = client.post("/vowels/", json={"line": "HOLA"})
assert response.status_code == 200
def test_vowels_post_result():
response = client.post("/vowels/", json={"line": "HOLA"})
assert response.json() == {
"data": {"vowels_count": 2, "new_line": "HULE"},
"message": "success",
"code": 200,
}
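# Illustrative extra check (not part of the original suite): assuming "line" is a required
# field on the request model, FastAPI rejects a payload that omits it with a 422.
def test_vowels_post_missing_line_returns_422():
    response = client.post("/vowels/", json={})
    assert response.status_code == 422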
|
the-stack_0_2911 | import datetime
import jwt
from app.core import config
'''
JWT RFC:
https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-25
sub - Subject of the JWT (Volunteer).
exp - Expiration time in which the JWT token will be invalid by the server.
iat - Issue time, identifies the time at which the JWT as issued.
iss - Issuer of the JWT.
'''
def create_access_token(token: str, expires_delta: datetime.timedelta = datetime.timedelta(days=config.ACCESS_TOKEN_EXPIRE_DAYS)):
    data = {"sub": token,
            "exp": datetime.datetime.utcnow() + expires_delta,
"iat": datetime.datetime.utcnow(),
"iss": config.SERVER_HOST,
}
return jwt.encode(data, key=config.JWT_SECRET_KEY, algorithm=config.JWT_ALGORITHM)
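# Illustrative counterpart (not part of the original module): decoding and verifying a token
# produced by create_access_token. Assumes the PyJWT 2.x API; signature, issuer and expiry are
# checked against the same config values used when encoding.
def decode_access_token(encoded_token: str) -> dict:
    return jwt.decode(
        encoded_token,
        key=config.JWT_SECRET_KEY,
        algorithms=[config.JWT_ALGORITHM],
        issuer=config.SERVER_HOST,
        options={"require": ["exp", "iat", "iss", "sub"]},
    )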
|
the-stack_0_2912 | import os
import uuid
from mlflow.entities import Experiment, Metric, Param, Run, RunData, RunInfo, RunStatus, RunTag, \
ViewType
from mlflow.store.abstract_store import AbstractStore
from mlflow.utils.validation import _validate_metric_name, _validate_param_name, _validate_run_id, \
_validate_tag_name
from mlflow.utils.env import get_env
from mlflow.utils.file_utils import (is_directory, list_subdirs, mkdir, exists, write_yaml,
read_yaml, find, read_file, build_path, write_to, append_to,
make_containing_dirs, mv)
from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME
from mlflow.utils.search_utils import does_run_match_clause
_TRACKING_DIR_ENV_VAR = "MLFLOW_TRACKING_DIR"
def _default_root_dir():
return get_env(_TRACKING_DIR_ENV_VAR) or os.path.abspath("mlruns")
class FileStore(AbstractStore):
TRASH_FOLDER_NAME = ".trash"
ARTIFACTS_FOLDER_NAME = "artifacts"
METRICS_FOLDER_NAME = "metrics"
PARAMS_FOLDER_NAME = "params"
TAGS_FOLDER_NAME = "tags"
META_DATA_FILE_NAME = "meta.yaml"
def __init__(self, root_directory=None, artifact_root_uri=None):
"""
Create a new FileStore with the given root directory and a given default artifact root URI.
"""
super(FileStore, self).__init__()
self.root_directory = root_directory or _default_root_dir()
self.artifact_root_uri = artifact_root_uri or self.root_directory
self.trash_folder = build_path(self.root_directory, FileStore.TRASH_FOLDER_NAME)
# Create root directory if needed
if not exists(self.root_directory):
mkdir(self.root_directory)
# Create trash folder if needed
if not exists(self.trash_folder):
mkdir(self.trash_folder)
# Create default experiment if needed
if not self._has_experiment(experiment_id=Experiment.DEFAULT_EXPERIMENT_ID):
self._create_experiment_with_id(name="Default",
experiment_id=Experiment.DEFAULT_EXPERIMENT_ID,
artifact_uri=None)
def _check_root_dir(self):
"""
Run checks before running directory operations.
"""
if not exists(self.root_directory):
raise Exception("'%s' does not exist." % self.root_directory)
if not is_directory(self.root_directory):
raise Exception("'%s' is not a directory." % self.root_directory)
def _get_experiment_path(self, experiment_id, view_type=ViewType.ALL):
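        # Depending on the requested view type, look under the active root directory and/or
        # the trash folder for a directory named after the experiment id.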
parents = []
if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL:
parents.append(self.root_directory)
if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL:
parents.append(self.trash_folder)
for parent in parents:
exp_list = find(parent, str(experiment_id), full_path=True)
if len(exp_list) > 0:
return exp_list
return []
def _get_run_dir(self, experiment_id, run_uuid):
_validate_run_id(run_uuid)
return build_path(self._get_experiment_path(experiment_id)[0], run_uuid)
def _get_metric_path(self, experiment_id, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.METRICS_FOLDER_NAME,
metric_key)
def _get_param_path(self, experiment_id, run_uuid, param_name):
_validate_run_id(run_uuid)
_validate_param_name(param_name)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.PARAMS_FOLDER_NAME,
param_name)
def _get_tag_path(self, experiment_id, run_uuid, tag_name):
_validate_run_id(run_uuid)
_validate_tag_name(tag_name)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.TAGS_FOLDER_NAME,
tag_name)
def _get_artifact_dir(self, experiment_id, run_uuid):
_validate_run_id(run_uuid)
artifacts_dir = build_path(self.get_experiment(experiment_id).artifact_location,
run_uuid,
FileStore.ARTIFACTS_FOLDER_NAME)
return artifacts_dir
def _get_active_experiments(self, full_path=False):
exp_list = list_subdirs(self.root_directory, full_path)
return [exp for exp in exp_list if not exp.endswith(FileStore.TRASH_FOLDER_NAME)]
def _get_deleted_experiments(self, full_path=False):
return list_subdirs(self.trash_folder, full_path)
def list_experiments(self, view_type=ViewType.ACTIVE_ONLY):
self._check_root_dir()
rsl = []
if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL:
rsl += self._get_active_experiments(full_path=False)
if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL:
rsl += self._get_deleted_experiments(full_path=False)
return [self._get_experiment(exp_id, view_type) for exp_id in rsl]
def _create_experiment_with_id(self, name, experiment_id, artifact_uri):
self._check_root_dir()
meta_dir = mkdir(self.root_directory, str(experiment_id))
artifact_uri = artifact_uri or build_path(self.artifact_root_uri, str(experiment_id))
experiment = Experiment(experiment_id, name, artifact_uri)
write_yaml(meta_dir, FileStore.META_DATA_FILE_NAME, dict(experiment))
return experiment_id
def create_experiment(self, name, artifact_location=None):
self._check_root_dir()
if name is None or name == "":
raise Exception("Invalid experiment name '%s'" % name)
experiment = self.get_experiment_by_name(name)
if experiment is not None:
raise Exception("Experiment '%s' already exists." % experiment.name)
# Get all existing experiments and find the one with largest ID.
# len(list_all(..)) would not work when experiments are deleted.
experiments_ids = [e.experiment_id for e in self.list_experiments(ViewType.ALL)]
experiment_id = max(experiments_ids) + 1
return self._create_experiment_with_id(name, experiment_id, artifact_location)
def _has_experiment(self, experiment_id):
return len(self._get_experiment_path(experiment_id)) > 0
def _get_experiment(self, experiment_id, view_type=ViewType.ALL):
self._check_root_dir()
experiment_dirs = self._get_experiment_path(experiment_id, view_type)
if len(experiment_dirs) == 0:
raise Exception("Could not find experiment with ID %s" % experiment_id)
meta = read_yaml(experiment_dirs[0], FileStore.META_DATA_FILE_NAME)
return Experiment.from_dictionary(meta)
def get_experiment(self, experiment_id):
"""
Fetches the experiment. This will search for active as well as deleted experiments.
:param experiment_id: Integer id for the experiment
:return: A single Experiment object if it exists, otherwise raises an Exception.
"""
return self._get_experiment(experiment_id)
def get_experiment_by_name(self, name):
self._check_root_dir()
for experiment in self.list_experiments(ViewType.ALL):
if experiment.name == name:
return experiment
return None
def delete_experiment(self, experiment_id):
experiment_dirs = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY)
if len(experiment_dirs) == 0:
raise Exception("Could not find experiment with ID %s" % experiment_id)
mv(experiment_dirs[0], self.trash_folder)
def restore_experiment(self, experiment_id):
experiment_dirs = self._get_experiment_path(experiment_id, ViewType.DELETED_ONLY)
if len(experiment_dirs) == 0:
raise Exception("Could not find deleted experiment with ID %d" % experiment_id)
conflict_experiment = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY)
if len(conflict_experiment) > 0:
raise Exception("Cannot restore eperiment with ID %d. "
"An experiment with same ID already exists." % experiment_id)
mv(experiment_dirs[0], self.root_directory)
def _find_run_root(self, run_uuid):
_validate_run_id(run_uuid)
self._check_root_dir()
all_experiments = self._get_active_experiments(True) + self._get_deleted_experiments(True)
for experiment_dir in all_experiments:
runs = find(experiment_dir, run_uuid, full_path=True)
if len(runs) == 0:
continue
return runs[0]
return None
def update_run_info(self, run_uuid, run_status, end_time):
_validate_run_id(run_uuid)
run_info = self.get_run(run_uuid).info
new_info = run_info.copy_with_overrides(run_status, end_time)
run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid)
new_info_dict = self._make_run_info_dict(new_info)
write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, new_info_dict, overwrite=True)
return new_info
def create_run(self, experiment_id, user_id, run_name, source_type,
source_name, entry_point_name, start_time, source_version, tags):
"""
Creates a run with the specified attributes.
"""
if self.get_experiment(experiment_id) is None:
raise Exception("Could not create run under experiment with ID %s - no such experiment "
"exists." % experiment_id)
run_uuid = uuid.uuid4().hex
artifact_uri = self._get_artifact_dir(experiment_id, run_uuid)
run_info = RunInfo(run_uuid=run_uuid, experiment_id=experiment_id,
name="",
artifact_uri=artifact_uri, source_type=source_type,
source_name=source_name,
entry_point_name=entry_point_name, user_id=user_id,
status=RunStatus.RUNNING, start_time=start_time, end_time=None,
source_version=source_version)
# Persist run metadata and create directories for logging metrics, parameters, artifacts
run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid)
mkdir(run_dir)
write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, self._make_run_info_dict(run_info))
mkdir(run_dir, FileStore.METRICS_FOLDER_NAME)
mkdir(run_dir, FileStore.PARAMS_FOLDER_NAME)
mkdir(run_dir, FileStore.ARTIFACTS_FOLDER_NAME)
for tag in tags:
self.set_tag(run_uuid, tag)
if run_name:
self.set_tag(run_uuid, RunTag(key=MLFLOW_RUN_NAME, value=run_name))
return Run(run_info=run_info, run_data=None)
def _make_run_info_dict(self, run_info):
# 'tags' was moved from RunInfo to RunData, so we must keep storing it in the meta.yaml for
# old mlflow versions to read
run_info_dict = dict(run_info)
run_info_dict['tags'] = []
return run_info_dict
def get_run(self, run_uuid):
_validate_run_id(run_uuid)
run_dir = self._find_run_root(run_uuid)
if run_dir is None:
raise Exception("Run '%s' not found" % run_uuid)
run_info = self.get_run_info(run_dir)
metrics = self.get_all_metrics(run_uuid)
params = self.get_all_params(run_uuid)
tags = self.get_all_tags(run_uuid)
return Run(run_info, RunData(metrics, params, tags))
@staticmethod
def get_run_info(run_dir):
meta = read_yaml(run_dir, FileStore.META_DATA_FILE_NAME)
return RunInfo.from_dictionary(meta)
def _get_run_files(self, run_uuid, resource_type):
_validate_run_id(run_uuid)
if resource_type == "metric":
subfolder_name = FileStore.METRICS_FOLDER_NAME
elif resource_type == "param":
subfolder_name = FileStore.PARAMS_FOLDER_NAME
elif resource_type == "tag":
subfolder_name = FileStore.TAGS_FOLDER_NAME
else:
raise Exception("Looking for unknown resource under run.")
run_dir = self._find_run_root(run_uuid)
if run_dir is None:
raise Exception("Run '%s' not found" % run_uuid)
source_dirs = find(run_dir, subfolder_name, full_path=True)
if len(source_dirs) == 0:
return run_dir, []
file_names = []
for root, _, files in os.walk(source_dirs[0]):
for name in files:
abspath = os.path.join(root, name)
file_names.append(os.path.relpath(abspath, source_dirs[0]))
return source_dirs[0], file_names
@staticmethod
def _get_metric_from_file(parent_path, metric_name):
_validate_metric_name(metric_name)
metric_data = read_file(parent_path, metric_name)
if len(metric_data) == 0:
raise Exception("Metric '%s' is malformed. No data found." % metric_name)
last_line = metric_data[-1]
timestamp, val = last_line.strip().split(" ")
return Metric(metric_name, float(val), int(timestamp))
def get_metric(self, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
if metric_key not in metric_files:
raise Exception("Metric '%s' not found under run '%s'" % (metric_key, run_uuid))
return self._get_metric_from_file(parent_path, metric_key)
def get_all_metrics(self, run_uuid):
_validate_run_id(run_uuid)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
metrics = []
for metric_file in metric_files:
metrics.append(self._get_metric_from_file(parent_path, metric_file))
return metrics
def get_metric_history(self, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
if metric_key not in metric_files:
raise Exception("Metric '%s' not found under run '%s'" % (metric_key, run_uuid))
metric_data = read_file(parent_path, metric_key)
rsl = []
for pair in metric_data:
ts, val = pair.strip().split(" ")
rsl.append(Metric(metric_key, float(val), int(ts)))
return rsl
@staticmethod
def _get_param_from_file(parent_path, param_name):
_validate_param_name(param_name)
param_data = read_file(parent_path, param_name)
if len(param_data) == 0:
raise Exception("Param '%s' is malformed. No data found." % param_name)
if len(param_data) > 1:
raise Exception("Unexpected data for param '%s'. Param recorded more than once"
% param_name)
return Param(param_name, str(param_data[0].strip()))
@staticmethod
def _get_tag_from_file(parent_path, tag_name):
_validate_tag_name(tag_name)
tag_data = read_file(parent_path, tag_name)
if len(tag_data) == 0:
raise Exception("Tag '%s' is malformed. No data found." % tag_name)
if len(tag_data) > 1:
raise Exception("Unexpected data for tag '%s'. Tag recorded more than once"
% tag_name)
return RunTag(tag_name, str(tag_data[0].strip()))
def get_param(self, run_uuid, param_name):
_validate_run_id(run_uuid)
_validate_param_name(param_name)
parent_path, param_files = self._get_run_files(run_uuid, "param")
if param_name not in param_files:
raise Exception("Param '%s' not found under run '%s'" % (param_name, run_uuid))
return self._get_param_from_file(parent_path, param_name)
def get_all_params(self, run_uuid):
parent_path, param_files = self._get_run_files(run_uuid, "param")
params = []
for param_file in param_files:
params.append(self._get_param_from_file(parent_path, param_file))
return params
def get_all_tags(self, run_uuid):
parent_path, tag_files = self._get_run_files(run_uuid, "tag")
tags = []
for tag_file in tag_files:
tags.append(self._get_tag_from_file(parent_path, tag_file))
return tags
def _list_run_uuids(self, experiment_id):
self._check_root_dir()
experiment_dir = self._get_experiment_path(experiment_id)[0]
return list_subdirs(experiment_dir, full_path=False)
def search_runs(self, experiment_ids, search_expressions):
run_uuids = []
if len(search_expressions) == 0:
for experiment_id in experiment_ids:
run_uuids.extend(self._list_run_uuids(experiment_id))
else:
for experiment_id in experiment_ids:
for run_uuid in self._list_run_uuids(experiment_id):
run = self.get_run(run_uuid)
if all([does_run_match_clause(run, s) for s in search_expressions]):
run_uuids.append(run_uuid)
return [self.get_run(run_uuid) for run_uuid in run_uuids]
def list_run_infos(self, experiment_id):
run_infos = []
for run_uuid in self._list_run_uuids(experiment_id):
run_infos.append(self.get_run_info(self._get_run_dir(experiment_id, run_uuid)))
return run_infos
def log_metric(self, run_uuid, metric):
_validate_run_id(run_uuid)
_validate_metric_name(metric.key)
run = self.get_run(run_uuid)
metric_path = self._get_metric_path(run.info.experiment_id, run_uuid, metric.key)
make_containing_dirs(metric_path)
append_to(metric_path, "%s %s\n" % (metric.timestamp, metric.value))
def log_param(self, run_uuid, param):
_validate_run_id(run_uuid)
_validate_param_name(param.key)
run = self.get_run(run_uuid)
param_path = self._get_param_path(run.info.experiment_id, run_uuid, param.key)
make_containing_dirs(param_path)
write_to(param_path, "%s\n" % param.value)
def set_tag(self, run_uuid, tag):
_validate_run_id(run_uuid)
_validate_tag_name(tag.key)
run = self.get_run(run_uuid)
tag_path = self._get_tag_path(run.info.experiment_id, run_uuid, tag.key)
make_containing_dirs(tag_path)
write_to(tag_path, "%s\n" % tag.value)
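# Illustrative usage sketch (not part of the original module). It only calls
# methods defined above and assumes the matching (pre-1.0) mlflow version is
# installed; the run attributes (user, source type, tags) are placeholder values.
if __name__ == "__main__":
    import tempfile
    import time
    store = FileStore(root_directory=tempfile.mkdtemp())
    exp_id = store.create_experiment("demo-experiment")
    run = store.create_run(experiment_id=exp_id, user_id="demo-user",
                           run_name="demo-run", source_type="LOCAL",
                           source_name="demo.py", entry_point_name="main",
                           start_time=int(time.time() * 1000),
                           source_version="0", tags=[])
    store.log_metric(run.info.run_uuid, Metric("accuracy", 0.9, int(time.time() * 1000)))
    print(store.get_metric(run.info.run_uuid, "accuracy"))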
|
the-stack_0_2913 | import logging
from pprint import pprint # noqa
from followthemoney import model
from followthemoney.types import registry
from followthemoney.compare import compare
from aleph.core import db, es, celery
from aleph.model import Match
from aleph.index.indexes import entities_read_index
from aleph.index.entities import iter_proxies, entities_by_ids
from aleph.logic.entities.match import match_query
from aleph.index.util import unpack_result, none_query
from aleph.index.util import BULK_PAGE
from aleph.index.collections import get_collection
from aleph.logic.util import entity_url
log = logging.getLogger(__name__)
SCORE_CUTOFF = 0.05
def xref_item(proxy, collection_ids=None):
"""Cross-reference an entity or document, given as an indexed document."""
query = match_query(proxy, collection_ids=collection_ids)
if query == none_query():
return
query = {
'query': query,
'size': 100,
'_source': {'includes': ['schema', 'properties', 'collection_id']}
}
matchable = list(proxy.schema.matchable_schemata)
index = entities_read_index(schema=matchable)
result = es.search(index=index, body=query)
results = result.get('hits').get('hits')
for result in results:
result = unpack_result(result)
if result is not None:
other = model.get_proxy(result)
score = compare(model, proxy, other)
if score >= SCORE_CUTOFF:
yield score, result.get('collection_id'), other
@celery.task()
def xref_collection(collection_id, against_collection_ids=None):
"""Cross-reference all the entities and documents in a collection."""
matchable = [s.name for s in model if s.matchable]
entities = iter_proxies(collection_id=collection_id, schemata=matchable)
for entity in entities:
proxy = model.get_proxy(entity)
dq = db.session.query(Match)
dq = dq.filter(Match.entity_id == proxy.id)
dq.delete()
matches = xref_item(proxy, collection_ids=against_collection_ids)
for (score, other_id, other) in matches:
log.info("Xref [%.3f]: %s <=> %s", score, proxy, other)
obj = Match()
obj.entity_id = proxy.id
obj.collection_id = collection_id
obj.match_id = other.id
obj.match_collection_id = other_id
obj.score = score
db.session.add(obj)
db.session.commit()
def _format_date(proxy):
dates = proxy.get_type_values(registry.date)
if not len(dates):
return ''
return min(dates)
def _format_country(proxy):
countries = [c.upper() for c in proxy.countries]
return ', '.join(countries)
def _iter_match_batch(batch, authz):
matchable = [s.name for s in model if s.matchable]
entities = set()
for match in batch:
entities.add(match.entity_id)
entities.add(match.match_id)
entities = entities_by_ids(list(entities), schemata=matchable)
entities = {e.get('id'): e for e in entities}
for obj in batch:
if not authz.can(obj.match_collection_id, authz.READ):
continue
entity = entities.get(str(obj.entity_id))
match = entities.get(str(obj.match_id))
collection = get_collection(obj.match_collection_id)
if entity is None or match is None or collection is None:
continue
eproxy = model.get_proxy(entity)
mproxy = model.get_proxy(match)
yield (
int(obj.score * 100),
eproxy.caption,
_format_date(eproxy),
_format_country(eproxy),
collection.get('label'),
mproxy.caption,
_format_date(mproxy),
_format_country(mproxy),
entity_url(eproxy.id),
entity_url(mproxy.id),
)
def export_matches_csv(collection_id, authz):
"""Export the top N matches of cross-referencing for the given collection
to an Excel 2010 formatted export."""
dq = db.session.query(Match)
dq = dq.filter(Match.collection_id == collection_id)
dq = dq.order_by(Match.score.desc())
yield [
'Score',
'EntityName',
'EntityDate',
'EntityCountries',
'MatchCollection',
'MatchName',
'MatchDate',
'MatchCountries',
'EntityLink',
'MatchLink',
]
batch = []
for match in dq.yield_per(BULK_PAGE):
batch.append(match)
if len(batch) >= BULK_PAGE:
yield from _iter_match_batch(batch, authz)
batch = []
if len(batch):
yield from _iter_match_batch(batch, authz)
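# Illustrative invocation sketch (not part of the original module). Both calls
# need a configured aleph application context and a real `authz` object; the
# collection id below is a placeholder.
#
#   xref_collection.delay(1)                 # queue the cross-reference task
#
#   import csv, sys
#   writer = csv.writer(sys.stdout)
#   for row in export_matches_csv(1, authz):
#       writer.writerow(row)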
|
the-stack_0_2915 | """Contains UI methods for LE user operations."""
import logging
import zope.component
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.compat import misc
from certbot.compat import os
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
# Define a helper function to avoid verbose code
z_util = zope.component.getUtility
def get_email(invalid=False, optional=True):
"""Prompt for valid email address.
:param bool invalid: True if an invalid address was provided by the user
:param bool optional: True if the user can use
--register-unsafely-without-email to avoid providing an e-mail
:returns: e-mail address
:rtype: str
:raises errors.Error: if the user cancels
"""
invalid_prefix = "There seem to be problems with that address. "
msg = "Enter email address (used for urgent renewal and security notices)"
unsafe_suggestion = ("\n\nIf you really want to skip this, you can run "
"the client with --register-unsafely-without-email "
"but make sure you then backup your account key from "
"{0}\n\n".format(os.path.join(
misc.get_default_folder('config'), 'accounts')))
if optional:
if invalid:
msg += unsafe_suggestion
suggest_unsafe = False
else:
suggest_unsafe = True
else:
suggest_unsafe = False
while True:
try:
code, email = z_util(interfaces.IDisplay).input(
invalid_prefix + msg if invalid else msg,
force_interactive=True)
except errors.MissingCommandlineFlag:
msg = ("You should register before running non-interactively, "
"or provide --agree-tos and --email <email_address> flags.")
raise errors.MissingCommandlineFlag(msg)
if code != display_util.OK:
if optional:
raise errors.Error(
"An e-mail address or "
"--register-unsafely-without-email must be provided.")
else:
raise errors.Error("An e-mail address must be provided.")
elif util.safe_email(email):
return email
elif suggest_unsafe:
msg += unsafe_suggestion
suggest_unsafe = False # add this message at most once
invalid = bool(email)
def choose_account(accounts):
"""Choose an account.
:param list accounts: Containing at least one
:class:`~certbot._internal.account.Account`
"""
# Note this will get more complicated once we start recording authorizations
labels = [acc.slug for acc in accounts]
code, index = z_util(interfaces.IDisplay).menu(
"Please choose an account", labels, force_interactive=True)
if code == display_util.OK:
return accounts[index]
return None
def choose_values(values, question=None):
"""Display screen to let user pick one or multiple values from the provided
list.
:param list values: Values to select from
:returns: List of selected values
:rtype: list
"""
code, items = z_util(interfaces.IDisplay).checklist(
question, tags=values, force_interactive=True)
if code == display_util.OK and items:
return items
return []
def choose_names(installer, question=None):
"""Display screen to select domains to validate.
:param installer: An installer object
:type installer: :class:`certbot.interfaces.IInstaller`
:param `str` question: Overriding dialog question to ask the user if asked
to choose from domain names.
:returns: List of selected names
:rtype: `list` of `str`
"""
if installer is None:
logger.debug("No installer, picking names manually")
return _choose_names_manually()
domains = list(installer.get_all_names())
names = get_valid_domains(domains)
if not names:
return _choose_names_manually(
"No names were found in your configuration files. ")
code, names = _filter_names(names, question)
if code == display_util.OK and names:
return names
return []
def get_valid_domains(domains):
"""Helper method for choose_names that implements basic checks
on domain names
:param list domains: Domain names to validate
:return: List of valid domains
:rtype: list
"""
valid_domains = []
for domain in domains:
try:
valid_domains.append(util.enforce_domain_sanity(domain))
except errors.ConfigurationError:
continue
return valid_domains
def _sort_names(FQDNs):
"""Sort FQDNs by SLD (and if many, by their subdomains)
:param list FQDNs: list of domain names
:returns: Sorted list of domain names
:rtype: list
"""
return sorted(FQDNs, key=lambda fqdn: fqdn.split('.')[::-1][1:])
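# Worked example (illustrative): names are keyed on the second-level domain and
# then on successive subdomain labels (the TLD is ignored), so the apex name
# sorts ahead of its subdomains:
#   _sort_names(['b.example.com', 'example.com', 'a.example.com'])
#   == ['example.com', 'a.example.com', 'b.example.com']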
def _filter_names(names, override_question=None):
"""Determine which names the user would like to select from a list.
:param list names: domain names
:returns: tuple of the form (`code`, `names`) where
`code` - str display exit code
`names` - list of names selected
:rtype: tuple
"""
#Sort by domain first, and then by subdomain
sorted_names = _sort_names(names)
if override_question:
question = override_question
else:
question = "Which names would you like to activate HTTPS for?"
code, names = z_util(interfaces.IDisplay).checklist(
question, tags=sorted_names, cli_flag="--domains", force_interactive=True)
return code, [str(s) for s in names]
def _choose_names_manually(prompt_prefix=""):
"""Manually input names for those without an installer.
:param str prompt_prefix: string to prepend to prompt for domains
:returns: list of provided names
:rtype: `list` of `str`
"""
code, input_ = z_util(interfaces.IDisplay).input(
prompt_prefix +
"Please enter in your domain name(s) (comma and/or space separated) ",
cli_flag="--domains", force_interactive=True)
if code == display_util.OK:
invalid_domains = dict()
retry_message = ""
try:
domain_list = display_util.separate_list_input(input_)
except UnicodeEncodeError:
domain_list = []
retry_message = (
"Internationalized domain names are not presently "
"supported.{0}{0}Would you like to re-enter the "
"names?{0}").format(os.linesep)
for i, domain in enumerate(domain_list):
try:
domain_list[i] = util.enforce_domain_sanity(domain)
except errors.ConfigurationError as e:
invalid_domains[domain] = str(e)
if invalid_domains:
retry_message = (
"One or more of the entered domain names was not valid:"
"{0}{0}").format(os.linesep)
for domain in invalid_domains:
retry_message = retry_message + "{1}: {2}{0}".format(
os.linesep, domain, invalid_domains[domain])
retry_message = retry_message + (
"{0}Would you like to re-enter the names?{0}").format(
os.linesep)
if retry_message:
# We had error in input
retry = z_util(interfaces.IDisplay).yesno(retry_message,
force_interactive=True)
if retry:
return _choose_names_manually()
else:
return domain_list
return []
def success_installation(domains):
"""Display a box confirming the installation of HTTPS.
:param list domains: domain names which were enabled
"""
z_util(interfaces.IDisplay).notification(
"Congratulations! You have successfully enabled {0}{1}{1}"
"You should test your configuration at:{1}{2}".format(
_gen_https_names(domains),
os.linesep,
os.linesep.join(_gen_ssl_lab_urls(domains))),
pause=False)
def success_renewal(domains):
"""Display a box confirming the renewal of an existing certificate.
:param list domains: domain names which were renewed
"""
z_util(interfaces.IDisplay).notification(
"Your existing certificate has been successfully renewed, and the "
"new certificate has been installed.{1}{1}"
"The new certificate covers the following domains: {0}{1}{1}"
"You should test your configuration at:{1}{2}".format(
_gen_https_names(domains),
os.linesep,
os.linesep.join(_gen_ssl_lab_urls(domains))),
pause=False)
def success_revocation(cert_path):
"""Display a box confirming a certificate has been revoked.
:param list cert_path: path to certificate which was revoked.
"""
z_util(interfaces.IDisplay).notification(
"Congratulations! You have successfully revoked the certificate "
"that was located at {0}{1}{1}".format(
cert_path,
os.linesep),
pause=False)
def _gen_ssl_lab_urls(domains):
"""Returns a list of urls.
:param list domains: Each domain is a 'str'
"""
return ["https://www.ssllabs.com/ssltest/analyze.html?d=%s" % dom for dom in domains]
def _gen_https_names(domains):
"""Returns a string of the https domains.
Domains are formatted nicely with https:// prepended to each.
:param list domains: Each domain is a 'str'
"""
if len(domains) == 1:
return "https://{0}".format(domains[0])
elif len(domains) == 2:
return "https://{dom[0]} and https://{dom[1]}".format(dom=domains)
elif len(domains) > 2:
return "{0}{1}{2}".format(
", ".join("https://%s" % dom for dom in domains[:-1]),
", and https://",
domains[-1])
return ""
def _get_validated(method, validator, message, default=None, **kwargs):
if default is not None:
try:
validator(default)
except errors.Error as error:
logger.debug('Encountered invalid default value "%s" when prompting for "%s"',
default,
message,
exc_info=True)
raise AssertionError('Invalid default "{0}"'.format(default))
while True:
code, raw = method(message, default=default, **kwargs)
if code == display_util.OK:
try:
validator(raw)
return code, raw
except errors.Error as error:
logger.debug('Validator rejected "%s" when prompting for "%s"',
raw,
message,
exc_info=True)
zope.component.getUtility(interfaces.IDisplay).notification(str(error), pause=False)
else:
return code, raw
def validated_input(validator, *args, **kwargs):
"""Like `~certbot.interfaces.IDisplay.input`, but with validation.
:param callable validator: A method which will be called on the
supplied input. If the method raises a `errors.Error`, its
text will be displayed and the user will be re-prompted.
:param list `*args`: Arguments to be passed to `~certbot.interfaces.IDisplay.input`.
:param dict `**kwargs`: Arguments to be passed to `~certbot.interfaces.IDisplay.input`.
:return: as `~certbot.interfaces.IDisplay.input`
:rtype: tuple
"""
return _get_validated(zope.component.getUtility(interfaces.IDisplay).input,
validator, *args, **kwargs)
def validated_directory(validator, *args, **kwargs):
"""Like `~certbot.interfaces.IDisplay.directory_select`, but with validation.
:param callable validator: A method which will be called on the
supplied input. If the method raises a `errors.Error`, its
text will be displayed and the user will be re-prompted.
:param list `*args`: Arguments to be passed to `~certbot.interfaces.IDisplay.directory_select`.
:param dict `**kwargs`: Arguments to be passed to
`~certbot.interfaces.IDisplay.directory_select`.
:return: as `~certbot.interfaces.IDisplay.directory_select`
:rtype: tuple
"""
return _get_validated(zope.component.getUtility(interfaces.IDisplay).directory_select,
validator, *args, **kwargs)
|
the-stack_0_2916 | #!/usr/bin/python
import json
from random import randint
#if any changes are made to this plugin, kindly update the plugin version here.
PLUGIN_VERSION = "1"
#Setting this to true will alert you when there is a communication problem while posting plugin data to server
HEARTBEAT="true"
#Mention the units of your metrics . If any new metrics are added, make an entry here for its unit if needed.
METRICS_UNITS={'metric_1':'MB', 'metric_2':'ms'}
def metricCollector():
data = {}
data['plugin_version'] = PLUGIN_VERSION
data['heartbeat_required'] = HEARTBEAT
data['metric_1']=randint(0,1000)
data['metric_2']=randint(0,500)
data['metric_3']=randint(0,100)
data['units']=METRICS_UNITS
return data
if __name__ == "__main__":
result = metricCollector()
print(json.dumps(result, indent=4, sort_keys=True))
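# Example output shape (illustrative; the metric values are random on each run):
# {
#     "heartbeat_required": "true",
#     "metric_1": 512,
#     "metric_2": 87,
#     "metric_3": 42,
#     "plugin_version": "1",
#     "units": {
#         "metric_1": "MB",
#         "metric_2": "ms"
#     }
# }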
|
the-stack_0_2917 | # https://www.codewars.com/kata/5b2e5a02a454c82fb9000048
def get_neighbourhood(n_type, arr, coordinates):
x, y = coordinates
r, c = len(arr), len(arr[0])
if 0 > x or x >= r or 0 > y or y >= c: return []
if n_type == "moore":
return [
arr[i][j]
for i in range(x-1 if x > 0 else x, x+2 if x < r-1 else x+1)
for j in range(y-1 if y > 0 else y, y+2 if y < c-1 else y+1)
if (i, j) != (x, y)
]
return [
arr[c1][c2]
for i in (-1, 1)
for c1, c2 in [(x+i, y), (x, y+i)]
if c1 >= 0 and c2 >= 0 and c1 < r and c2 < c
]
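# Illustrative check (not part of the kata solution): a 3x3 grid exercises both
# neighbourhood types; any n_type other than "moore" falls through to the
# von Neumann branch.
if __name__ == "__main__":
    grid = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
    ]
    assert get_neighbourhood("moore", grid, (1, 1)) == [1, 2, 3, 4, 6, 7, 8, 9]
    assert get_neighbourhood("moore", grid, (0, 0)) == [2, 4, 5]
    assert get_neighbourhood("von_neumann", grid, (0, 0)) == [4, 2]
    assert get_neighbourhood("von_neumann", grid, (5, 5)) == []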
|
the-stack_0_2918 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
# mongo port testing on 0.0.0.0:27017
def test_mongo_socket(host):
socket = host.socket("tcp://0.0.0.0:27017")
assert socket.is_listening
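# These checks are normally driven by Molecule (`molecule test` or `molecule
# verify`), which exports MOLECULE_INVENTORY_FILE before the testinfra verifier
# collects this module; running the file with plain pytest requires that
# environment variable to point at a valid Ansible inventory.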
|
the-stack_0_2920 | # -*- coding: utf-8 -*-
from socialregister.users.models import User
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
if not details['email']:
username = details['username']
else:
username = details['email']
user = User.objects.get_or_create(
username=username,
defaults={
'email': details['email'], 'first_name': details['first_name'],
'last_name': details['last_name'], 'is_active': True})[0]
return {
'is_new': True,
'user': user
}
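# Illustrative wiring sketch (not part of the original module). python-social-auth
# picks this function up from the SOCIAL_AUTH_PIPELINE setting; the dotted path
# to it below is a guess about the project layout, the other entries are the
# library's stock pipeline steps.
#
#   SOCIAL_AUTH_PIPELINE = (
#       'social_core.pipeline.social_auth.social_details',
#       'social_core.pipeline.social_auth.social_uid',
#       'social_core.pipeline.social_auth.auth_allowed',
#       'social_core.pipeline.social_auth.social_user',
#       'socialregister.users.pipeline.create_user',
#       'social_core.pipeline.social_auth.associate_user',
#       'social_core.pipeline.social_auth.load_extra_data',
#       'social_core.pipeline.user.user_details',
#   )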
|
the-stack_0_2921 | import numpy as np
from sklearn.decomposition import PCA
from sklearn.base import BaseEstimator, OutlierMixin
from sklearn.utils.validation import check_is_fitted, check_array, FLOAT_DTYPES
class PCAOutlierDetection(BaseEstimator, OutlierMixin):
"""
Does outlier detection based on the reconstruction error from PCA.
"""
def __init__(
self,
n_components=None,
threshold=None,
variant="relative",
whiten=False,
svd_solver="auto",
tol=0.0,
iterated_power="auto",
random_state=None,
):
self.n_components = n_components
self.threshold = threshold
self.whiten = whiten
self.variant = variant
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""
Fit the model using X as training data.
:param X: array-like, shape=(n_columns, n_samples,) training data.
:param y: ignored but kept in for pipeline support
:return: Returns an instance of self.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
if not self.threshold:
raise ValueError("The `threshold` value cannot be `None`.")
self.pca_ = PCA(
n_components=self.n_components,
whiten=self.whiten,
svd_solver=self.svd_solver,
tol=self.tol,
iterated_power=self.iterated_power,
random_state=self.random_state,
)
self.pca_.fit(X, y)
self.offset_ = -self.threshold
return self
def transform(self, X):
"""
Uses the underlying PCA method to transform the data.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ["pca_", "offset_"])
return self.pca_.transform(X)
def difference(self, X):
"""
Shows the calculated difference between original and reconstructed data. Row by row.
:param X: array-like, shape=(n_columns, n_samples, ) training data.
:return: array, shape=(n_samples,) the difference
"""
check_is_fitted(self, ["pca_", "offset_"])
reduced = self.pca_.transform(X)
diff = np.sum(np.abs(self.pca_.inverse_transform(reduced) - X), axis=1)
if self.variant == "relative":
diff = diff / X.sum(axis=1)
return diff
def decision_function(self, X):
return self.threshold - self.difference(X)
def score_samples(self, X):
return -self.difference(X)
def predict(self, X):
"""
Predict if a point is an outlier.
:param X: array-like, shape=(n_columns, n_samples, ) training data.
:return: array, shape=(n_samples,) the predicted data. 1 for inliers, -1 for outliers.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ["pca_", "offset_"])
result = np.ones(X.shape[0])
result[self.difference(X) > self.threshold] = -1
        return result.astype(int)  # np.int is deprecated in modern NumPy; use the builtin int
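# Illustrative usage sketch (not part of the original module): the detector is
# used like any scikit-learn estimator; `threshold` is required and the value
# below is arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)
    detector = PCAOutlierDetection(n_components=2, threshold=0.1)
    labels = detector.fit(X).predict(X)
    print(labels[:10])                   # -1 marks an outlier, 1 an inlier
    print(detector.difference(X)[:10])   # per-row (relative) reconstruction error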
|
the-stack_0_2922 | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio ([email protected])
=============
Class Mix-Ins
=============
Some reusable class Mixins
'''
# pylint: disable=repr-flag-used-in-string
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import types
import atexit
import pprint
import logging
import tempfile
import functools
import subprocess
import multiprocessing
# Import Salt Testing Libs
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.paths import CODE_DIR
# Import salt libs
import salt.config
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.path
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
import salt.exceptions
import salt.utils.process
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt._compat import ElementTree as etree
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
class CheckShellBinaryNameAndVersionMixin(object):
'''
Simple class mix-in to subclass in companion to :class:`ShellTestCase<tests.support.case.ShellTestCase>` which
adds a test case to verify proper version report from Salt's CLI tools.
'''
_call_binary_ = None
_call_binary_expected_version_ = None
def test_version_includes_binary_name(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
if self._call_binary_expected_version_ is None:
# Late import
self._call_binary_expected_version_ = salt.version.__version__
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(self._call_binary_, out)
self.assertIn(self._call_binary_expected_version_, out)
class AdaptedConfigurationTestCaseMixin(object):
__slots__ = ()
@staticmethod
def get_temp_config(config_for, **config_overrides):
rootdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
conf_dir = os.path.join(rootdir, 'conf')
for key in ('cachedir', 'pki_dir', 'sock_dir'):
if key not in config_overrides:
config_overrides[key] = key
if 'log_file' not in config_overrides:
config_overrides['log_file'] = 'logs/{}.log'.format(config_for)
if 'user' not in config_overrides:
config_overrides['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
config_overrides['root_dir'] = rootdir
cdict = AdaptedConfigurationTestCaseMixin.get_config(config_for, from_scratch=True)
if config_for in ('master', 'client_config'):
rdict = salt.config.apply_master_config(config_overrides, cdict)
if config_for == 'minion':
rdict = salt.config.apply_minion_config(config_overrides, cdict)
verify_env([os.path.join(rdict['pki_dir'], 'minions'),
os.path.join(rdict['pki_dir'], 'minions_pre'),
os.path.join(rdict['pki_dir'], 'minions_rejected'),
os.path.join(rdict['pki_dir'], 'minions_denied'),
os.path.join(rdict['cachedir'], 'jobs'),
os.path.join(rdict['cachedir'], 'raet'),
os.path.join(rdict['cachedir'], 'tokens'),
os.path.join(rdict['root_dir'], 'cache', 'tokens'),
os.path.join(rdict['pki_dir'], 'accepted'),
os.path.join(rdict['pki_dir'], 'rejected'),
os.path.join(rdict['pki_dir'], 'pending'),
os.path.dirname(rdict['log_file']),
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)
with salt.utils.files.fopen(rdict['conf_file'], 'w') as wfh:
salt.utils.yaml.safe_dump(rdict, wfh, default_flow_style=False)
return rdict
@staticmethod
def get_config(config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('syndic',):
return salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
if config_for not in RUNTIME_VARS.RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('syndic',):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
)
return RUNTIME_VARS.RUNTIME_CONFIGS[config_for]
@staticmethod
def get_config_dir():
return RUNTIME_VARS.TMP_CONF_DIR
@staticmethod
def get_config_file_path(filename):
if filename == 'syndic_master':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master')
if filename == 'syndic':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion')
if filename == 'sub_minion':
return os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion')
return os.path.join(RUNTIME_VARS.TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
'''
Mix-in class that provides a ``client`` attribute which returns a Salt
:class:`LocalClient<salt:salt.client.LocalClient>`.
.. code-block:: python
class LocalClientTestCase(TestCase, SaltClientTestCaseMixin):
def test_check_pub_data(self):
just_minions = {'minions': ['m1', 'm2']}
jid_no_minions = {'jid': '1234', 'minions': []}
valid_pub_data = {'minions': ['m1', 'm2'], 'jid': '1234'}
self.assertRaises(EauthAuthenticationError,
self.client._check_pub_data, None)
self.assertDictEqual({},
self.client._check_pub_data(just_minions),
'Did not handle lack of jid correctly')
self.assertDictEqual(
{},
self.client._check_pub_data({'jid': '0'}),
'Passing JID of zero is not handled gracefully')
'''
_salt_client_config_file_name_ = 'master'
@property
def client(self):
# Late import
import salt.client
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
mopts = self.get_config(self._salt_client_config_file_name_, from_scratch=True)
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(mopts=mopts)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.version import __version_info__, SaltStackVersion
git = salt.utils.path.which('git')
if not git:
self.skipTest('The git binary is not available')
opts = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': CODE_DIR,
}
if not salt.utils.platform.is_windows():
opts['close_fds'] = True
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if process.returncode != 0:
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: \'{0}\''.format(
salt.utils.stringutils.to_str(err)
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
class _FixLoaderModuleMockMixinMroOrder(type):
'''
This metaclass will make sure that LoaderModuleMockMixin will always come as the first
base class in order for LoaderModuleMockMixin.setUp to actually run
'''
def __new__(mcs, cls_name, cls_bases, cls_dict):
if cls_name == 'LoaderModuleMockMixin':
return super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, cls_bases, cls_dict)
bases = list(cls_bases)
for idx, base in enumerate(bases):
if base.__name__ == 'LoaderModuleMockMixin':
bases.insert(0, bases.pop(idx))
break
# Create the class instance
instance = super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, tuple(bases), cls_dict)
# Apply our setUp function decorator
instance.setUp = LoaderModuleMockMixin.__setup_loader_modules_mocks__(instance.setUp)
return instance
class LoaderModuleMockMixin(six.with_metaclass(_FixLoaderModuleMockMixinMroOrder, object)):
'''
This class will setup salt loader dunders.
Please check `set_up_loader_mocks` above
'''
# Define our setUp function decorator
@staticmethod
def __setup_loader_modules_mocks__(setup_func):
@functools.wraps(setup_func)
def wrapper(self):
if NO_MOCK:
self.skipTest(NO_MOCK_REASON)
loader_modules_configs = self.setup_loader_modules()
if not isinstance(loader_modules_configs, dict):
raise RuntimeError(
'{}.setup_loader_modules() must return a dictionary where the keys are the '
'modules that require loader mocking setup and the values, the global module '
'variables for each of the module being mocked. For example \'__salt__\', '
'\'__opts__\', etc.'.format(self.__class__.__name__)
)
salt_dunders = (
'__opts__', '__salt__', '__runner__', '__context__', '__utils__',
'__ext_pillar__', '__thorium__', '__states__', '__serializers__', '__ret__',
'__grains__', '__pillar__', '__sdb__',
# Proxy is commented out on purpose since some code in salt expects a NameError
# and is most of the time not a required dunder
# '__proxy__'
)
for module, module_globals in six.iteritems(loader_modules_configs):
if not isinstance(module, types.ModuleType):
raise RuntimeError(
'The dictionary keys returned by {}.setup_loader_modules() '
'must be an imported module, not {}'.format(
self.__class__.__name__,
type(module)
)
)
if not isinstance(module_globals, dict):
raise RuntimeError(
'The dictionary values returned by {}.setup_loader_modules() '
'must be a dictionary, not {}'.format(
self.__class__.__name__,
type(module_globals)
)
)
module_blacklisted_dunders = module_globals.pop('blacklisted_dunders', ())
minion_funcs = {}
if '__salt__' in module_globals and module_globals['__salt__'] == 'autoload':
if '__opts__' not in module_globals:
raise RuntimeError(
'You must provide \'__opts__\' on the {} module globals dictionary '
'to auto load the minion functions'.format(module.__name__)
)
import salt.loader
ctx = {}
if '__utils__' not in module_globals:
utils = salt.loader.utils(module_globals['__opts__'],
context=module_globals.get('__context__') or ctx)
module_globals['__utils__'] = utils
minion_funcs = salt.loader.minion_mods(
module_globals['__opts__'],
context=module_globals.get('__context__') or ctx,
utils=module_globals.get('__utils__'),
)
module_globals['__salt__'] = minion_funcs
for dunder_name in salt_dunders:
if dunder_name not in module_globals:
if dunder_name in module_blacklisted_dunders:
continue
module_globals[dunder_name] = {}
sys_modules = module_globals.pop('sys.modules', None)
if sys_modules is not None:
if not isinstance(sys_modules, dict):
raise RuntimeError(
'\'sys.modules\' must be a dictionary not: {}'.format(
type(sys_modules)
)
)
patcher = patch.dict(sys.modules, sys_modules)
patcher.start()
def cleanup_sys_modules(patcher, sys_modules):
patcher.stop()
del patcher
del sys_modules
self.addCleanup(cleanup_sys_modules, patcher, sys_modules)
for key in module_globals:
if not hasattr(module, key):
if key in salt_dunders:
setattr(module, key, {})
else:
setattr(module, key, None)
if module_globals:
patcher = patch.multiple(module, **module_globals)
patcher.start()
def cleanup_module_globals(patcher, module_globals):
patcher.stop()
del patcher
del module_globals
self.addCleanup(cleanup_module_globals, patcher, module_globals)
if minion_funcs:
# Since we autoloaded the minion_funcs, let's namespace the functions with the globals
# used to patch above
import salt.utils
for func in minion_funcs:
minion_funcs[func] = salt.utils.functools.namespaced_function(
minion_funcs[func],
module_globals,
preserve_context=True
)
return setup_func(self)
return wrapper
def setup_loader_modules(self):
raise NotImplementedError(
'\'{}.setup_loader_modules()\' must be implemented'.format(self.__class__.__name__)
)
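# Illustrative subclass sketch (not part of this module): a test case opts into
# the loader mocking by returning a mapping of module -> dunder globals from
# setup_loader_modules(). The module under test below is only a placeholder.
#
#   import salt.modules.test as test_module
#
#   class TestModuleTestCase(TestCase, LoaderModuleMockMixin):
#       def setup_loader_modules(self):
#           return {test_module: {'__opts__': {'test': True}, '__salt__': {}}}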
class XMLEqualityMixin(object):
def assertEqualXML(self, e1, e2):
if six.PY3 and isinstance(e1, bytes):
e1 = e1.decode('utf-8')
if six.PY3 and isinstance(e2, bytes):
e2 = e2.decode('utf-8')
if isinstance(e1, six.string_types):
e1 = etree.XML(e1)
if isinstance(e2, six.string_types):
e2 = etree.XML(e2)
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
return all(self.assertEqualXML(c1, c2) for c1, c2 in zip(e1, e2))
class SaltReturnAssertsMixin(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty dictionary.'.format(
                    ret, {})
            )
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
ret_data = []
for part in six.itervalues(ret):
keys = self.__return_valid_keys(keys)
okeys = keys[:]
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
ret_data.append(ret_item)
return ret_data
def assertSaltTrueReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertTrue(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertFalse(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertIsNone(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertIn(in_comment, saltret)
def assertNotInSaltComment(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertNotIn(not_in_comment, saltret)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertIn(in_comment, saltret)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertNotIn(not_in_comment, saltret)
def assertInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertIn(item_to_check, saltret)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotIn(item_to_check, saltret)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertRegex(saltret, pattern)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertEqual(saltret, comparison)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotEqual(saltret, comparison)
def _fetch_events(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixin()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
pass
q.put(events)
class SaltMinionEventAssertsMixin(object):
'''
Asserts to verify that a given event was seen
'''
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=_fetch_events, args=(cls.q,)
)
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')
def assertMinionEventReceived(self, desired_event):
queue_wait = 5 # 2.5s
while self.q.empty():
time.sleep(0.5) # Wait for events to be pushed into the queue
queue_wait -= 1
if queue_wait <= 0:
raise AssertionError('Queue wait timer expired')
while not self.q.empty(): # This is not thread-safe and may be inaccurate
event = self.q.get()
if isinstance(event, dict):
event.pop('_stamp')
if desired_event == event:
self.fetch_proc.terminate()
return True
self.fetch_proc.terminate()
raise AssertionError('Event {0} was not received by minion'.format(desired_event))
|
the-stack_0_2923 | import random
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner.checkpoint import _load_checkpoint_with_prefix
from mmgen.core.runners.fp16_utils import auto_fp16
from mmgen.models.architectures import PixelNorm
from mmgen.models.architectures.common import get_module_device
from mmgen.models.architectures.stylegan.generator_discriminator_v2 import (
StyleGAN2Discriminator, StyleGANv2Generator)
from mmgen.models.architectures.stylegan.modules.styleganv2_modules import (
ConstantInput, ConvDownLayer, EqualLinearActModule, ModMBStddevLayer,
ModulatedStyleConv)
from mmgen.models.builder import MODULES
from .modules.swagan_modules import (ConvBlock, HaarTransform,
InverseHaarTransform, ModulatedFromRGB,
ModulatedToRGB)
@MODULES.register_module()
class SwaganGenerator(StyleGANv2Generator):
r"""StyleGAN2 Generator.
In StyleGAN2, we use a static architecture composing of a style mapping
module and number of convolutional style blocks. More details can be found
in: Analyzing and Improving the Image Quality of StyleGAN CVPR2020.
    You can load a pretrained model by passing information to the
    ``pretrained`` argument. We have already provided official weights as
follows:
- stylegan2-ffhq-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth # noqa
- stylegan2-horse-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-horse-config-f-official_20210327_173203-ef3e69ca.pth # noqa
- stylegan2-car-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-car-config-f-official_20210327_172340-8cfe053c.pth # noqa
- stylegan2-cat-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-cat-config-f-official_20210327_172444-15bc485b.pth # noqa
- stylegan2-church-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-church-config-f-official_20210327_172657-1d42b7d1.pth # noqa
    If you want to load the EMA model, you can just use the following code:
.. code-block:: python
# ckpt_http is one of the valid path from http source
generator = StyleGANv2Generator(1024, 512,
pretrained=dict(
ckpt_path=ckpt_http,
prefix='generator_ema'))
    Of course, you can also download the checkpoint in advance and set
    ``ckpt_path`` to a local path. If you just want to load the original
    generator (not the EMA model), please set the prefix to 'generator'.
    Note that our implementation allows generating BGR images, while the
    original StyleGAN2 outputs RGB images by default. Thus, we provide the
    ``bgr2rgb`` argument to convert the image space.
Args:
out_size (int): The output size of the StyleGAN2 generator.
style_channels (int): The number of channels for style code.
num_mlps (int, optional): The number of MLP layers. Defaults to 8.
channel_multiplier (int, optional): The multiplier factor for the
channel number. Defaults to 2.
blur_kernel (list, optional): The blurry kernel. Defaults
to [1, 3, 3, 1].
lr_mlp (float, optional): The learning rate for the style mapping
layer. Defaults to 0.01.
default_style_mode (str, optional): The default mode of style mixing.
In training, we defaultly adopt mixing style mode. However, in the
evaluation, we use 'single' style mode. `['mix', 'single']` are
currently supported. Defaults to 'mix'.
eval_style_mode (str, optional): The evaluation mode of style mixing.
Defaults to 'single'.
mix_prob (float, optional): Mixing probability. The value should be
in range of [0, 1]. Defaults to ``0.9``.
num_fp16_scales (int, optional): The number of resolutions to use auto
fp16 training. Different from ``fp16_enabled``, this argument
allows users to adopt FP16 training only in several blocks.
            This behaviour is much more similar to the official implementation
by Tero. Defaults to 0.
fp16_enabled (bool, optional): Whether to use fp16 training in this
module. If this flag is `True`, the whole module will be wrapped
with ``auto_fp16``. Defaults to False.
        pretrained (dict | None, optional): Information for pretrained models.
The necessary key is 'ckpt_path'. Besides, you can also provide
'prefix' to load the generator part from the whole state dict.
Defaults to None.
"""
def __init__(self,
out_size,
style_channels,
num_mlps=8,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
lr_mlp=0.01,
default_style_mode='mix',
eval_style_mode='single',
mix_prob=0.9,
num_fp16_scales=0,
fp16_enabled=False,
pretrained=None):
nn.Module.__init__(self)
self.out_size = out_size
self.style_channels = style_channels
self.num_mlps = num_mlps
self.channel_multiplier = channel_multiplier
self.lr_mlp = lr_mlp
self._default_style_mode = default_style_mode
self.default_style_mode = default_style_mode
self.eval_style_mode = eval_style_mode
self.mix_prob = mix_prob
self.num_fp16_scales = num_fp16_scales
self.fp16_enabled = fp16_enabled
# define style mapping layers
mapping_layers = [PixelNorm()]
for _ in range(num_mlps):
mapping_layers.append(
EqualLinearActModule(
style_channels,
style_channels,
equalized_lr_cfg=dict(lr_mul=lr_mlp, gain=1.),
act_cfg=dict(type='fused_bias')))
self.style_mapping = nn.Sequential(*mapping_layers)
self.channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
# constant input layer
self.constant_input = ConstantInput(self.channels[4])
# 4x4 stage
self.conv1 = ModulatedStyleConv(
self.channels[4],
self.channels[4],
kernel_size=3,
style_channels=style_channels,
blur_kernel=blur_kernel)
self.to_rgb1 = ModulatedToRGB(
self.channels[4],
style_channels,
upsample=False,
fp16_enabled=fp16_enabled)
# generator backbone (8x8 --> higher resolutions)
self.log_size = int(np.log2(self.out_size)) - 1
self.convs = nn.ModuleList()
self.upsamples = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
in_channels_ = self.channels[4]
for i in range(3, self.log_size + 1):
out_channels_ = self.channels[2**i]
            # If `fp16_enabled` is True, all layers will be run in auto
            # FP16. In the case of `num_fp16_scales` > 0, only some of the
            # layers will be run in fp16.
_use_fp16 = (self.log_size - i) < num_fp16_scales or fp16_enabled
self.convs.append(
ModulatedStyleConv(
in_channels_,
out_channels_,
3,
style_channels,
upsample=True,
blur_kernel=blur_kernel,
fp16_enabled=_use_fp16))
self.convs.append(
ModulatedStyleConv(
out_channels_,
out_channels_,
3,
style_channels,
upsample=False,
blur_kernel=blur_kernel,
fp16_enabled=_use_fp16))
self.to_rgbs.append(
ModulatedToRGB(
out_channels_,
style_channels,
upsample=True,
fp16_enabled=_use_fp16)) # set to global fp16
in_channels_ = out_channels_
self.num_latents = self.log_size * 2 - 2
self.num_injected_noises = self.num_latents - 1
self.iwt = InverseHaarTransform()
# register buffer for injected noises
for layer_idx in range(self.num_injected_noises):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
self.register_buffer(f'injected_noise_{layer_idx}',
torch.randn(*shape))
if pretrained is not None:
self._load_pretrained_model(**pretrained)
@auto_fp16()
def forward(self,
styles,
num_batches=-1,
return_noise=False,
return_latents=False,
inject_index=None,
truncation=1,
truncation_latent=None,
input_is_latent=False,
injected_noise=None,
randomize_noise=True):
"""Forward function.
This function has been integrated with the truncation trick. Please
refer to the usage of `truncation` and `truncation_latent`.
Args:
            styles (torch.Tensor | list[torch.Tensor] | callable | None): In
                StyleGAN2, you can provide a noise tensor or a latent tensor.
                Given a list containing more than one noise or latent tensor,
                the style-mixing trick will be used in training. Of course,
                you can directly give a batch of noise through a
                ``torch.Tensor`` or offer a callable function to sample a
                batch of noise data. Otherwise, ``None`` indicates using the
                default noise sampler.
            num_batches (int, optional): The batch size of the sampled noise.
                Defaults to -1.
return_noise (bool, optional): If True, ``noise_batch`` will be
returned in a dict with ``fake_img``. Defaults to False.
return_latents (bool, optional): If True, ``latent`` will be
returned in a dict with ``fake_img``. Defaults to False.
inject_index (int | None, optional): The index number for mixing
style codes. Defaults to None.
            truncation (float, optional): Truncation factor. If a value less
                than 1. is given, the truncation trick will be adopted.
                Defaults to 1.
truncation_latent (torch.Tensor, optional): Mean truncation latent.
Defaults to None.
input_is_latent (bool, optional): If `True`, the input tensor is
the latent tensor. Defaults to False.
injected_noise (torch.Tensor | None, optional): Given a tensor, the
random noise will be fixed as this input injected noise.
Defaults to None.
randomize_noise (bool, optional): If `False`, images are sampled
with the buffered noise tensor injected to the style conv
block. Defaults to True.
Returns:
torch.Tensor | dict: Generated image tensor or dictionary \
containing more data.
"""
# receive noise and conduct sanity check.
if isinstance(styles, torch.Tensor):
assert styles.shape[1] == self.style_channels
styles = [styles]
elif mmcv.is_seq_of(styles, torch.Tensor):
for t in styles:
assert t.shape[-1] == self.style_channels
# receive a noise generator and sample noise.
elif callable(styles):
device = get_module_device(self)
noise_generator = styles
assert num_batches > 0
if self.default_style_mode == 'mix' and random.random(
) < self.mix_prob:
styles = [
noise_generator((num_batches, self.style_channels))
for _ in range(2)
]
else:
styles = [noise_generator((num_batches, self.style_channels))]
styles = [s.to(device) for s in styles]
# otherwise, we will adopt default noise sampler.
else:
device = get_module_device(self)
assert num_batches > 0 and not input_is_latent
if self.default_style_mode == 'mix' and random.random(
) < self.mix_prob:
styles = [
torch.randn((num_batches, self.style_channels))
for _ in range(2)
]
else:
styles = [torch.randn((num_batches, self.style_channels))]
styles = [s.to(device) for s in styles]
if not input_is_latent:
noise_batch = styles
styles = [self.style_mapping(s) for s in styles]
else:
noise_batch = None
if injected_noise is None:
if randomize_noise:
injected_noise = [None] * self.num_injected_noises
else:
injected_noise = [
getattr(self, f'injected_noise_{i}')
for i in range(self.num_injected_noises)
]
# use truncation trick
if truncation < 1:
style_t = []
# calculate truncation latent on the fly
if truncation_latent is None and not hasattr(
self, 'truncation_latent'):
self.truncation_latent = self.get_mean_latent()
truncation_latent = self.truncation_latent
elif truncation_latent is None and hasattr(self,
'truncation_latent'):
truncation_latent = self.truncation_latent
for style in styles:
style_t.append(truncation_latent + truncation *
(style - truncation_latent))
styles = style_t
# no style mixing
if len(styles) < 2:
inject_index = self.num_latents
if styles[0].ndim < 3:
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else:
latent = styles[0]
# style mixing
else:
if inject_index is None:
inject_index = random.randint(1, self.num_latents - 1)
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(
1, self.num_latents - inject_index, 1)
latent = torch.cat([latent, latent2], 1)
# 4x4 stage
out = self.constant_input(latent)
out = self.conv1(out, latent[:, 0], noise=injected_noise[0])
skip = self.to_rgb1(out, latent[:, 1])
_index = 1
# 8x8 ---> higher resolutions
for up_conv, conv, noise1, noise2, to_rgb in zip(
self.convs[::2], self.convs[1::2], injected_noise[1::2],
injected_noise[2::2], self.to_rgbs):
out = up_conv(out, latent[:, _index], noise=noise1)
out = conv(out, latent[:, _index + 1], noise=noise2)
skip = to_rgb(out, latent[:, _index + 2], skip)
_index += 2
img = self.iwt(skip)
        # make sure the output image is torch.float32 to avoid a RuntimeError
        # in other modules
img = img.to(torch.float32)
if return_latents or return_noise:
output_dict = dict(
fake_img=img,
latent=latent,
inject_index=inject_index,
noise_batch=noise_batch)
return output_dict
return img
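# ---------------------------------------------------------------------------
# Illustrative usage of the generator's ``forward`` above (not part of the
# original module). The generator class name is not visible in this excerpt,
# so ``gen`` below stands for an instance of it; arguments follow the
# docstring of ``forward``:
#
#   imgs = gen(None, num_batches=4)                  # default noise sampler
#   imgs = gen(None, num_batches=4, truncation=0.7)  # truncation trick
#   outs = gen(torch.randn(4, gen.style_channels), return_latents=True)
#   # `outs` is a dict containing 'fake_img', 'latent', 'noise_batch', ...
# ---------------------------------------------------------------------------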
@MODULES.register_module()
class SwaganDiscriminator(StyleGAN2Discriminator):
"""StyleGAN2 Discriminator.
The architecture of this discriminator is proposed in StyleGAN2. More
details can be found in: Analyzing and Improving the Image Quality of
StyleGAN CVPR2020.
You can load pretrained model through passing information into
``pretrained`` argument. We have already offered official weights as
follows:
- stylegan2-ffhq-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth # noqa
- stylegan2-horse-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-horse-config-f-official_20210327_173203-ef3e69ca.pth # noqa
- stylegan2-car-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-car-config-f-official_20210327_172340-8cfe053c.pth # noqa
- stylegan2-cat-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-cat-config-f-official_20210327_172444-15bc485b.pth # noqa
- stylegan2-church-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-church-config-f-official_20210327_172657-1d42b7d1.pth # noqa
    If you want to load a pretrained model, you can simply use the following code:
.. code-block:: python
        # ckpt_http is one of the valid paths from the http source
discriminator = StyleGAN2Discriminator(1024, 512,
pretrained=dict(
ckpt_path=ckpt_http,
prefix='discriminator'))
    Of course, you can also download the checkpoint in advance and set
    ``ckpt_path`` to a local path.
    Note that our implementation adopts BGR images as input, while the
    original StyleGAN2 provides RGB images to the discriminator. Thus, we
    provide a ``bgr2rgb`` argument to convert the color space. If your images
    follow the RGB order, please set it to ``True`` accordingly.
Args:
in_size (int): The input size of images.
channel_multiplier (int, optional): The multiplier factor for the
channel number. Defaults to 2.
        blur_kernel (list, optional): The blur kernel. Defaults
to [1, 3, 3, 1].
mbstd_cfg (dict, optional): Configs for minibatch-stddev layer.
Defaults to dict(group_size=4, channel_groups=1).
num_fp16_scales (int, optional): The number of resolutions to use auto
fp16 training. Defaults to 0.
fp16_enabled (bool, optional): Whether to use fp16 training in this
module. Defaults to False.
out_fp32 (bool, optional): Whether to convert the output feature map to
`torch.float32`. Defaults to `True`.
convert_input_fp32 (bool, optional): Whether to convert input type to
fp32 if not `fp16_enabled`. This argument is designed to deal with
the cases where some modules are run in FP16 and others in FP32.
Defaults to True.
        pretrained (dict | None, optional): Information for pretrained models.
            The necessary key is 'ckpt_path'. Besides, you can also provide
            'prefix' to load the discriminator part from the whole state dict.
Defaults to None.
"""
def __init__(self,
in_size,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
mbstd_cfg=dict(group_size=4, channel_groups=1),
num_fp16_scales=0,
fp16_enabled=False,
out_fp32=True,
convert_input_fp32=True,
pretrained=None):
nn.Module.__init__(self)
self.num_fp16_scale = num_fp16_scales
self.fp16_enabled = fp16_enabled
self.convert_input_fp32 = convert_input_fp32
self.out_fp32 = out_fp32
channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
log_size = int(np.log2(in_size)) - 1
in_channels = channels[in_size]
_use_fp16 = num_fp16_scales > 0
from_rgbs = []
convs = []
for i in range(log_size, 2, -1):
out_channel = channels[2**(i - 1)]
# add fp16 training for higher resolutions
_use_fp16 = (log_size - i) < num_fp16_scales or fp16_enabled
from_rgbs.append(
ModulatedFromRGB(
in_channels,
downsample=i != log_size,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
convs.append(
ConvBlock(
in_channels,
out_channel,
blur_kernel,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
in_channels = out_channel
from_rgbs.append(
ModulatedFromRGB(
channels[4],
downsample=True,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
self.from_rgbs = nn.Sequential(*from_rgbs)
self.convs = nn.Sequential(*convs)
self.mbstd_layer = ModMBStddevLayer(**mbstd_cfg)
self.final_conv = ConvDownLayer(in_channels + 1, channels[4], 3)
self.final_linear = nn.Sequential(
EqualLinearActModule(
channels[4] * 4 * 4,
channels[4],
act_cfg=dict(type='fused_bias')),
EqualLinearActModule(channels[4], 1),
)
self.dwt = HaarTransform()
if pretrained is not None:
self._load_pretrained_model(**pretrained)
def _load_pretrained_model(self,
ckpt_path,
prefix='',
map_location='cpu',
strict=True):
state_dict = _load_checkpoint_with_prefix(prefix, ckpt_path,
map_location)
self.load_state_dict(state_dict, strict=strict)
mmcv.print_log(f'Load pretrained model from {ckpt_path}', 'mmgen')
@auto_fp16()
def forward(self, x):
"""Forward function.
Args:
x (torch.Tensor): Input image tensor.
Returns:
torch.Tensor: Predict score for the input image.
"""
x = self.dwt(x)
out = None
for from_rgb, conv in zip(self.from_rgbs, self.convs):
x, out = from_rgb(x, out)
out = conv(out)
_, out = self.from_rgbs[-1](x, out)
x = self.mbstd_layer(out)
if not self.final_conv.fp16_enabled and self.convert_input_fp32:
x = x.to(torch.float32)
x = self.final_conv(x)
x = x.view(x.shape[0], -1)
x = self.final_linear(x)
return x
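# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# mmgen ops imported by this file are available; shapes follow the docstring
# (``in_size`` is the spatial size of the input images).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    disc = SwaganDiscriminator(in_size=256)
    fake_imgs = torch.randn(2, 3, 256, 256)
    scores = disc(fake_imgs)  # one realness score per input image
    print(scores.shape)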
|
the-stack_0_2924 | import hashlib
import os
from shutil import move
from tempfile import mkstemp
BLOCKSIZE = 65535
def find_hash(hash_file, plan_name):
# Try to find the hash in the hash file
filename = os.path.normpath(hash_file)
if os.path.isfile(filename):
plan_hashes = open(filename, 'r').readlines()
for line in plan_hashes:
parts = line.strip().split('=')
if len(parts) == 2 and parts[0] == plan_name:
return parts[1]
return None
def update_hash(hash_file, plan_name, hash_value):
# Do the update (create the file if it doesn't exist)
filename = os.path.normpath(hash_file)
# If it doesn't exist, we shortcut this
if not os.path.isfile(hash_file):
with open(hash_file, 'w') as new_file:
new_file.write('%s=%s\n' % (plan_name, hash_value))
return
# Otherwise, we need to rebuild the file
fh, abs_path = mkstemp()
is_written = False
with open(abs_path, 'w') as new_file:
with open(filename, 'r') as old_file:
# Handle existing entries in the file
for line in old_file:
parts = line.strip().split('=')
if parts[0] == plan_name:
is_written = True
new_file.write('%s=%s\n' % (plan_name, hash_value))
else:
new_file.write(line)
# If the hash wasn't already in the file
if not is_written:
new_file.write('%s=%s\n' % (plan_name, hash_value))
os.close(fh)
# Remove original file
os.remove(hash_file)
# Move new file
move(abs_path, hash_file)
def calc_hash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
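# ---------------------------------------------------------------------------
# Illustrative usage of the helpers above (not part of the original module).
# The file names below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    plan_file = 'my_plan.sql'
    hash_store = 'plan_hashes.txt'
    digest = calc_hash(plan_file)
    if find_hash(hash_store, 'my_plan') != digest:
        # The plan changed (or was never recorded), so persist the new hash.
        update_hash(hash_store, 'my_plan', digest)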
|
the-stack_0_2926 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for input/output types in KFP SDK.
These are only compatible with v2 Pipelines.
"""
import os
from typing import Dict, Generic, List, Optional, Type, TypeVar, Union
_GCS_LOCAL_MOUNT_PREFIX = '/gcs/'
class Artifact(object):
"""Generic Artifact class.
This class is meant to represent the metadata around an input or output
machine-learning Artifact. Artifacts have URIs, which can either be a location
on disk (or Cloud storage) or some other resource identifier such as
an API resource name.
Artifacts carry a `metadata` field, which is a dictionary for storing
metadata related to this artifact.
"""
TYPE_NAME = 'system.Artifact'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
"""Initializes the Artifact with the given name, URI and metadata."""
self.uri = uri or ''
self.name = name or ''
self.metadata = metadata or {}
@property
def path(self):
return self._get_path()
@path.setter
def path(self, path):
self._set_path(path)
def _get_path(self) -> str:
if self.uri.startswith('gs://'):
return _GCS_LOCAL_MOUNT_PREFIX + self.uri[len('gs://'):]
def _set_path(self, path):
if path.startswith(_GCS_LOCAL_MOUNT_PREFIX):
path = 'gs://' + path[len(_GCS_LOCAL_MOUNT_PREFIX):]
self.uri = path
class Model(Artifact):
"""An artifact representing an ML Model."""
TYPE_NAME = 'system.Model'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
@property
def framework(self) -> str:
return self._get_framework()
def _get_framework(self) -> str:
return self.metadata.get('framework', '')
@framework.setter
def framework(self, framework: str):
self._set_framework(framework)
def _set_framework(self, framework: str):
self.metadata['framework'] = framework
class Dataset(Artifact):
"""An artifact representing an ML Dataset."""
TYPE_NAME = 'system.Dataset'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
class Metrics(Artifact):
"""Represent a simple base Artifact type to store key-value scalar metrics."""
TYPE_NAME = 'system.Metrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
def log_metric(self, metric: str, value: float):
"""Sets a custom scalar metric.
Args:
metric: Metric key
value: Value of the metric.
"""
self.metadata[metric] = value
class ClassificationMetrics(Artifact):
"""Represents Artifact class to store Classification Metrics."""
TYPE_NAME = 'system.ClassificationMetrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
def log_roc_data_point(self, fpr: float, tpr: float, threshold: float):
"""Logs a single data point in the ROC Curve.
Args:
fpr: False positive rate value of the data point.
tpr: True positive rate value of the data point.
threshold: Threshold value for the data point.
"""
roc_reading = {
'confidenceThreshold': threshold,
'recall': tpr,
'falsePositiveRate': fpr
}
if 'confidenceMetrics' not in self.metadata.keys():
self.metadata['confidenceMetrics'] = []
self.metadata['confidenceMetrics'].append(roc_reading)
def log_roc_curve(self, fpr: List[float], tpr: List[float],
threshold: List[float]):
"""Logs an ROC curve.
The list length of fpr, tpr and threshold must be the same.
Args:
fpr: List of false positive rate values.
tpr: List of true positive rate values.
threshold: List of threshold values.
"""
if len(fpr) != len(tpr) or len(fpr) != len(threshold) or len(tpr) != len(
threshold):
raise ValueError('Length of fpr, tpr and threshold must be the same. '
'Got lengths {}, {} and {} respectively.'.format(
len(fpr), len(tpr), len(threshold)))
for i in range(len(fpr)):
self.log_roc_data_point(fpr=fpr[i], tpr=tpr[i], threshold=threshold[i])
def set_confusion_matrix_categories(self, categories: List[str]):
"""Stores confusion matrix categories.
Args:
categories: List of strings specifying the categories.
"""
self._categories = []
annotation_specs = []
for category in categories:
annotation_spec = {'displayName': category}
self._categories.append(category)
annotation_specs.append(annotation_spec)
self._matrix = []
for row in range(len(self._categories)):
self._matrix.append({'row': [0] * len(self._categories)})
self._confusion_matrix = {}
self._confusion_matrix['annotationSpecs'] = annotation_specs
self._confusion_matrix['rows'] = self._matrix
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix_row(self, row_category: str, row: List[float]):
"""Logs a confusion matrix row.
Args:
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
Raises:
ValueError: If row_category is not in the list of categories
set in set_categories call.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if len(row) != len(self._categories):
raise ValueError('Invalid row. Expected size: {} got: {}'.\
format(len(self._categories), len(row)))
self._matrix[self._categories.index(row_category)] = {'row': row}
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix_cell(self, row_category: str, col_category: str,
value: int):
"""Logs a cell in the confusion matrix.
Args:
row_category: String representing the name of the row category.
col_category: String representing the name of the column category.
value: Int value of the cell.
Raises:
ValueError: If row_category or col_category is not in the list of
categories set in set_categories.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if col_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
        format(col_category, self._categories))
self._matrix[self._categories.index(row_category)]['row'][
self._categories.index(col_category)] = value
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix(self, categories: List[str],
matrix: List[List[int]]):
"""Logs a confusion matrix.
Args:
categories: List of the category names.
matrix: Complete confusion matrix.
Raises:
ValueError: Length of categories does not match number of rows or columns.
"""
self.set_confusion_matrix_categories(categories)
if len(matrix) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
for index in range(len(categories)):
if len(matrix[index]) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
self.log_confusion_matrix_row(categories[index], matrix[index])
self.metadata['confusionMatrix'] = self._confusion_matrix
class SlicedClassificationMetrics(Artifact):
"""Metrics class representing Sliced Classification Metrics.
  Similar to ClassificationMetrics, clients using this class are expected to
  use its log methods to log metrics, with the difference that each log method
  takes a slice with which to associate the ClassificationMetrics.
"""
TYPE_NAME = 'system.SlicedClassificationMetrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
    super().__init__(uri=uri, name=name, metadata=metadata)
    # Holds one ClassificationMetrics instance per slice label.
    self._sliced_metrics = {}
def _upsert_classification_metrics_for_slice(self, slice: str):
"""Upserts the classification metrics instance for a slice."""
if slice not in self._sliced_metrics:
self._sliced_metrics[slice] = ClassificationMetrics()
def _update_metadata(self, slice: str):
"""Updates metadata to adhere to the metrics schema."""
self.metadata = {}
self.metadata['evaluationSlices'] = []
for slice in self._sliced_metrics.keys():
slice_metrics = {
'slice': slice,
'sliceClassificationMetrics': self._sliced_metrics[slice].metadata
}
self.metadata['evaluationSlices'].append(slice_metrics)
def log_roc_reading(self, slice: str, threshold: float, tpr: float,
fpr: float):
"""Logs a single data point in the ROC Curve of a slice.
Args:
slice: String representing slice label.
      threshold: Threshold value for the data point.
tpr: True positive rate value of the data point.
fpr: False positive rate value of the data point.
"""
self._upsert_classification_metrics_for_slice(slice)
    self._sliced_metrics[slice].log_roc_data_point(
        fpr=fpr, tpr=tpr, threshold=threshold)
self._update_metadata(slice)
def load_roc_readings(self, slice: str, readings: List[List[float]]):
"""Supports bulk loading ROC Curve readings for a slice.
Args:
slice: String representing slice label.
readings: A 2-D list providing ROC Curve data points.
The expected order of the data points is: threshold,
true_positive_rate, false_positive_rate.
"""
self._upsert_classification_metrics_for_slice(slice)
    # ClassificationMetrics has no bulk loader, so log each reading in the
    # documented order: [threshold, true_positive_rate, false_positive_rate].
    for reading in readings:
      self._sliced_metrics[slice].log_roc_data_point(
          fpr=reading[2], tpr=reading[1], threshold=reading[0])
self._update_metadata(slice)
def set_confusion_matrix_categories(self, slice: str, categories: List[str]):
"""Stores confusion matrix categories for a slice..
Categories are stored in the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
categories: List of strings specifying the categories.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].set_confusion_matrix_categories(categories)
self._update_metadata(slice)
def log_confusion_matrix_row(self, slice: str, row_category: str,
row: List[int]):
"""Logs a confusion matrix row for a slice.
Row is updated on the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].log_confusion_matrix_row(row_category, row)
self._update_metadata(slice)
def log_confusion_matrix_cell(self, slice: str, row_category: str,
col_category: str, value: int):
"""Logs a confusion matrix cell for a slice..
Cell is updated on the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
row_category: String representing the name of the row category.
col_category: String representing the name of the column category.
value: Int value of the cell.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].log_confusion_matrix_cell(
row_category, col_category, value)
self._update_metadata(slice)
def load_confusion_matrix(self, slice: str, categories: List[str],
matrix: List[List[int]]):
"""Supports bulk loading the whole confusion matrix for a slice.
Args:
slice: String representing slice label.
categories: List of the category names.
matrix: Complete confusion matrix.
"""
self._upsert_classification_metrics_for_slice(slice)
    self._sliced_metrics[slice].log_confusion_matrix(categories, matrix)
self._update_metadata(slice)
T = TypeVar('T')
class InputAnnotation():
"""Marker type for input artifacts."""
pass
class OutputAnnotation():
"""Marker type for output artifacts."""
pass
# TODO: Use typing.Annotated instead of this hack.
# With typing.Annotated (Python 3.9+ or typing_extensions package), the
# following would look like:
# Input = typing.Annotated[T, InputAnnotation]
# Output = typing.Annotated[T, OutputAnnotation]
# Input represents an Input artifact of type T.
Input = Union[T, InputAnnotation]
# Output represents an Output artifact of type T.
Output = Union[T, OutputAnnotation]
def is_artifact_annotation(typ) -> bool:
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[0] == Union and subs_tree[2] in [InputAnnotation, OutputAnnotation]
if not hasattr(typ, '__origin__'):
return False
if typ.__origin__ != Union and type(typ.__origin__) != type(Union):
return False
if not hasattr(typ, '__args__') or len(typ.__args__) != 2:
return False
if typ.__args__[1] not in [InputAnnotation, OutputAnnotation]:
return False
return True
def is_input_artifact(typ) -> bool:
"""Returns True if typ is of type Input[T]."""
if not is_artifact_annotation(typ):
return False
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[2] == InputAnnotation
return typ.__args__[1] == InputAnnotation
def is_output_artifact(typ) -> bool:
"""Returns True if typ is of type Output[T]."""
if not is_artifact_annotation(typ):
return False
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[2] == OutputAnnotation
return typ.__args__[1] == OutputAnnotation
def get_io_artifact_class(typ):
if not is_artifact_annotation(typ):
return None
if typ == Input or typ == Output:
return None
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
if len(subs_tree) != 3:
return None
return subs_tree[1]
return typ.__args__[0]
def get_io_artifact_annotation(typ):
if not is_artifact_annotation(typ):
return None
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
if len(subs_tree) != 3:
return None
return subs_tree[2]
return typ.__args__[1]
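# Illustrative behaviour of the helpers above (assumes Python 3.7+ typing,
# where a Union exposes ``__origin__`` and ``__args__``):
#   is_input_artifact(Input[Model])          -> True
#   is_output_artifact(Output[Dataset])      -> True
#   get_io_artifact_class(Input[Model])      -> Model
#   get_io_artifact_annotation(Input[Model]) -> InputAnnotation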
_SCHEMA_TITLE_TO_TYPE: Dict[str, Artifact] = {
x.TYPE_NAME: x
for x in [Artifact, Model, Dataset, Metrics, ClassificationMetrics]
}
def create_runtime_artifact(runtime_artifact: Dict) -> Artifact:
"""Creates an Artifact instance from the specified RuntimeArtifact.
Args:
runtime_artifact: Dictionary representing JSON-encoded RuntimeArtifact.
"""
schema_title = runtime_artifact.get('type', {}).get('schemaTitle', '')
artifact_type = _SCHEMA_TITLE_TO_TYPE.get(schema_title)
if not artifact_type:
artifact_type = Artifact
return artifact_type(
uri=runtime_artifact.get('uri', ''),
name=runtime_artifact.get('name', ''),
metadata=runtime_artifact.get('metadata', {}),
)
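# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the URI below
# is hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  metrics = ClassificationMetrics(name='eval-metrics')
  metrics.log_roc_curve(
      fpr=[0.0, 0.1, 1.0], tpr=[0.0, 0.8, 1.0], threshold=[1.0, 0.5, 0.0])
  metrics.log_confusion_matrix(['cat', 'dog'], [[9, 1], [2, 8]])

  # Rebuild a typed Artifact from a JSON-style RuntimeArtifact dictionary.
  artifact = create_runtime_artifact({
      'name': 'model',
      'uri': 'gs://my-bucket/model',
      'type': {'schemaTitle': 'system.Model'},
  })
  print(type(artifact).__name__, artifact.path)  # Model /gcs/my-bucket/model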
|
the-stack_0_2928 | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Hans Baier <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
# https://www.aliexpress.com/item/1000006630084.html
import os
import argparse
from migen import *
from litex_boards.platforms import qmtech_xc7a35t
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.video import VideoVGAPHY
from litex.soc.cores.led import LedChaser
from litedram.modules import MT41J128M16
from litedram.phy import s7ddrphy
from liteeth.phy.mii import LiteEthPHYMII
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, with_ethernet, with_vga):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
self.clock_domains.cd_eth = ClockDomain()
if with_ethernet:
self.clock_domains.cd_eth = ClockDomain()
if with_vga:
self.clock_domains.cd_vga = ClockDomain(reset_less=True)
# # #
self.submodules.pll = pll = S7PLL(speedgrade=-1)
try:
reset_button = platform.request("cpu_reset")
self.comb += pll.reset.eq(~reset_button | self.rst)
except:
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request("clk50"), 50e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4*sys_clk_freq, phase=90)
pll.create_clkout(self.cd_idelay, 200e6)
if with_ethernet:
pll.create_clkout(self.cd_eth, 25e6)
if with_vga:
pll.create_clkout(self.cd_vga, 40e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
mem_map = {**SoCCore.mem_map, **{"spiflash": 0x80000000}}
def __init__(self, toolchain="vivado", sys_clk_freq=int(100e6), with_daughterboard=False,
with_ethernet=False, with_etherbone=False, eth_ip="192.168.1.50", eth_dynamic_ip=False,
with_led_chaser=True, with_video_terminal=False, with_video_framebuffer=False,
ident_version=True, with_jtagbone=True, with_spi_flash=False, **kwargs):
platform = qmtech_xc7a35t.Platform(toolchain=toolchain, with_daughterboard=with_daughterboard)
# SoCCore ----------------------------------------------------------------------------------
if kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "jtag_uart"
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on QMTech XC7A35T" + (" + Daughterboard" if with_daughterboard else ""),
ident_version = ident_version,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, with_ethernet or with_etherbone, with_video_terminal or with_video_framebuffer)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41J128M16(sys_clk_freq, "1:4"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
if with_ethernet:
self.add_ethernet(phy=self.ethphy, dynamic_ip=eth_dynamic_ip)
if with_etherbone:
self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)
# The daughterboard has the tx clock wired to a non-clock pin, so we can't help it
self.platform.add_platform_command("set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets eth_clocks_tx_IBUF]")
# Jtagbone ---------------------------------------------------------------------------------
if with_jtagbone:
self.add_jtagbone()
# SPI Flash --------------------------------------------------------------------------------
if with_spi_flash:
from litespi.modules import MT25QL128
from litespi.opcodes import SpiNorFlashOpCodes as Codes
self.add_spi_flash(mode="4x", module=MT25QL128(Codes.READ_1_1_1), with_master=True)
# Video ------------------------------------------------------------------------------------
if with_video_terminal or with_video_framebuffer:
self.submodules.videophy = VideoVGAPHY(platform.request("vga"), clock_domain="vga")
if with_video_terminal:
self.add_video_terminal(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
if with_video_framebuffer:
self.add_video_framebuffer(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
if not with_daughterboard and kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "jtag_serial"
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on QMTech XC7A35T")
parser.add_argument("--toolchain", default="vivado", help="Toolchain use to build (default: vivado)")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--sys-clk-freq", default=100e6, help="System clock frequency (default: 100MHz)")
parser.add_argument("--with-daughterboard", action="store_true", help="Whether the core board is plugged into the QMTech daughterboard")
ethopts = parser.add_mutually_exclusive_group()
ethopts.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support")
ethopts.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support")
parser.add_argument("--eth-ip", default="192.168.1.50", type=str, help="Ethernet/Etherbone IP address")
parser.add_argument("--eth-dynamic-ip", action="store_true", help="Enable dynamic Ethernet IP addresses setting")
sdopts = parser.add_mutually_exclusive_group()
sdopts.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support")
sdopts.add_argument("--with-sdcard", action="store_true", help="Enable SDCard support")
parser.add_argument("--no-ident-version", action="store_false", help="Disable build time output")
parser.add_argument("--with-jtagbone", action="store_true", help="Enable Jtagbone support")
parser.add_argument("--with-spi-flash", action="store_true", help="Enable SPI Flash (MMAPed)")
viopts = parser.add_mutually_exclusive_group()
viopts.add_argument("--with-video-terminal", action="store_true", help="Enable Video Terminal (VGA)")
viopts.add_argument("--with-video-framebuffer", action="store_true", help="Enable Video Framebuffer (VGA)")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
args = parser.parse_args()
soc = BaseSoC(
toolchain = args.toolchain,
sys_clk_freq = int(float(args.sys_clk_freq)),
with_daughterboard = args.with_daughterboard,
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
eth_ip = args.eth_ip,
eth_dynamic_ip = args.eth_dynamic_ip,
ident_version = args.no_ident_version,
with_jtagbone = args.with_jtagbone,
with_spi_flash = args.with_spi_flash,
with_video_terminal = args.with_video_terminal,
with_video_framebuffer = args.with_video_framebuffer,
**soc_core_argdict(args)
)
if args.with_spi_sdcard:
soc.add_spi_sdcard()
if args.with_sdcard:
soc.add_sdcard()
builder = Builder(soc, **builder_argdict(args))
builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {}
builder.build(**builder_kwargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
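# Example invocations (illustrative; the script name is assumed and the flags
# correspond to the argparse options defined in main() above):
#   python3 qmtech_xc7a35t.py --build --with-ethernet --eth-ip 192.168.1.50
#   python3 qmtech_xc7a35t.py --load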
|
the-stack_0_2929 | import os
import cv2
from PIL import Image
import numpy as np
import pickle
base = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(base, "images")
face_cascade = cv2.CascadeClassifier(
cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')
# LBPH is a data algorithm that is used mostly for face recignition, there is a lot other than that, like AdaBoost algorithm etc
recognizer = cv2.face.LBPHFaceRecognizer_create()
currentid = 0
label_ids = {}
x_train = []
y_labels = []
for root, dirs, files in os.walk(image_dir):
for file in files:
if file.endswith("png") or file.endswith("jpg"):
path = os.path.join(root, file)
label = os.path.basename(root).replace(" ", "-").lower()
print(label, path)
if not label in label_ids:
label_ids[label] = currentid
currentid += 1
id_ = label_ids[label]
print(label_ids)
pil_image = Image.open(path).convert(
"L") # converting into grayscale
image_array = np.array(pil_image, "uint8")
print(image_array)
whatface = face_cascade.detectMultiScale(
image_array, scaleFactor=1.5, minNeighbors=5)
for (x, y, w, h) in whatface:
roi = image_array[y:y+h, x:x+w]
x_train.append(roi)
y_labels.append(id_)
# print(y_labels)
# print(x_train)
# `with open(...) as f` opens a file called "labels.pickle" and binds it to `f`;
# the mode string says what we want to do with it. Here "wb" means "write
# binary", i.e. we want to write to `f` in binary mode.
with open("labels.pickle", "wb") as f:
    # pickle.dump serializes the label_ids dict (label name -> id) built above into the file `f`
pickle.dump(label_ids, f)
recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
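# ---------------------------------------------------------------------------
# Illustrative follow-up (not part of the original script): how the artifacts
# written above ("trainer.yml" and "labels.pickle") could be loaded later to
# predict a label for a new grayscale face crop `roi_gray`:
#
#   recognizer = cv2.face.LBPHFaceRecognizer_create()
#   recognizer.read("trainer.yml")
#   with open("labels.pickle", "rb") as f:
#       labels = {v: k for k, v in pickle.load(f).items()}
#   id_, conf = recognizer.predict(roi_gray)
#   print(labels[id_], conf)
# ---------------------------------------------------------------------------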
|
the-stack_0_2930 | # Source : https://leetcode.com/problems/find-all-anagrams-in-a-string/
# Author : YipingPan
# Date : 2020-08-13
#####################################################################################################
#
# Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
#
# Strings consists of lowercase English letters only and the length of both strings s and p will not
# be larger than 20,100.
#
# The order of output does not matter.
#
# Example 1:
#
# Input:
# s: "cbaebabacd" p: "abc"
#
# Output:
# [0, 6]
#
# Explanation:
# The substring with start index = 0 is "cba", which is an anagram of "abc".
# The substring with start index = 6 is "bac", which is an anagram of "abc".
#
# Example 2:
#
# Input:
# s: "abab" p: "ab"
#
# Output:
# [0, 1, 2]
#
# Explanation:
# The substring with start index = 0 is "ab", which is an anagram of "ab".
# The substring with start index = 1 is "ba", which is an anagram of "ab".
# The substring with start index = 2 is "ab", which is an anagram of "ab".
#
#####################################################################################################
from typing import List


class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
if len(s)<len(p): return []
cp = [0]*26
cs = [0]*26
def idx(x):
return ord(x) - ord('a')
for x in p:
cp[idx(x)] += 1
for x in s[:len(p)]:
cs[idx(x)] += 1
res = []
i = len(p)-1
while (1):
if cs == cp:
res.append(i-len(p)+1)
i += 1
if i == len(s):
break
cs[idx(s[i-len(p)])] -= 1
cs[idx(s[i])] += 1
return res
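# ---------------------------------------------------------------------------
# Illustrative check of the sliding-window counting solution above (not part
# of the original snippet); it mirrors the examples in the problem statement.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sol = Solution()
    print(sol.findAnagrams("cbaebabacd", "abc"))  # [0, 6]
    print(sol.findAnagrams("abab", "ab"))         # [0, 1, 2]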
|
the-stack_0_2931 | # Python test set -- part 5, built-in exceptions
import copy
import gc
import os
import sys
import unittest
import pickle
import weakref
import errno
from test.support import (TESTFN, captured_stderr, check_impl_detail,
check_warnings, cpython_only, gc_collect,
no_tracing, unlink, import_module, script_helper,
SuppressCrashReport)
from test import support
class NaiveException(Exception):
def __init__(self, x):
self.x = x
class SlottedNaiveException(Exception):
__slots__ = ('x',)
def __init__(self, x):
self.x = x
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
try:
raise exc("spam")
except exc as err:
buf1 = str(err)
try:
raise exc("spam")
except exc as err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
import marshal
marshal.loads(b'')
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(OSError, "OSError")
self.assertRaises(OSError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(RecursionError, "RecursionError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec('/\n')
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n",
'<string>', 'exec')
except TabError: pass
else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 17<<16)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception as e: pass
self.raise_catch(StopAsyncIteration, "StopAsyncIteration")
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSyntaxErrorMissingParens(self):
def ckmsg(src, msg, exception=SyntaxError):
try:
compile(src, '<fragment>', 'exec')
except exception as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''print "old style"'''
ckmsg(s, "Missing parentheses in call to 'print'. "
"Did you mean print(\"old style\")?")
s = '''print "old style",'''
ckmsg(s, "Missing parentheses in call to 'print'. "
"Did you mean print(\"old style\", end=\" \")?")
s = '''exec "old style"'''
ckmsg(s, "Missing parentheses in call to 'exec'")
# should not apply to subclasses, see issue #31161
s = '''if True:\nprint "No indent"'''
ckmsg(s, "expected an indented block", IndentationError)
s = '''if True:\n print()\n\texec "mixed tabs and spaces"'''
ckmsg(s, "inconsistent use of tabs and spaces in indentation", TabError)
def testSyntaxErrorOffset(self):
def check(src, lineno, offset, encoding='utf-8'):
with self.assertRaises(SyntaxError) as cm:
compile(src, '<fragment>', 'exec')
self.assertEqual(cm.exception.lineno, lineno)
self.assertEqual(cm.exception.offset, offset)
if cm.exception.text is not None:
if not isinstance(src, str):
src = src.decode(encoding, 'replace')
line = src.split('\n')[lineno-1]
self.assertEqual(cm.exception.text.rstrip('\n'), line)
check('def fact(x):\n\treturn x!\n', 2, 10)
check('1 +\n', 1, 4)
check('def spam():\n print(1)\n print(2)', 3, 10)
check('Python = "Python" +', 1, 20)
check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
check(b'# -*- coding: cp1251 -*-\nPython = "\xcf\xb3\xf2\xee\xed" +',
2, 19, encoding='cp1251')
check(b'Python = "\xcf\xb3\xf2\xee\xed" +', 1, 18)
check('x = "a', 1, 7)
check('lambda x: x = 2', 1, 1)
# Errors thrown by compile.c
check('class foo:return 1', 1, 11)
check('def f():\n continue', 2, 3)
check('def f():\n break', 2, 3)
check('try:\n pass\nexcept:\n pass\nexcept ValueError:\n pass', 2, 3)
# Errors thrown by tokenizer.c
check('(0x+1)', 1, 3)
check('x = 0xI', 1, 6)
check('0010 + 2', 1, 4)
check('x = 32e-+4', 1, 8)
check('x = 0o9', 1, 6)
check('\u03b1 = 0xI', 1, 6)
check(b'\xce\xb1 = 0xI', 1, 6)
check(b'# -*- coding: iso8859-7 -*-\n\xe1 = 0xI', 2, 6,
encoding='iso8859-7')
# Errors thrown by symtable.c
check('x = [(yield i) for i in range(3)]', 1, 5)
check('def f():\n from _ import *', 1, 1)
check('def f(x, x):\n pass', 1, 1)
check('def f(x):\n nonlocal x', 2, 3)
check('def f(x):\n x = 1\n global x', 3, 3)
check('nonlocal x', 1, 1)
check('def f():\n global x\n nonlocal x', 2, 3)
# Errors thrown by ast.c
check('for 1 in []: pass', 1, 5)
check('def f(*):\n pass', 1, 7)
check('[*x for x in xs]', 1, 2)
check('def f():\n x, y: int', 2, 3)
check('(yield i) = 2', 1, 1)
check('foo(x for x in range(10), 100)', 1, 5)
check('foo(1=2)', 1, 5)
# Errors thrown by future.c
check('from __future__ import doesnt_exist', 1, 1)
check('from __future__ import braces', 1, 1)
check('x=1\nfrom __future__ import division', 2, 1)
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException(Exception):
def __init__(self_):
raise RuntimeError("can't instantiate BadException")
class InvalidException:
pass
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEqual(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
def test_capi3():
import _testcapi
self.assertRaises(SystemError, _testcapi.raise_exception,
InvalidException, 1)
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
test_capi3()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertIs(WindowsError, OSError)
self.assertEqual(str(OSError(1001)), "1001")
self.assertEqual(str(OSError(1001, "message")),
"[Errno 1001] message")
# POSIX errno (9 aka EBADF) is untranslated
w = OSError(9, 'foo', 'bar')
self.assertEqual(w.errno, 9)
self.assertEqual(w.winerror, None)
self.assertEqual(str(w), "[Errno 9] foo: 'bar'")
# ERROR_PATH_NOT_FOUND (win error 3) becomes ENOENT (2)
w = OSError(0, 'foo', 'bar', 3)
self.assertEqual(w.errno, 2)
self.assertEqual(w.winerror, 3)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, 'bar')
self.assertEqual(w.filename2, None)
self.assertEqual(str(w), "[WinError 3] foo: 'bar'")
# Unknown win error becomes EINVAL (22)
w = OSError(0, 'foo', None, 1001)
self.assertEqual(w.errno, 22)
self.assertEqual(w.winerror, 1001)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
self.assertEqual(w.filename2, None)
self.assertEqual(str(w), "[WinError 1001] foo")
# Non-numeric "errno"
w = OSError('bar', 'foo')
self.assertEqual(w.errno, 'bar')
self.assertEqual(w.winerror, None)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
self.assertEqual(w.filename2, None)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to Windows')
def test_windows_message(self):
"""Should fill in unknown error code in Windows error message"""
ctypes = import_module('ctypes')
# this error code has no message, Python formats it as hexadecimal
code = 3765269347
with self.assertRaisesRegex(OSError, 'Windows Error 0x%x' % code):
ctypes.pythonapi.PyErr_SetFromWindowsErr(code)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'args' : ()}),
(BaseException, (1, ), {'args' : (1,)}),
(BaseException, ('foo',),
{'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'args' : ('foo',), 'code' : 'foo'}),
(OSError, ('foo',),
{'args' : ('foo',), 'filename' : None, 'filename2' : None,
'errno' : None, 'strerror' : None}),
(OSError, ('foo', 'bar'),
{'args' : ('foo', 'bar'),
'filename' : None, 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz'),
{'args' : ('foo', 'bar'),
'filename' : 'baz', 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz', None, 'quux'),
{'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}),
(OSError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(OSError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr',
'filename' : 'filenameStr', 'filename2' : None}),
(SyntaxError, (), {'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'args' : (),}),
(UnicodeEncodeError, ('ascii', 'a', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', 'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : 'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
{'args' : ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
{'args' : ('\u3042', 0, 1, 'ouch'),
'object' : '\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
(NaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
(SlottedNaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
]
try:
# More tests are in test_WindowsError
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : None,
'errno' : 1,
'filename' : 'filenameStr', 'filename2' : None})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
e = exc(*args)
except:
print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
raise
else:
# Verify module name
if not type(e).__name__.endswith('NaiveException'):
self.assertEqual(type(e).__module__, 'builtins')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
value = getattr(e, checkArgName)
self.assertEqual(repr(value),
repr(expected[checkArgName]),
'%r.%s == %r, expected %r' % (
e, checkArgName,
value, expected[checkArgName]))
# test for pickling support
for p in [pickle]:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
s = p.dumps(e, protocol)
new = p.loads(s)
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s' %
(e, checkArgName))
def testWithTraceback(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = BaseException().with_traceback(tb)
self.assertIsInstance(e, BaseException)
self.assertEqual(e.__traceback__, tb)
e = IndexError(5).with_traceback(tb)
self.assertIsInstance(e, IndexError)
self.assertEqual(e.__traceback__, tb)
class MyException(Exception):
pass
e = MyException().with_traceback(tb)
self.assertIsInstance(e, MyException)
self.assertEqual(e.__traceback__, tb)
def testInvalidTraceback(self):
try:
Exception().__traceback__ = 5
except TypeError as e:
self.assertIn("__traceback__ must be a traceback", str(e))
else:
self.fail("No exception raised")
def testInvalidAttrs(self):
self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__cause__')
self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = Exception()
e.__traceback__ = tb
e.__traceback__ = None
self.assertEqual(e.__traceback__, None)
def testChainingAttrs(self):
e = Exception()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
e = TypeError()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
class MyException(OSError):
pass
e = MyException()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
def testChainingDescriptors(self):
try:
raise Exception()
except Exception as exc:
e = exc
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
self.assertFalse(e.__suppress_context__)
e.__context__ = NameError()
e.__cause__ = None
self.assertIsInstance(e.__context__, NameError)
self.assertIsNone(e.__cause__)
self.assertTrue(e.__suppress_context__)
e.__suppress_context__ = False
self.assertFalse(e.__suppress_context__)
def testKeywordArgs(self):
# test that builtin exception don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
@no_tracing
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RecursionError, f)
def g():
try:
return g()
except ValueError:
return -1
self.assertRaises(RecursionError, g)
def test_str(self):
# Make sure both instances and classes have a str representation.
self.assertTrue(str(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(str(Exception('a', 'b')))
def testExceptionCleanupNames(self):
# Make sure the local variable bound to the exception instance by
# an "except" statement is only visible inside the except block.
try:
raise Exception()
except Exception as e:
self.assertTrue(e)
del e
self.assertNotIn('e', locals())
def testExceptionCleanupState(self):
# Make sure exception state is cleaned up as soon as the except
# block is left. See #2507
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def inner_raising_func():
# Create some references in exception value and traceback
local_ref = obj
raise MyException(obj)
# Qualified "except" with "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException as e:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# Qualified "except" without "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# Bare "except"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# "except" with premature block leave
obj = MyObj()
wr = weakref.ref(obj)
for i in [0]:
try:
inner_raising_func()
except:
break
obj = None
obj = wr()
self.assertIsNone(obj)
# "except" block raising another exception
obj = MyObj()
wr = weakref.ref(obj)
try:
try:
inner_raising_func()
except:
raise KeyError
except KeyError as e:
# We want to test that the except block above got rid of
# the exception raised in inner_raising_func(), but it
# also ends up in the __context__ of the KeyError, so we
# must clear the latter manually for our test to succeed.
e.__context__ = None
obj = None
obj = wr()
# guarantee no ref cycles on CPython (don't gc_collect)
if check_impl_detail(cpython=False):
gc_collect()
self.assertIsNone(obj)
# Some complicated construct
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
try:
try:
raise
finally:
raise
except MyException:
pass
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertIsNone(obj)
# Inside an exception-silencing "with" block
class Context:
def __enter__(self):
return self
def __exit__ (self, exc_type, exc_value, exc_tb):
return True
obj = MyObj()
wr = weakref.ref(obj)
with Context():
inner_raising_func()
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertIsNone(obj)
def test_exception_target_in_nested_scope(self):
# issue 4617: This used to raise a SyntaxError
# "can not delete variable 'e' referenced in nested scope"
def print_error():
e
try:
something
except Exception as e:
print_error()
# implicit "del e" here
def test_generator_leaking(self):
# Test that generator exception state doesn't leak into the calling
# frame
def yield_raise():
try:
raise KeyError("caught")
except KeyError:
yield sys.exc_info()[0]
yield sys.exc_info()[0]
yield sys.exc_info()[0]
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), None)
# Same test, but inside an exception handler
try:
raise TypeError("foo")
except TypeError:
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), TypeError)
del g
self.assertEqual(sys.exc_info()[0], TypeError)
def test_generator_leaking2(self):
# See issue 12475.
def g():
yield
try:
raise RuntimeError
except RuntimeError:
it = g()
next(it)
try:
next(it)
except StopIteration:
pass
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_leaking3(self):
# See issue #23353. When gen.throw() is called, the caller's
# exception state should be saved and restored.
def g():
try:
yield
except ZeroDivisionError:
yield sys.exc_info()[1]
it = g()
next(it)
try:
1/0
except ZeroDivisionError as e:
self.assertIs(sys.exc_info()[1], e)
gen_exc = it.throw(e)
self.assertIs(sys.exc_info()[1], e)
self.assertIs(gen_exc, e)
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_leaking4(self):
# See issue #23353. When an exception is raised by a generator,
# the caller's exception state should still be restored.
def g():
try:
1/0
except ZeroDivisionError:
yield sys.exc_info()[0]
raise
it = g()
try:
raise TypeError
except TypeError:
# The caller's exception state (TypeError) is temporarily
# saved in the generator.
tp = next(it)
self.assertIs(tp, ZeroDivisionError)
try:
next(it)
# We can't check it immediately, but while next() returns
# with an exception, it shouldn't have restored the old
# exception state (TypeError).
except ZeroDivisionError as e:
self.assertIs(sys.exc_info()[1], e)
# We used to find TypeError here.
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_doesnt_retain_old_exc(self):
def g():
self.assertIsInstance(sys.exc_info()[1], RuntimeError)
yield
self.assertEqual(sys.exc_info(), (None, None, None))
it = g()
try:
raise RuntimeError
except RuntimeError:
next(it)
self.assertRaises(StopIteration, next, it)
def test_generator_finalizing_and_exc_info(self):
# See #7173
def simple_gen():
yield 1
def run_gen():
gen = simple_gen()
try:
raise RuntimeError
except RuntimeError:
return next(gen)
run_gen()
gc_collect()
self.assertEqual(sys.exc_info(), (None, None, None))
def _check_generator_cleanup_exc_state(self, testfunc):
# Issue #12791: exception state is cleaned up as soon as a generator
# is closed (reference cycles are broken).
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def raising_gen():
try:
raise MyException(obj)
except MyException:
yield
obj = MyObj()
wr = weakref.ref(obj)
g = raising_gen()
next(g)
testfunc(g)
g = obj = None
obj = wr()
self.assertIsNone(obj)
def test_generator_throw_cleanup_exc_state(self):
def do_throw(g):
try:
g.throw(RuntimeError())
except RuntimeError:
pass
self._check_generator_cleanup_exc_state(do_throw)
def test_generator_close_cleanup_exc_state(self):
def do_close(g):
g.close()
self._check_generator_cleanup_exc_state(do_close)
def test_generator_del_cleanup_exc_state(self):
def do_del(g):
g = None
self._check_generator_cleanup_exc_state(do_del)
def test_generator_next_cleanup_exc_state(self):
def do_next(g):
try:
next(g)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_next)
def test_generator_send_cleanup_exc_state(self):
def do_send(g):
try:
g.send(None)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_send)
def test_3114(self):
# Bug #3114: in its destructor, MyObject retrieves a pointer to
# obsolete and/or deallocated objects.
class MyObject:
def __del__(self):
nonlocal e
e = sys.exc_info()
e = ()
try:
raise Exception(MyObject())
except:
pass
self.assertEqual(e, (None, None, None))
def test_unicode_change_attributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', b'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError('xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_unicode_errors_no_object(self):
# See issue #21134.
klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
@no_tracing
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception, metaclass=Meta):
pass
with captured_stderr() as stderr:
try:
raise KeyError()
except MyException as e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
if not hasattr(sys, "pyston_version_info"):
def g():
try:
return g()
except RecursionError:
return sys.exc_info()
e, v, tb = g()
self.assertIsInstance(v, RecursionError, type(v))
self.assertIn("maximum recursion depth exceeded", str(v))
@cpython_only
def test_trashcan_recursion(self):
# See bpo-33930
def foo():
o = object()
for x in range(1_000_000):
# Create a big chain of method objects that will trigger
# a deep chain of calls when they need to be destructed.
o = o.__dir__
foo()
support.gc_collect()
@cpython_only
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursion_normalizing_exception(self):
# Issue #22898.
# Test that a RecursionError is raised when tstate->recursion_depth is
# equal to recursion_limit in PyErr_NormalizeException() and check
# that a ResourceWarning is printed.
# Prior to #22898, the recursivity of PyErr_NormalizeException() was
# controlled by tstate->recursion_depth and a PyExc_RecursionErrorInst
# singleton was being used in that case, that held traceback data and
# locals indefinitely and would cause a segfault in _PyExc_Fini() upon
# finalization of these locals.
code = """if 1:
import sys
from _testcapi import get_recursion_depth
class MyException(Exception): pass
def setrecursionlimit(depth):
while 1:
try:
sys.setrecursionlimit(depth)
return depth
except RecursionError:
# sys.setrecursionlimit() raises a RecursionError if
# the new recursion limit is too low (issue #25274).
depth += 1
def recurse(cnt):
cnt -= 1
if cnt:
recurse(cnt)
else:
generator.throw(MyException)
def gen():
f = open(%a, mode='rb', buffering=0)
yield
generator = gen()
next(generator)
recursionlimit = sys.getrecursionlimit()
depth = get_recursion_depth()
try:
# Upon the last recursive invocation of recurse(),
# tstate->recursion_depth is equal to (recursion_limit - 1)
# and is equal to recursion_limit when _gen_throw() calls
# PyErr_NormalizeException().
recurse(setrecursionlimit(depth + 2) - depth - 1)
finally:
sys.setrecursionlimit(recursionlimit)
print('Done.')
""" % __file__
rc, out, err = script_helper.assert_python_failure("-Wd", "-c", code)
# Check that the program does not fail with SIGABRT.
self.assertEqual(rc, 1)
self.assertIn(b'RecursionError', err)
self.assertIn(b'ResourceWarning', err)
self.assertIn(b'Done.', out)
@cpython_only
def test_recursion_normalizing_infinite_exception(self):
# Issue #30697. Test that a RecursionError is raised when
# PyErr_NormalizeException() maximum recursion depth has been
# exceeded.
code = """if 1:
import _testcapi
try:
raise _testcapi.RecursingInfinitelyError
finally:
print('Done.')
"""
rc, out, err = script_helper.assert_python_failure("-c", code)
self.assertEqual(rc, 1)
self.assertIn(b'RecursionError: maximum recursion depth exceeded '
b'while normalizing an exception', err)
self.assertIn(b'Done.', out)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables memory hooks")
@cpython_only
def test_recursion_normalizing_with_no_memory(self):
# Issue #30697. Test that in the abort that occurs when there is no
# memory left and the size of the Python frames stack is greater than
# the size of the list of preallocated MemoryError instances, the
# Fatal Python error message mentions MemoryError.
code = """if 1:
import _testcapi
class C(): pass
def recurse(cnt):
cnt -= 1
if cnt:
recurse(cnt)
else:
_testcapi.set_nomemory(0)
C()
recurse(16)
"""
with SuppressCrashReport():
rc, out, err = script_helper.assert_python_failure("-c", code)
self.assertIn(b'Fatal Python error: Cannot recover from '
b'MemoryErrors while normalizing exceptions.', err)
@cpython_only
def test_MemoryError(self):
# PyErr_NoMemory always raises the same exception instance.
# Check that the traceback is not doubled.
import traceback
from _testcapi import raise_memoryerror
def raiseMemError():
try:
raise_memoryerror()
except MemoryError as e:
tb = e.__traceback__
else:
self.fail("Should have raises a MemoryError")
return traceback.format_tb(tb)
tb1 = raiseMemError()
tb2 = raiseMemError()
self.assertEqual(tb1, tb2)
@cpython_only
def test_exception_with_doc(self):
import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
"error1")
# test basic usage of PyErr_NewException
error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
self.assertIs(type(error1), type)
self.assertTrue(issubclass(error1, Exception))
self.assertIsNone(error1.__doc__)
# test with given docstring
error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
self.assertEqual(error2.__doc__, doc2)
# test with explicit base (without docstring)
error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
base=error2)
self.assertTrue(issubclass(error3, error2))
# test with explicit base tuple
class C(object):
pass
error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
(error3, C))
self.assertTrue(issubclass(error4, error3))
self.assertTrue(issubclass(error4, C))
self.assertEqual(error4.__doc__, doc4)
# test with explicit dictionary
error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
error4, {'a': 1})
self.assertTrue(issubclass(error5, error4))
self.assertEqual(error5.a, 1)
self.assertEqual(error5.__doc__, "")
@cpython_only
def test_memory_error_cleanup(self):
# Issue #5437: preallocated MemoryError instances should not keep
# traceback objects alive.
from _testcapi import raise_memoryerror
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
raise_memoryerror()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except MemoryError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("MemoryError not raised")
self.assertEqual(wr(), None)
@no_tracing
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursion_error_cleanup(self):
# Same test as above, but with "recursion exceeded" errors
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
inner()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except RecursionError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("RecursionError not raised")
self.assertEqual(wr(), None)
def test_errno_ENOTDIR(self):
# Issue #12802: "not a directory" errors are ENOTDIR even on Windows
with self.assertRaises(OSError) as cm:
os.listdir(__file__)
self.assertEqual(cm.exception.errno, errno.ENOTDIR, cm.exception)
def test_unraisable(self):
# Issue #22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
obj = BrokenDel()
with support.catch_unraisable_exception() as cm:
del obj
self.assertEqual(cm.unraisable.object, BrokenDel.__del__)
self.assertIsNotNone(cm.unraisable.exc_traceback)
def test_unhandled(self):
# Check for sensible reporting of unhandled exceptions
for exc_type in (ValueError, BrokenStrException):
with self.subTest(exc_type):
try:
exc = exc_type("test message")
# The following line is included in the traceback report:
raise exc
except exc_type:
with captured_stderr() as stderr:
sys.__excepthook__(*sys.exc_info())
report = stderr.getvalue()
self.assertIn("test_exceptions.py", report)
self.assertIn("raise exc", report)
self.assertIn(exc_type.__name__, report)
if exc_type is BrokenStrException:
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("test message", report)
self.assertTrue(report.endswith("\n"))
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables memory hooks")
@cpython_only
def test_memory_error_in_PyErr_PrintEx(self):
code = """if 1:
import _testcapi
class C(): pass
_testcapi.set_nomemory(0, %d)
C()
"""
# Issue #30817: Abort in PyErr_PrintEx() when no memory.
# Span a large range of tests as the CPython code always evolves with
# changes that add or remove memory allocations.
for i in range(1, 20):
rc, out, err = script_helper.assert_python_failure("-c", code % i)
self.assertIn(rc, (1, 120))
self.assertIn(b'MemoryError', err)
def test_yield_in_nested_try_excepts(self):
#Issue #25612
class MainError(Exception):
pass
class SubError(Exception):
pass
def main():
try:
raise MainError()
except MainError:
try:
yield
except SubError:
pass
raise
coro = main()
coro.send(None)
with self.assertRaises(MainError):
coro.throw(SubError())
def test_generator_doesnt_retain_old_exc2(self):
#Issue 28884#msg282532
def g():
try:
raise ValueError
except ValueError:
yield 1
self.assertEqual(sys.exc_info(), (None, None, None))
yield 2
gen = g()
try:
raise IndexError
except IndexError:
self.assertEqual(next(gen), 1)
self.assertEqual(next(gen), 2)
def test_raise_in_generator(self):
#Issue 25612#msg304117
def g():
yield 1
raise
yield 2
with self.assertRaises(ZeroDivisionError):
i = g()
try:
1/0
except:
next(i)
next(i)
def test_memory_error_subclasses(self):
# bpo-41654: MemoryError instances use a freelist of objects that are
# linked using the 'dict' attribute when they are inactive/dead.
# Subclasses of MemoryError should not participate in the freelist
# schema. This test creates a MemoryError object and keeps it alive
# (therefore advancing the freelist) and then it creates and destroys a
# subclass object. Finally, it checks that creating a new MemoryError
# succeeds, proving that the freelist is not corrupted.
class TestException(MemoryError):
pass
try:
raise MemoryError
except MemoryError as exc:
inst = exc
try:
raise TestException
except Exception:
pass
for _ in range(10):
try:
raise MemoryError
except MemoryError as exc:
pass
gc_collect()
class ImportErrorTests(unittest.TestCase):
def test_attributes(self):
# Setting 'name' and 'path' should not be a problem.
exc = ImportError('test')
self.assertIsNone(exc.name)
self.assertIsNone(exc.path)
exc = ImportError('test', name='somemodule')
self.assertEqual(exc.name, 'somemodule')
self.assertIsNone(exc.path)
exc = ImportError('test', path='somepath')
self.assertEqual(exc.path, 'somepath')
self.assertIsNone(exc.name)
exc = ImportError('test', path='somepath', name='somename')
self.assertEqual(exc.name, 'somename')
self.assertEqual(exc.path, 'somepath')
msg = "'invalid' is an invalid keyword argument for ImportError"
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', name='name', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', path='path', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError(invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', invalid='keyword', another=True)
def test_reset_attributes(self):
exc = ImportError('test', name='name', path='path')
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, 'name')
self.assertEqual(exc.path, 'path')
# Reset not specified attributes
exc.__init__()
self.assertEqual(exc.args, ())
self.assertEqual(exc.msg, None)
self.assertEqual(exc.name, None)
self.assertEqual(exc.path, None)
def test_non_str_argument(self):
# Issue #15778
with check_warnings(('', BytesWarning), quiet=True):
arg = b'abc'
exc = ImportError(arg)
self.assertEqual(str(arg), str(exc))
def test_copy_pickle(self):
for kwargs in (dict(),
dict(name='somename'),
dict(path='somepath'),
dict(name='somename', path='somepath')):
orig = ImportError('test', **kwargs)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
exc = pickle.loads(pickle.dumps(orig, proto))
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, orig.name)
self.assertEqual(exc.path, orig.path)
for c in copy.copy, copy.deepcopy:
exc = c(orig)
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, orig.name)
self.assertEqual(exc.path, orig.path)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_2932 | """
buildfarm dependencies that can be imported into other WORKSPACE files
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file", "http_jar")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
RULES_JVM_EXTERNAL_TAG = "3.3"
RULES_JVM_EXTERNAL_SHA = "d85951a92c0908c80bd8551002d66cb23c3434409c814179c0ff026b53544dab"
def archive_dependencies(third_party):
return [
{
"name": "rules_jvm_external",
"strip_prefix": "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
"sha256": RULES_JVM_EXTERNAL_SHA,
"url": "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
},
# Needed for "well-known protos" and @com_google_protobuf//:protoc.
{
"name": "com_google_protobuf",
"sha256": "dd513a79c7d7e45cbaeaf7655289f78fd6b806e52dbbd7018ef4e3cf5cff697a",
"strip_prefix": "protobuf-3.15.8",
"urls": ["https://github.com/protocolbuffers/protobuf/archive/v3.15.8.zip"],
},
{
"name": "com_github_bazelbuild_buildtools",
"sha256": "a02ba93b96a8151b5d8d3466580f6c1f7e77212c4eb181cba53eb2cae7752a23",
"strip_prefix": "buildtools-3.5.0",
"urls": ["https://github.com/bazelbuild/buildtools/archive/3.5.0.tar.gz"],
},
# Needed for @grpc_java//compiler:grpc_java_plugin.
{
"name": "io_grpc_grpc_java",
"sha256": "101b21af120901e9bf342384988f57af3332b59d997f64d5f41a1e24ffb96f19",
"strip_prefix": "grpc-java-1.42.0",
"urls": ["https://github.com/grpc/grpc-java/archive/v1.42.0.zip"],
},
# The APIs that we implement.
{
"name": "googleapis",
"build_file": "%s:BUILD.googleapis" % third_party,
"patch_cmds": ["find google -name 'BUILD.bazel' -type f -delete"],
"patch_cmds_win": ["Remove-Item google -Recurse -Include *.bazel"],
"sha256": "745cb3c2e538e33a07e2e467a15228ccbecadc1337239f6740d57a74d9cdef81",
"strip_prefix": "googleapis-6598bb829c9e9a534be674649ffd1b4671a821f9",
"url": "https://github.com/googleapis/googleapis/archive/6598bb829c9e9a534be674649ffd1b4671a821f9.zip",
},
{
"name": "remote_apis",
"build_file": "%s:BUILD.remote_apis" % third_party,
"patch_args": ["-p1"],
"patches": ["%s/remote-apis:remote-apis.patch" % third_party],
"sha256": "1d69f5f2f694fe93ee78a630f196047892ae51878297a89601c98964486655c6",
"strip_prefix": "remote-apis-6345202a036a297b22b0a0e7531ef702d05f2130",
"url": "https://github.com/bazelbuild/remote-apis/archive/6345202a036a297b22b0a0e7531ef702d05f2130.zip",
},
{
"name": "rules_cc",
"sha256": "34b2ebd4f4289ebbc27c7a0d854dcd510160109bb0194c0ba331c9656ffcb556",
"strip_prefix": "rules_cc-daf6ace7cfeacd6a83e9ff2ed659f416537b6c74",
"url": "https://github.com/bazelbuild/rules_cc/archive/daf6ace7cfeacd6a83e9ff2ed659f416537b6c74.tar.gz",
},
# Used to format proto files
{
"name": "com_grail_bazel_toolchain",
"sha256": "54b54eedc71b93b278c44b6c056a737dc68545c6da75f63d0810676e1181f559",
"strip_prefix": "bazel-toolchain-76ce37e977a304acf8948eadabb82c516320e286",
"url": "https://github.com/grailbio/bazel-toolchain/archive/76ce37e977a304acf8948eadabb82c516320e286.tar.gz",
},
# Ideally we would use the 0.14.4 release of rules_docker,
# but that version introduced new pypi and pkg dependencies on tar-related targets, making the upgrade difficult.
# Those dependencies were then removed afterward. We pick a stable commit after 0.14.4 instead of cherry-picking in the different changes.
# https://github.com/bazelbuild/rules_docker/issues/1622
# When a new version after 0.14.4 is released, we can go back to a pinned version.
{
"name": "io_bazel_rules_docker",
"patch_args": ["-p1"],
"patches": ["%s/io_bazel_rules_docker:entrypoint.patch" % third_party],
"sha256": "d5609b7858246fa11e76237aa9b3e681615bdc8acf2ed29058426cf7c4cea099",
"strip_prefix": "rules_docker-f4822f3921f0c343dd9e5ae65c760d0fb70be1b3",
"urls": ["https://github.com/bazelbuild/rules_docker/archive/f4822f3921f0c343dd9e5ae65c760d0fb70be1b3.tar.gz"],
},
# Bazel is referenced as a dependency so that buildfarm can access the linux-sandbox as a potential execution wrapper.
{
"name": "bazel",
"sha256": "bca2303a43c696053317a8c7ac09a5e6d90a62fec4726e55357108bb60d7a807",
"strip_prefix": "bazel-3.7.2",
"urls": ["https://github.com/bazelbuild/bazel/archive/3.7.2.tar.gz"],
"patch_args": ["-p1"],
"patches": ["%s/bazel:bazel_visibility.patch" % third_party],
},
# Optional execution wrappers
{
"name": "skip_sleep",
"build_file": "%s:BUILD.skip_sleep" % third_party,
"sha256": "03980702e8e9b757df68aa26493ca4e8573770f15dd8a6684de728b9cb8549f1",
"strip_prefix": "TARDIS-f54fa4743e67763bb1ad77039b3d15be64e2e564",
"url": "https://github.com/Unilang/TARDIS/archive/f54fa4743e67763bb1ad77039b3d15be64e2e564.zip",
},
]
def buildfarm_dependencies(repository_name = "build_buildfarm"):
"""
Define all 3rd party archive rules for buildfarm
Args:
repository_name: the name of the repository
"""
third_party = "@%s//third_party" % repository_name
for dependency in archive_dependencies(third_party):
params = {}
params.update(**dependency)
name = params.pop("name")
maybe(http_archive, name, **params)
# Enhanced jedis 3.2.0 containing several convenience, performance, and
# robustness changes.
# Notable features include:
# Cluster request pipelining, used for batching requests for operation
# monitors and CAS index.
# Blocking request (b* prefix) interruptibility, using client
# connection reset.
# Singleton-redis-as-cluster - support treating a non-clustered redis
# endpoint as a cluster of 1 node.
# Other changes are redis version-forward treatment of spop and visibility
# into errors in cluster unreachable and cluster retry exhaustion.
# Details at https://github.com/werkt/jedis/releases/tag/3.2.0-e82e68e2f7
maybe(
http_jar,
"jedis",
sha256 = "294ff5e4e6ae3fda5ff00f0a3c398fa50c1ffa3bc9313800b32e34a75fbb93f3",
urls = [
"https://github.com/werkt/jedis/releases/download/3.2.0-e82e68e2f7/jedis-3.2.0-e82e68e2f7.jar",
],
)
http_file(
name = "tini",
urls = ["https://github.com/krallin/tini/releases/download/v0.18.0/tini"],
)
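# Hedged usage sketch: a consuming WORKSPACE would typically declare the
# @build_buildfarm repository first and then call the macro above, e.g.
#
#   load("@build_buildfarm//:deps.bzl", "buildfarm_dependencies")
#   buildfarm_dependencies()
#
# The "//:deps.bzl" label is an assumption about where this file lives in the
# repository; adjust it to the actual .bzl path.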
|
the-stack_0_2933 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
This class serves as tool adaptor for ESBMC (http://www.esbmc.org/)
"""
REQUIRED_PATHS = ["cpachecker", "esbmc", "esbmc-wrapper.py", "tokenizer"]
def executable(self):
return util.find_executable("esbmc-wrapper.py")
def working_directory(self, executable):
executableDir = os.path.dirname(executable)
return executableDir
def version(self, executable):
return self._version_from_tool(executable, "-v")
def name(self):
return "ESBMC"
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
assert len(tasks) == 1, "only one inputfile supported"
inputfile = tasks[0]
return [executable] + ["-p", propertyfile] + options + [inputfile]
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = "\n".join(output)
status = result.RESULT_UNKNOWN
if self.allInText(["FALSE_DEREF"], output):
status = result.RESULT_FALSE_DEREF
elif self.allInText(["FALSE_FREE"], output):
status = result.RESULT_FALSE_FREE
elif self.allInText(["FALSE_MEMTRACK"], output):
status = result.RESULT_FALSE_MEMTRACK
elif self.allInText(["FALSE_OVERFLOW"], output):
status = result.RESULT_FALSE_OVERFLOW
elif self.allInText(["FALSE"], output):
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
elif "DONE" in output:
status = result.RESULT_DONE
if status == result.RESULT_UNKNOWN:
if isTimeout:
status = "TIMEOUT"
elif output.endswith(("error", "error\n")):
status = "ERROR"
return status
""" helper method """
def allInText(self, words, text):
"""
This function checks whether all the words appear in the given order in the text.
"""
index = 0
for word in words:
index = text[index:].find(word)
if index == -1:
return False
return True
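# Hedged illustration (hypothetical tool output, not part of the BenchExec
# sources): the ordered check above only succeeds when each word is found
# after the previous match, e.g.
#   Tool().allInText(["FALSE", "DEREF"], "VERIFICATION FALSE_DEREF")  # -> True
#   Tool().allInText(["DEREF", "FALSE"], "VERIFICATION FALSE_DEREF")  # -> False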
|
the-stack_0_2934 | ############################################################################
# Copyright (C) 2008 by Volker Christian #
# [email protected] #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from Plugins.Extensions.VlcPlayer.VlcPlayer import VlcPlayer
from Components.ActionMap import ActionMap
from YouTubeContextMenu import YouTubeEntryContextMenu, YouTubeEntryContextMenuList
class YouTubePlayer(VlcPlayer):
def __init__(self, session, server, currentList, contextMenuEntries, infoCallback, name):
VlcPlayer.__init__(self, session, server, currentList)
self.contextMenuEntries = contextMenuEntries
self.infoCallback = infoCallback
self.name = name
self["menuactions"] = ActionMap(["YouTubePlayerScreenActions"],
{
"menu" : self.openContextMenu,
"info" : self.showVideoInfo,
}, -1)
def showVideoInfo(self):
if self.shown:
self.hideInfobar()
self.infoCallback()
def openContextMenu(self):
if self.shown:
self.hideInfobar()
contextMenuList = YouTubeEntryContextMenuList()
for entry in self.contextMenuEntries:
contextMenuList.appendEntry(entry)
self.session.openWithCallback(self.menuActionCoosen, YouTubeEntryContextMenu, contextMenuList, self.name)
def menuActionCoosen(self, cookie):
if cookie is not None:
if cookie[1]:
self.stop()
cookie[0]()
|
the-stack_0_2935 | import os
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
# Example
createFolder('./data/')
# Creates a folder in the current directory called data
|
the-stack_0_2936 | import json
import sys
from urllib import *
import argparse
from urllib.parse import urlparse, urlencode, parse_qs
from urllib.request import urlopen
YOUTUBE_COMMENT_URL = 'https://www.googleapis.com/youtube/v3/commentThreads'
YOUTUBE_SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
arr = []
def retrive1():
return arr
class YouTubeApi():
def load_comments(self, mat):
for item in mat["items"]:
comment = item["snippet"]["topLevelComment"]
author = comment["snippet"]["authorDisplayName"]
text = comment["snippet"]["textDisplay"]
print("Comment by {}: {}".format(author, text))
if 'replies' in item.keys():
for reply in item['replies']['comments']:
rauthor = reply['snippet']['authorDisplayName']
rtext = reply["snippet"]["textDisplay"]
print("\n\tReply by {}: {}".format(rauthor, rtext), "\n")
def get_video_comment(self):
parser = argparse.ArgumentParser()
mxRes = 8
vid = str()
parser.add_argument("--c", help="calls comment function by keyword function", action='store_true')
parser.add_argument("--max", help="number of comments to return")
parser.add_argument("--videourl", help="Required URL for which comments to return")
parser.add_argument("--key", help="Required API key")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.videourl:
exit("Please specify video URL using the --videourl=parameter.")
if not args.key:
exit("Please specify API key using the --key=parameter.")
try:
video_id = urlparse(str(args.videourl))
q = parse_qs(video_id.query)
vid = q["v"][0]
except:
print("Invalid YouTube URL")
parms = {
'part': 'snippet,replies',
'maxResults': args.max,
'videoId': vid,
'textFormat': 'plainText',
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_COMMENT_URL, parms)
i = 2
mat = json.loads(matches)
nextPageToken = mat.get("nextPageToken")
print("\nPage : 1")
print("------------------------------------------------------------------")
self.load_comments(mat)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_COMMENT_URL, parms)
mat = json.loads(matches)
nextPageToken = mat.get("nextPageToken")
print("\nPage : ", i)
print("------------------------------------------------------------------")
self.load_comments(mat)
i += 1
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def load_search_res(self, search_response):
videos, channels, playlists = [], [], []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("{}".format(search_result["id"]["videoId"]))
arr.append("{}".format(search_result["id"]["videoId"]))
# elif search_result["id"]["kind"] == "youtube#channel":
# channels.append("{} ({})".format(search_result["snippet"]["title"],
# search_result["id"]["channelId"]))
# elif search_result["id"]["kind"] == "youtube#playlist":
# playlists.append("{} ({})".format(search_result["snippet"]["title"],
# search_result["id"]["playlistId"]))
print("Videos:\n", "\n".join(videos), "\n")
# print("Channels:\n", "\n".join(channels), "\n")
#print("Playlists:\n", "\n".join(playlists), "\n")
def search_keyword(self,word):
parser = argparse.ArgumentParser()
# word="hello"
mxRes = 2
parser.add_argument("--s", help="calls the search by keyword function", action='store_true')
parser.add_argument("--r", help="define country code for search results for specific country", default="IN")
parser.add_argument("--search", help="Search Term")
parser.add_argument("--max", help="number of results to return")
parser.add_argument("--key", help="Required API key", default="AIzaSyAP3or1BGNTc-H6gr9j26p3oWnwvcUonsc")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.search:
args.search = word
if not args.key:
exit("Please specify API key using the --key= parameter.")
parms = {
'q': args.search,
'part': 'id,snippet',
'maxResults': args.max,
'regionCode': args.r,
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
i = 2
nextPageToken = search_response.get("nextPageToken")
print("\nPage : 1 --- Region : {}".format(args.r))
print("------------------------------------------------------------------")
self.load_search_res(search_response)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
nextPageToken = search_response.get("nextPageToken")
print("Page : {} --- Region : {}".format(i, args.r))
print("------------------------------------------------------------------")
self.load_search_res(search_response)
i += 1
if i==5:
break
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def load_channel_vid(self, search_response):
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("{} ({})".format(search_result["snippet"]["title"],
search_result["id"]["videoId"]))
print("###Videos:###\n", "\n".join(videos), "\n")
def channel_videos(self):
parser = argparse.ArgumentParser()
mxRes = 8
parser.add_argument("--sc", help="calls the search by channel by keyword function", action='store_true')
parser.add_argument("--channelid", help="Search Term", default="Srce Cde")
parser.add_argument("--max", help="number of results to return")
parser.add_argument("--key", help="Required API key")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.channelid:
exit("Please specify channelid using the --channelid= parameter.")
if not args.key:
exit("Please specify API key using the --key= parameter.")
parms = {
'part': 'id,snippet',
'channelId': args.channelid,
'maxResults': args.max,
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
i = 2
nextPageToken = search_response.get("nextPageToken")
print("\nPage : 1")
print("------------------------------------------------------------------")
self.load_channel_vid(search_response)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
nextPageToken = search_response.get("nextPageToken")
print("Page : ", i)
print("------------------------------------------------------------------")
self.load_channel_vid(search_response)
i += 1
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def openURL(self, url, parms):
f = urlopen(url + '?' + urlencode(parms))
data = f.read()
f.close()
matches = data.decode("utf-8")
return matches
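# Hedged usage sketch (hypothetical script name and values): get_video_comment()
# is driven entirely by argparse, so fetching comments would look roughly like
#   python <this_script>.py --c --videourl="https://www.youtube.com/watch?v=VIDEO_ID" --key=YOUR_API_KEY --max=10
# Note that main() below only exercises search_keyword(); get_video_comment()
# has to be called explicitly.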
def main():
y = YouTubeApi()
y.search_keyword("hello")
print(arr)
if __name__ == '__main__':
main() |
the-stack_0_2937 | """Data processing routines for MongoDB version
"""
import datetime
# import shutil
import pathlib
from pymongo import MongoClient
from pymongo.collection import Collection
cl = MongoClient()
db = cl.doctree_database
# support for older pymongo versions
try:
test = Collection.update_one
except AttributeError:
## Collection.insert_one = Collection.insert
Collection.update_one = Collection.update
Collection.replace_one = Collection.update
## # Collection.find_one_and_delete = Collection.remove
Collection.delete_many = Collection.remove
def _add_doc(filename, doc):
"""create new document in the dtree document
"""
try:
id_ = db[filename].insert_one(doc).inserted_id
except TypeError:
id_ = db[filename].insert(doc)
return id_
def _update_doc(filename, docid, doc):
"""change a document in the dtree document
"""
db[filename].update({'_id': docid}, doc)
def list_dtrees():
"""list all dtrees (collections) registered in the database
"""
return db.list_collection_names()
def create_new_dtree(filename):
"""set up a new dtree/collection
"""
if db[filename].find_one({'type': 'settings'}):
raise FileExistsError
db[filename].insert_one({'type': 'settings'})
# db[filename].insert_one({'type': 'textpos'})
db[filename].insert_one({'type': 'imagelist'})
def clear_dtree(filename, recreate=False):
"""remove (all data from) a dtree/collection
"""
if not db[filename].find_one({'type': 'settings'}):
raise FileNotFoundError
db[filename].drop()
if recreate:
create_new_dtree(filename)
def read_dtree(filename, readable=False):
"""read and return all data from a dtree/collection
"""
if not readable:
return db[filename].find()
views, itemdict, textpos = [], {}, {}
for item in read_dtree(filename):
if item['type'] == 'settings':
opts = item['data']
elif item['type'] == 'view':
views.append(item['data'])
elif item['type'] == 'textitem':
itemdict[item['textid']] = item['data']
textpos[item['textid']] = item['textpos']
# imagelist = [] # db[filename].find_one({'type': 'imagelist'})['data']
return opts, views, itemdict, textpos # , imagelist
def rename_dtree(filename, newname):
"""change the dtree/collection's name if possible
"""
if db[newname].find_one({'type': 'settings'}) is not None:
raise FileExistsError('new_name_taken')
db[filename].rename(newname)
# ----------- these routines come from main - rework them for mongodb
def read_from_files(this_file, other_file=''):
"(try to) load the data"
filename = other_file or this_file
if not filename:
return ['no file name given']
# read/init/check settings if possible, otherwise cancel
opts = db[filename].find_one({'type': 'settings'})['data']
if opts.get('Application', '') != 'DocTree':
return ["{} is not a valid Doctree data file".format(str(filename))] # is dit een Path?
# read views
views_from_db = db[filename].find({'type': 'view'})
views = [x['data'] for x in sorted(views_from_db, key=lambda x: x['viewno'])]
# read itemdict
# read text positions
data_from_db = list(db[filename].find({'type': 'textitem'}))
itemdict = {x['textid']: x['data'] for x in data_from_db}
text_positions = {x['textid']: x['textpos'] for x in data_from_db}
# if I don't create a data file, I don't really want a zip file either
# but then how should the images be stored?
# imagelist = [] # db[filename].find_one({'type': 'imagelist'})['data']
# if not other_file:
# # if possible, build a list of referred-to image files
# ## path = os.path.dirname((self.project_file))
# path = str(this_file.parent)
# try:
# with zpf.ZipFile(str(this_file.with_suffix('.zip'))) as f_in:
# f_in.extractall(path=path)
# imagelist = f_in.namelist()
# except FileNotFoundError:
# pass
return opts, views, itemdict, text_positions # , imagelist
def write_to_files(filename, opts, views, itemdict, textpositions, toolkit, extra_images=None,
backup=True, save_images=True):
"""settings en tree data in een structuur omzetten en opslaan
images contained are saved in a separate zipfile (not needed for wx)
"""
# nt_data = {0: opts, 1: views, 2: itemdict, 3: textpositions}
# zipfile = filename.with_suffix('.zip')
# if backup:
# try:
# shutil.copyfile(str(filename), str(filename) + ".bak")
# shutil.copyfile(str(zipfile), str(zipfile) + ".bak")
# except FileNotFoundError:
# pass
# with filename.open("wb") as f_out:
# pck.dump(nt_data, f_out, protocol=2)
# nt_data = {'settings': opts, 'views': views, 'docdata': itemdict, 'textpos': textpositions}
db[filename].update_one({'type': 'settings'}, {'$set': {'data': opts}})
for seq, view in enumerate(views):
print(seq, view)
result = db[filename].update_one({'type': 'view', 'viewno': seq},
{'$set': {'data': view}}, upsert=True)
print(result.raw_result)
# could this be done with update_many? Well, not like this in any case:
# db[filename].update_many({'type': 'view', 'viewno': seq}, {'$set': {'data': view}},
# upsert=True) for (seq, view) in enumerate(views)
for docid, doc in itemdict.items():
pos = textpositions[docid]
db[filename].update_one({'type': 'textitem', 'textid': docid},
{'$set': {'data': doc, 'textpos': pos}}, upsert=True)
# db[filename].update_many({'type': 'textitem', 'textid': docid},
# {'$set': {'data': doc, 'textpos': textpositions[docid]}},
# upsert = True) for (docid, doc) in itemdict.items()
# -- leaving the images out for now
# db[filename].update_one({'type': 'imagelist'}, {'$set': {'data': []}})
# if not save_images:
# return
# if extra_images is None:
# # scan the itemdict for image files and put them in a list
# imagelist = []
# for _, data in nt_data[2].values():
# names = [img['src'] for img in bs.BeautifulSoup(data, 'lxml').find_all('img')]
# imagelist.extend(names)
# ## fname = os.path.basename(filename)
# mode = "w"
# else:
# imagelist = extra_images
# mode = "a"
# # rebuild zipfile or add extra images to the zipfile
# # FIXME: if nothing has changed, the zipfile doesn't need to be rebuilt either?
# # could imagelist be compared with self.imagelist for that?
# path = filename.parent # make it absolute first if necessary
# zipped = []
# with zpf.ZipFile(str(zipfile), mode) as _out:
# for name in imagelist:
# # if name.startswith(str(filename)):
# imagepath = path / name # TODO: check whether the prefix now gets applied twice
# if imagepath.exists():
# ## _out.write(os.path.join(path, name), arcname=os.path.basename(name))
# # _out.write(str(path / name), arcname=pathlib.Path(name).name)
# _out.write(str(imagepath), arcname=name)
# zipped.append(name)
# return zipped
# maybe handling the images will work if I do it as follows:
# - an image is stored as dtree001.png and is identified the same way in the html
# - turn it into a byte stream by opening it with PIL -> io
# img = PIL.Image.open('dtree001.png')
# img_bytes = io.BytesIO()
# img.save(img_bytes, format='PNG') # this seems a convoluted way to get it in here
# couldn't it simply be done with
# img_bytes = open('dtree001.png', 'rb')
# the way to get this into mongodb seems in any case to be
# db[filename].insert_one({'type': 'image', 'name': 'dtree001.png', 'data': img_bytes})
# or maybe this still needs to be converted with bson.Binary(img_bytes)
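# A minimal sketch of the idea above (assumptions: the image file sits next to
# the data file, GridFS is deliberately not used, '_store_image' is a
# hypothetical helper name, and 'filename' is the collection name used
# throughout this module).
def _store_image(filename, imagename):
    """read an image file and store its raw bytes in the dtree collection"""
    import bson
    with open(imagename, 'rb') as img:
        data = bson.Binary(img.read())
    db[filename].update_one({'type': 'image', 'name': imagename},
                            {'$set': {'data': data}}, upsert=True)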
|
the-stack_0_2938 | # -*- coding: utf-8 -*-
'''
pytestsalt.utils
~~~~~~~~~~~~~~~~
Some pytest fixtures used in pytest-salt
'''
# Import Python libs
from __future__ import absolute_import
import os
import re
import sys
import json
import time
import errno
import atexit
import signal
import socket
import logging
import subprocess
import threading
from operator import itemgetter
from collections import namedtuple
# Import 3rd party libs
import pytest
import psutil
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
log = logging.getLogger(__name__)
if sys.platform.startswith('win'):
SIGINT = SIGTERM = signal.CTRL_BREAK_EVENT # pylint: disable=no-member
else:
SIGINT = signal.SIGINT
SIGTERM = signal.SIGTERM
def set_proc_title(title):
if HAS_SETPROCTITLE is False:
return
setproctitle.setproctitle('[{}] - {}'.format(title, setproctitle.getproctitle()))
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
usock.close()
return port
def collect_child_processes(pid):
'''
Try to collect any started child processes of the provided pid
'''
# Let's get the child processes of the started subprocess
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
except psutil.NoSuchProcess:
children = []
return children
def _terminate_process_list(process_list, kill=False, slow_stop=False):
for process in process_list[:]: # Iterate over copy of the list
if not psutil.pid_exists(process.pid):
process_list.remove(process)
continue
try:
if not kill and process.status() == psutil.STATUS_ZOMBIE:
# Zombie processes will exit once child processes also exit
continue
try:
cmdline = process.cmdline()
except psutil.AccessDenied:
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
cmdline = process.as_dict()
if kill:
log.info('Killing process(%s): %s', process.pid, cmdline)
process.kill()
else:
log.info('Terminating process(%s): %s', process.pid, cmdline)
try:
if slow_stop:
# Allow coverage data to be written down to disk
process.send_signal(SIGTERM)
try:
process.wait(2)
except psutil.TimeoutExpired:
if psutil.pid_exists(process.pid):
continue
else:
process.terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not psutil.pid_exists(process.pid):
process_list.remove(process)
except psutil.NoSuchProcess:
process_list.remove(process)
def terminate_process_list(process_list, kill=False, slow_stop=False):
def on_process_terminated(proc):
log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)
# Try to terminate processes with the provided kill and slow_stop parameters
log.info('Terminating process list. 1st step. kill: %s, slow stop: %s', kill, slow_stop)
# Cache the cmdline since that will be inaccessible once the process is terminated
for proc in process_list:
try:
cmdline = proc.cmdline()
except (psutil.NoSuchProcess, psutil.AccessDenied):
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
try:
cmdline = proc
except (psutil.NoSuchProcess, psutil.AccessDenied):
cmdline = '<could not be retrieved; dead process: {}>'.format(proc)
proc._cmdline = cmdline
_terminate_process_list(process_list, kill=kill, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=15, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, retry and kill them if slow_stop is False
log.info('Terminating process list. 2nd step. kill: %s, slow stop: %s', slow_stop is False, slow_stop)
_terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=10, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, just kill them, no slow stopping now
log.info('Terminating process list. 3rd step. kill: True, slow stop: False')
_terminate_process_list(process_list, kill=True, slow_stop=False)
psutil.wait_procs(process_list, timeout=5, callback=on_process_terminated)
if process_list:
# If there are still processes to be terminated, log a warning about it
log.warning('Some processes failed to properly terminate: %s', process_list)
def terminate_process(pid=None, process=None, children=None, kill_children=False, slow_stop=False):
'''
Try to terminate/kill the started process
'''
children = children or []
process_list = []
# Always kill children when killing the parent process.
kill_children = True if slow_stop is False else kill_children
if pid and not process:
try:
process = psutil.Process(pid)
process_list.append(process)
except psutil.NoSuchProcess:
# Process is already gone
process = None
if kill_children:
if process:
if not children:
children = collect_child_processes(process.pid)
else:
# Let's collect children again since there might be new ones
children.extend(collect_child_processes(pid))
if children:
process_list.extend(children)
if process_list:
if process:
log.info('Stopping process %s and respective children: %s', process, children)
else:
log.info('Terminating process list: %s', process_list)
terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
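# Hedged usage sketch (hypothetical command line): pairing the helpers above to
# stop a subprocess and everything it spawned, preferring a slow stop (SIGTERM
# first, so coverage data can be flushed) before escalating to a kill.
#   proc = subprocess.Popen(['sleep', '60'])
#   children = collect_child_processes(proc.pid)
#   terminate_process(pid=proc.pid, children=children, kill_children=True, slow_stop=True)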
def start_daemon(request,
daemon_name=None,
daemon_id=None,
daemon_log_prefix=None,
daemon_cli_script_name=None,
daemon_config=None,
daemon_config_dir=None,
daemon_class=None,
bin_dir_path=None,
fail_hard=False,
start_timeout=10,
slow_stop=True,
environ=None,
cwd=None,
max_attempts=3,
**kwargs):
'''
Returns a running salt daemon
'''
if fail_hard:
fail_method = pytest.fail
else:
fail_method = pytest.xfail
log.info('[%s] Starting pytest %s(%s)', daemon_name, daemon_log_prefix, daemon_id)
attempts = 0
process = None
while attempts <= max_attempts: # pylint: disable=too-many-nested-blocks
attempts += 1
process = daemon_class(request,
daemon_config,
daemon_config_dir,
bin_dir_path,
daemon_log_prefix,
cli_script_name=daemon_cli_script_name,
slow_stop=slow_stop,
environ=environ,
cwd=cwd,
**kwargs)
process.start()
if process.is_alive():
try:
connectable = process.wait_until_running(timeout=start_timeout)
if connectable is False:
connectable = process.wait_until_running(timeout=start_timeout/2)
if connectable is False:
process.terminate()
if attempts >= max_attempts:
fail_method(
'The pytest {}({}) has failed to confirm running status '
'after {} attempts'.format(daemon_name, daemon_id, attempts))
continue
except Exception as exc: # pylint: disable=broad-except
log.exception('[%s] %s', daemon_log_prefix, exc, exc_info=True)
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
if attempts >= max_attempts:
fail_method(str(exc))
continue
log.info(
'[%s] The pytest %s(%s) is running and accepting commands '
'after %d attempts',
daemon_log_prefix,
daemon_name,
daemon_id,
attempts
)
def stop_daemon():
log.info('[%s] Stopping pytest %s(%s)', daemon_log_prefix, daemon_name, daemon_id)
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
log.info('[%s] pytest %s(%s) stopped', daemon_log_prefix, daemon_name, daemon_id)
request.addfinalizer(stop_daemon)
return process
else:
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
continue
else: # pylint: disable=useless-else-on-loop
# Wrong, we have a return, it's not useless
if process is not None:
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
fail_method(
'The pytest {}({}) has failed to start after {} attempts'.format(
daemon_name,
daemon_id,
attempts-1
)
)
class SaltScriptBase(object):
'''
Base class for Salt CLI scripts
'''
cli_display_name = None
def __init__(self,
request,
config,
config_dir,
bin_dir_path,
log_prefix,
cli_script_name=None,
slow_stop=False,
environ=None,
cwd=None):
self.request = request
self.config = config
if not isinstance(config_dir, str):
config_dir = config_dir.realpath().strpath
self.config_dir = config_dir
self.bin_dir_path = bin_dir_path
self.log_prefix = log_prefix
if cli_script_name is None:
raise RuntimeError('Please provide a value for the cli_script_name keyword argument')
self.cli_script_name = cli_script_name
if self.cli_display_name is None:
self.cli_display_name = '{}({})'.format(self.__class__.__name__,
self.cli_script_name)
self.slow_stop = slow_stop
self.environ = environ or os.environ.copy()
self.cwd = cwd or os.getcwd()
self._terminal = self._children = None
def get_script_path(self, script_name):
'''
Returns the path to the script to run
'''
script_path = os.path.join(self.bin_dir_path, script_name)
if not os.path.exists(script_path):
pytest.fail('The CLI script {!r} does not exist'.format(script_path))
return script_path
def get_base_script_args(self):
'''
Returns any additional arguments to pass to the CLI script
'''
return ['-c', self.config_dir]
def get_script_args(self): # pylint: disable=no-self-use
'''
Returns any additional arguments to pass to the CLI script
'''
return []
def init_terminal(self, cmdline, **kwargs):
'''
Instantiate a terminal with the passed cmdline and kwargs and return it.
Additionaly, it sets a reference to it in self._terminal and also collects
an initial listing of child processes which will be used when terminating the
terminal
'''
# Late import
import salt.utils.nb_popen as nb_popen
self._terminal = nb_popen.NonBlockingPopen(cmdline, **kwargs)
self._children = collect_child_processes(self._terminal.pid)
atexit.register(self.terminate)
return self._terminal
def terminate(self):
'''
Terminate the started daemon
'''
if self._terminal is None:
return
# Lets log and kill any child processes which salt left behind
if self._terminal.stdout:
self._terminal.stdout.close()
if self._terminal.stderr:
self._terminal.stderr.close()
terminate_process(pid=self._terminal.pid,
children=self._children,
kill_children=True,
slow_stop=self.slow_stop)
class SaltDaemonScriptBase(SaltScriptBase):
'''
Base class for Salt Daemon CLI scripts
'''
def __init__(self, *args, **kwargs):
self._process_cli_output_in_thread = kwargs.pop('process_cli_output_in_thread', True)
event_listener_config_dir = kwargs.pop('event_listener_config_dir', None)
if event_listener_config_dir and not isinstance(event_listener_config_dir, str):
event_listener_config_dir = event_listener_config_dir.realpath().strpath
self.event_listener_config_dir = event_listener_config_dir
super(SaltDaemonScriptBase, self).__init__(*args, **kwargs)
self._running = threading.Event()
self._connectable = threading.Event()
def is_alive(self):
'''
Returns true if the process is alive
'''
return self._running.is_set()
def get_check_ports(self): # pylint: disable=no-self-use
'''
Return a list of ports to check against to ensure the daemon is running
'''
return []
def get_check_events(self): # pylint: disable=no-self-use
'''
Return a list of event tags to check against to ensure the daemon is running
'''
return []
def get_salt_run_fixture(self):
if self.request.scope == 'session':
try:
return self.request.getfixturevalue('session_salt_run')
except AttributeError:
return self.request.getfuncargvalue('session_salt_run')
try:
return self.request.getfixturevalue('salt_run')
except AttributeError:
return self.request.getfuncargvalue('salt_run')
def start(self):
'''
Start the daemon subprocess
'''
# Late import
log.info('[%s][%s] Starting DAEMON in CWD: %s', self.log_prefix, self.cli_display_name, self.cwd)
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
log.info('[%s][%s] Running \'%s\'...', self.log_prefix, self.cli_display_name, ' '.join(proc_args))
self.init_terminal(proc_args, env=self.environ, cwd=self.cwd)
self._running.set()
if self._process_cli_output_in_thread:
process_output_thread = threading.Thread(target=self._process_output_in_thread)
process_output_thread.daemon = True
process_output_thread.start()
return True
def _process_output_in_thread(self):
'''
        Consume the terminal's output in a background thread until the process exits
'''
try:
while self._running.is_set() and self._terminal.poll() is None:
# We're not actually interested in processing the output, just consume it
if self._terminal.stdout is not None:
self._terminal.recv()
if self._terminal.stderr is not None:
self._terminal.recv_err()
time.sleep(0.125)
if self._terminal.poll() is not None:
self._running.clear()
except (SystemExit, KeyboardInterrupt):
self._running.clear()
finally:
if self._terminal.stdout:
self._terminal.stdout.close()
if self._terminal.stderr:
self._terminal.stderr.close()
@property
def pid(self):
terminal = getattr(self, '_terminal', None)
if not terminal:
return
return terminal.pid
def terminate(self):
'''
Terminate the started daemon
'''
# Let's get the child processes of the started subprocess
self._running.clear()
self._connectable.clear()
time.sleep(0.0125)
super(SaltDaemonScriptBase, self).terminate()
def wait_until_running(self, timeout=None):
'''
Blocking call to wait for the daemon to start listening
'''
# Late import
import salt.ext.six as six
if self._connectable.is_set():
return True
expire = time.time() + timeout
check_ports = self.get_check_ports()
if check_ports:
log.debug(
'[%s][%s] Checking the following ports to assure running status: %s',
self.log_prefix,
self.cli_display_name,
check_ports
)
check_events = self.get_check_events()
if check_events:
log.debug(
'[%s][%s] Checking the following event tags to assure running status: %s',
self.log_prefix,
self.cli_display_name,
check_events
)
log.debug('Wait until running expire: %s Timeout: %s Current Time: %s', expire, timeout, time.time())
event_listener = EventListener(
self.event_listener_config_dir or self.config_dir,
self.log_prefix
)
try:
while True:
if self._running.is_set() is False:
# No longer running, break
log.warning('No longer running!')
break
if time.time() > expire:
# Timeout, break
                    log.debug('Expired at %s (was set to %s)', time.time(), expire)
break
if not check_ports and not check_events:
self._connectable.set()
break
if check_events:
for tag in event_listener.wait_for_events(check_events, timeout=timeout - 0.5):
check_events.remove(tag)
if not check_events:
stop_sending_events_file = self.config.get('pytest_stop_sending_events_file')
if stop_sending_events_file and os.path.exists(stop_sending_events_file):
log.warning('Removing pytest_stop_sending_events_file: %s', stop_sending_events_file)
os.unlink(stop_sending_events_file)
for port in set(check_ports):
if isinstance(port, int):
log.debug('[%s][%s] Checking connectable status on port: %s',
self.log_prefix,
self.cli_display_name,
port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn = sock.connect_ex(('localhost', port))
try:
if conn == 0:
log.debug('[%s][%s] Port %s is connectable!',
self.log_prefix,
self.cli_display_name,
port)
check_ports.remove(port)
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
continue
finally:
sock.close()
del sock
elif isinstance(port, six.string_types):
salt_run = self.get_salt_run_fixture()
minions_joined = salt_run.run('manage.joined')
if minions_joined.exitcode == 0:
if minions_joined.json and port in minions_joined.json:
check_ports.remove(port)
log.warning('Removed ID %r Still left: %r', port, check_ports)
elif minions_joined.json is None:
                                log.debug('salt-run manage.joined did not return any valid JSON: %s', minions_joined)
time.sleep(0.5)
except KeyboardInterrupt:
return self._connectable.is_set()
finally:
event_listener.terminate()
if self._connectable.is_set():
log.debug('[%s][%s] All ports checked. Running!', self.log_prefix, self.cli_display_name)
return self._connectable.is_set()
class ShellResult(namedtuple('Result', ('exitcode', 'stdout', 'stderr', 'json'))):
'''
This class serves the purpose of having a common result class which will hold the
    data from the CLI script run, regardless of the output backend being used.
    It allows comparing the result against either the parsed JSON output or the raw stdout.
'''
__slots__ = ()
def __new__(cls, exitcode, stdout, stderr, json):
return super(ShellResult, cls).__new__(cls, exitcode, stdout, stderr, json)
# These are copied from the namedtuple verbose output in order to quiet down PyLint
exitcode = property(itemgetter(0), doc='Alias for field number 0')
stdout = property(itemgetter(1), doc='Alias for field number 1')
stderr = property(itemgetter(2), doc='Alias for field number 2')
json = property(itemgetter(3), doc='Alias for field number 3')
def __eq__(self, other):
'''
Allow comparison against the parsed JSON or the output
'''
if self.json:
return self.json == other
return self.stdout == other
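# Editor's addition -- a hedged usage sketch (not part of the original helpers) that only
# illustrates the equality semantics implemented above; the literal values are made up.
def _example_shell_result_equality():
    result_with_json = ShellResult(0, '{"ok": true}', '', {'ok': True})
    assert result_with_json == {'ok': True}       # compared against the parsed JSON
    result_without_json = ShellResult(0, 'plain output', '', None)
    assert result_without_json == 'plain output'  # falls back to comparing stdout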
class SaltCliScriptBase(SaltScriptBase):
'''
Base class which runs Salt's non daemon CLI scripts
'''
DEFAULT_TIMEOUT = 25
def __init__(self, *args, **kwargs):
self.default_timeout = kwargs.pop('default_timeout', self.DEFAULT_TIMEOUT)
super(SaltCliScriptBase, self).__init__(*args, **kwargs)
def get_base_script_args(self):
return SaltScriptBase.get_base_script_args(self) + ['--out=json']
def get_minion_tgt(self, **kwargs):
return kwargs.pop('minion_tgt', None)
def run(self, *args, **kwargs):
'''
Run the given command synchronously
'''
# Late import
import salt.ext.six as six
timeout = kwargs.get('timeout', self.default_timeout)
if 'fail_hard' in kwargs:
# Explicit fail_hard passed
fail_hard = kwargs.pop('fail_hard')
else:
# Get the value of the _salt_fail_hard fixture
try:
fail_hard = self.request.getfixturevalue('_salt_fail_hard')
except AttributeError:
fail_hard = self.request.getfuncargvalue('_salt_fail_hard')
if fail_hard is True:
fail_method = pytest.fail
else:
fail_method = pytest.xfail
log.info('The fail hard setting for %s is: %s', self.cli_script_name, fail_hard)
minion_tgt = self.get_minion_tgt(**kwargs)
timeout_expire = time.time() + kwargs.pop('timeout', self.default_timeout)
environ = self.environ.copy()
environ['PYTEST_LOG_PREFIX'] = '[{}] '.format(self.log_prefix)
environ['PYTHONUNBUFFERED'] = '1'
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
if minion_tgt is not None:
proc_args.append(minion_tgt)
proc_args.extend(list(args))
for key in kwargs:
proc_args.append('{}={}'.format(key, kwargs[key]))
log.info('[%s][%s] Running \'%s\' in CWD: %s ...',
self.log_prefix, self.cli_display_name, ' '.join(proc_args), self.cwd)
terminal = self.init_terminal(proc_args,
cwd=self.cwd,
env=environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Consume the output
stdout = six.b('')
stderr = six.b('')
try:
while True:
# We're not actually interested in processing the output, just consume it
if terminal.stdout is not None:
try:
out = terminal.recv(4096)
except IOError:
out = six.b('')
if out:
stdout += out
if terminal.stderr is not None:
try:
err = terminal.recv_err(4096)
except IOError:
                        err = six.b('')
if err:
stderr += err
if out is None and err is None:
break
if timeout_expire < time.time():
self.terminate()
fail_method(
'[{}][{}] Failed to run: args: {!r}; kwargs: {!r}; Error: {}'.format(
self.log_prefix,
self.cli_display_name,
args,
kwargs,
'[{}][{}] Timed out after {} seconds!'.format(self.log_prefix,
self.cli_display_name,
timeout)
)
)
except (SystemExit, KeyboardInterrupt):
pass
finally:
self.terminate()
if six.PY3:
# pylint: disable=undefined-variable
stdout = stdout.decode(__salt_system_encoding__)
stderr = stderr.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
exitcode = terminal.returncode
stdout, stderr, json_out = self.process_output(minion_tgt, stdout, stderr, cli_cmd=proc_args)
return ShellResult(exitcode, stdout, stderr, json_out)
def process_output(self, tgt, stdout, stderr, cli_cmd=None):
if stdout:
try:
json_out = json.loads(stdout)
except ValueError:
log.debug('[%s][%s] Failed to load JSON from the following output:\n%r',
self.log_prefix,
self.cli_display_name,
stdout)
json_out = None
else:
json_out = None
return stdout, stderr, json_out
class SaltRunEventListener(SaltCliScriptBase):
'''
    Class which runs 'salt-run state.event *' to match against a provided set of event tags
'''
EVENT_MATCH_RE = re.compile(r'^(?P<tag>[\w/-]+)(?:[\s]+)(?P<data>[\S\W]+)$')
def get_base_script_args(self):
return SaltScriptBase.get_base_script_args(self)
def get_script_args(self): # pylint: disable=no-self-use
'''
Returns any additional arguments to pass to the CLI script
'''
return ['state.event']
def run(self, tags=(), timeout=10): # pylint: disable=arguments-differ
'''
Run the given command synchronously
'''
log.info('%s checking for tags: %s', self.__class__.__name__, tags)
# Late import
import salt.ext.six as six
exitcode = 0
timeout_expire = time.time() + timeout
environ = self.environ.copy()
environ['PYTEST_LOG_PREFIX'] = '{}[EventListen]'.format(self.log_prefix)
environ['PYTHONUNBUFFERED'] = '1'
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
log.info('[%s][%s] Running \'%s\' in CWD: %s...',
self.log_prefix, self.cli_display_name, ' '.join(proc_args), self.cwd)
to_match_events = set(tags)
matched_events = {}
terminal = self.init_terminal(proc_args,
cwd=self.cwd,
env=environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Consume the output
stdout = six.b('')
stderr = six.b('')
process_output = six.b('')
events_processed = 0
try:
while True:
time.sleep(0.5)
if terminal.stdout is not None:
try:
out = terminal.recv(4096)
except IOError:
out = six.b('')
if out:
stdout += out
process_output += out
if terminal.stderr is not None:
try:
err = terminal.recv_err(4096)
except IOError:
                        err = six.b('')
if err:
stderr += err
if out is None and err is None:
if to_match_events:
exitcode = 1
log.warning('[%s][%s] Premature exit?! Failed to find all of the required event tags. '
'Total events processed: %s',
self.log_prefix,
self.cli_display_name,
events_processed)
break
if process_output:
lines = process_output.split(b'}\n')
if lines[-1] != b'':
process_output = lines.pop()
else:
process_output = six.b('')
lines.pop()
for line in lines:
match = self.EVENT_MATCH_RE.match(line.decode(__salt_system_encoding__)) # pylint: disable=undefined-variable
if match:
events_processed += 1
tag, data = match.groups()
if tag in to_match_events:
matched_events[tag] = json.loads(data + '}')
to_match_events.remove(tag)
log.info('[%s][%s] Events processed so far: %d',
self.log_prefix,
self.cli_display_name,
events_processed)
if not to_match_events:
log.debug('[%s][%s] ALL EVENT TAGS FOUND!!!', self.log_prefix, self.cli_display_name)
break
if timeout_expire < time.time():
log.warning('[%s][%s] Failed to find all of the required event tags. Total events processed: %s',
self.log_prefix,
self.cli_display_name,
events_processed)
exitcode = 1
break
except (SystemExit, KeyboardInterrupt):
pass
finally:
self.terminate()
if six.PY3:
# pylint: disable=undefined-variable
stdout = stdout.decode(__salt_system_encoding__)
stderr = stderr.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
if to_match_events:
stop_sending_events_file = self.config.get('pytest_stop_sending_events_file')
if stop_sending_events_file and os.path.exists(stop_sending_events_file):
log.warning('Removing pytest_stop_sending_events_file: %s', stop_sending_events_file)
os.unlink(stop_sending_events_file)
json_out = {
'matched': matched_events,
'unmatched': to_match_events
}
return ShellResult(exitcode, stdout, stderr, json_out)
class EventListener:
DEFAULT_TIMEOUT = 60
def __init__(self, config_dir, log_prefix):
# Late import
self.config_dir = config_dir
self.log_prefix = '[{}][PyTestEventListener]'.format(log_prefix)
self._listener = None
def wait_for_events(self, check_events, timeout=None):
if timeout is None:
timeout = self.DEFAULT_TIMEOUT
log.info('%s waiting %s seconds for events: %s',
self.log_prefix,
timeout,
check_events)
matched_events = set()
events_to_match = set(check_events)
events_processed = 0
max_timeout = time.time() + timeout
while True:
if not events_to_match:
log.info('%s ALL EVENT TAGS FOUND!!!', self.log_prefix)
return matched_events
if time.time() > max_timeout:
log.warning(
'%s Failed to find all of the required event tags. '
'Total events processed: %s',
self.log_prefix,
events_processed
)
return matched_events
event = self.listener.get_event(full=True, auto_reconnect=True)
if event is None:
continue
tag = event['tag']
log.warning('Got event: %s', event)
if tag in events_to_match:
matched_events.add(tag)
events_to_match.remove(tag)
events_processed += 1
log.info('%s Events processed so far: %d',
self.log_prefix,
events_processed)
def terminate(self):
listener = self.listener
self._listener = None
listener.destroy()
@property
def listener(self):
if self._listener is None:
# Late import
import salt.config
import salt.utils.event
opts = salt.config.master_config(os.path.join(self.config_dir, 'master'))
self._listener = salt.utils.event.get_event('master', opts=opts, listen=True)
return self._listener
@pytest.mark.trylast
def pytest_configure(config):
pytest.helpers.utils.register(get_unused_localhost_port)
|
the-stack_0_2941 | """Config flow to configure the Synology DSM integration."""
from __future__ import annotations
import logging
from typing import Any
from urllib.parse import urlparse
from synology_dsm import SynologyDSM
from synology_dsm.exceptions import (
SynologyDSMException,
SynologyDSMLogin2SAFailedException,
SynologyDSMLogin2SARequiredException,
SynologyDSMLoginInvalidException,
SynologyDSMRequestException,
)
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.components import ssdp
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import (
CONF_DEVICE_TOKEN,
CONF_VOLUMES,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TIMEOUT,
DEFAULT_USE_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
CONF_OTP_CODE = "otp_code"
def _discovery_schema_with_defaults(discovery_info: DiscoveryInfoType) -> vol.Schema:
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input: dict[str, Any]) -> vol.Schema:
user_schema = {
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(
schema_input: dict[str, Any]
) -> dict[vol.Required | vol.Optional, Any]:
return {
vol.Required(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
vol.Optional(CONF_PORT, default=schema_input.get(CONF_PORT, "")): str,
vol.Optional(
CONF_SSL, default=schema_input.get(CONF_SSL, DEFAULT_USE_SSL)
): bool,
vol.Optional(
CONF_VERIFY_SSL,
default=schema_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
}
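# Editor's addition -- a hedged illustration only: the credentials below are hypothetical
# placeholders showing the kind of user input the shared schema accepts; they are not
# defaults used by the integration.
def _example_shared_schema_usage() -> dict[str, Any]:
    schema = vol.Schema(_ordered_shared_schema({}))
    return schema(
        {
            CONF_USERNAME: "admin",
            CONF_PASSWORD: "correct-horse-battery-staple",
            CONF_PORT: "5001",
            CONF_SSL: True,
            CONF_VERIFY_SSL: False,
        }
    )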
class SynologyDSMFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> SynologyDSMOptionsFlowHandler:
"""Get the options flow for this handler."""
return SynologyDSMOptionsFlowHandler(config_entry)
def __init__(self) -> None:
"""Initialize the synology_dsm config flow."""
self.saved_user_input: dict[str, Any] = {}
self.discovered_conf: dict[str, Any] = {}
async def _show_setup_form(
self,
user_input: dict[str, Any] | None = None,
errors: dict[str, str] | None = None,
) -> FlowResult:
"""Show the setup form to the user."""
if not user_input:
user_input = {}
if self.discovered_conf:
user_input.update(self.discovered_conf)
step_id = "link"
data_schema = _discovery_schema_with_defaults(user_input)
else:
step_id = "user"
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id=step_id,
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.discovered_conf or {},
)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return await self._show_setup_form(user_input, None)
if self.discovered_conf:
user_input.update(self.discovered_conf)
host = user_input[CONF_HOST]
port = user_input.get(CONF_PORT)
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
use_ssl = user_input.get(CONF_SSL, DEFAULT_USE_SSL)
verify_ssl = user_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
otp_code = user_input.get(CONF_OTP_CODE)
if not port:
if use_ssl is True:
port = DEFAULT_PORT_SSL
else:
port = DEFAULT_PORT
api = SynologyDSM(
host, port, username, password, use_ssl, verify_ssl, timeout=30
)
try:
serial = await self.hass.async_add_executor_job(
_login_and_fetch_syno_info, api, otp_code
)
except SynologyDSMLogin2SARequiredException:
return await self.async_step_2sa(user_input)
except SynologyDSMLogin2SAFailedException:
errors[CONF_OTP_CODE] = "otp_failed"
user_input[CONF_OTP_CODE] = None
return await self.async_step_2sa(user_input, errors)
except SynologyDSMLoginInvalidException as ex:
_LOGGER.error(ex)
errors[CONF_USERNAME] = "invalid_auth"
except SynologyDSMRequestException as ex:
_LOGGER.error(ex)
errors[CONF_HOST] = "cannot_connect"
except SynologyDSMException as ex:
_LOGGER.error(ex)
errors["base"] = "unknown"
except InvalidData:
errors["base"] = "missing_data"
if errors:
return await self._show_setup_form(user_input, errors)
# unique_id should be serial for services purpose
await self.async_set_unique_id(serial, raise_on_progress=False)
# Check if already configured
self._abort_if_unique_id_configured()
config_data = {
CONF_HOST: host,
CONF_PORT: port,
CONF_SSL: use_ssl,
CONF_VERIFY_SSL: verify_ssl,
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_MAC: api.network.macs,
}
if otp_code:
config_data[CONF_DEVICE_TOKEN] = api.device_token
if user_input.get(CONF_DISKS):
config_data[CONF_DISKS] = user_input[CONF_DISKS]
if user_input.get(CONF_VOLUMES):
config_data[CONF_VOLUMES] = user_input[CONF_VOLUMES]
return self.async_create_entry(title=host, data=config_data)
async def async_step_ssdp(self, discovery_info: DiscoveryInfoType) -> FlowResult:
"""Handle a discovered synology_dsm."""
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
friendly_name = (
discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME].split("(", 1)[0].strip()
)
mac = discovery_info[ssdp.ATTR_UPNP_SERIAL].upper()
# Synology NAS can broadcast on multiple IP addresses, since they can be connected to multiple ethernets.
# The serial of the NAS is actually its MAC address.
if self._mac_already_configured(mac):
return self.async_abort(reason="already_configured")
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
self.discovered_conf = {
CONF_NAME: friendly_name,
CONF_HOST: parsed_url.hostname,
}
self.context["title_placeholders"] = self.discovered_conf
return await self.async_step_user()
async def async_step_import(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_link(self, user_input: dict[str, Any]) -> FlowResult:
"""Link a config entry from discovery."""
return await self.async_step_user(user_input)
async def async_step_2sa(
self, user_input: dict[str, Any], errors: dict[str, str] | None = None
) -> FlowResult:
"""Enter 2SA code to anthenticate."""
if not self.saved_user_input:
self.saved_user_input = user_input
if not user_input.get(CONF_OTP_CODE):
return self.async_show_form(
step_id="2sa",
data_schema=vol.Schema({vol.Required(CONF_OTP_CODE): str}),
errors=errors or {},
)
user_input = {**self.saved_user_input, **user_input}
self.saved_user_input = {}
return await self.async_step_user(user_input)
def _mac_already_configured(self, mac: str) -> bool:
"""See if we already have configured a NAS with this MAC address."""
existing_macs = [
mac.replace("-", "")
for entry in self._async_current_entries()
for mac in entry.data.get(CONF_MAC, [])
]
return mac in existing_macs
class SynologyDSMOptionsFlowHandler(OptionsFlow):
"""Handle a option flow."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): cv.positive_int,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(
CONF_TIMEOUT, DEFAULT_TIMEOUT
),
): cv.positive_int,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
def _login_and_fetch_syno_info(api: SynologyDSM, otp_code: str) -> str:
"""Login to the NAS and fetch basic data."""
# These do i/o
api.login(otp_code)
api.utilisation.update()
api.storage.update()
api.network.update()
if (
not api.information.serial
or api.utilisation.cpu_user_load is None
or not api.storage.volumes_ids
or not api.network.macs
):
raise InvalidData
return api.information.serial # type: ignore[no-any-return]
class InvalidData(exceptions.HomeAssistantError):
"""Error to indicate we get invalid data from the nas."""
|
the-stack_0_2942 | import pandas as pd
import sklearn.model_selection as ms
class CrossValidation:
def __init__(self, df, shuffle,random_state=None):
self.df = df
self.random_state = random_state
self.shuffle = shuffle
if shuffle is True:
self.df = df.sample(frac=1,
random_state=self.random_state).reset_index(drop=True)
def hold_out_split(self,percent,stratify=None):
if stratify is not None:
y = self.df[stratify]
train,val = ms.train_test_split(self.df, test_size=percent/100,
stratify=y, random_state=self.random_state)
return train,val
size = len(self.df) - int(len(self.df)*(percent/100))
train = self.df.iloc[:size,:]
val = self.df.iloc[size:,:]
return train,val
def kfold_split(self, splits, stratify=None):
if stratify is not None:
            kf = ms.StratifiedKFold(n_splits=splits, shuffle=self.shuffle,
                random_state=self.random_state)
y = self.df[stratify]
for train, val in kf.split(X=self.df,y=y):
t = self.df.iloc[train,:]
v = self.df.iloc[val, :]
yield t,v
else:
kf = ms.KFold(n_splits=splits, shuffle=self.shuffle,
random_state=self.random_state)
for train, val in kf.split(X=self.df):
t = self.df.iloc[train,:]
v = self.df.iloc[val, :]
yield t,v |
the-stack_0_2943 | # -*- coding: utf-8 -*-
"""
Image Augmentation: Make it rain, make it snow. How to modify photos to train self-driving cars
by Ujjwal Saxena
https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f
"""
import numpy as np
import cv2
#
# Sunny and Shady
#
def add_brightness(img):
"""
The brightness of an image can be changed by changing the pixel values of the
“Lightness” channel [1] of the image in HLS color space. Converting the image
back to RGB gives the same image with enhanced or suppressed lighting.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS = np.array(img_HLS, dtype=np.float64)
# Generate a random value in [0.5, 1.5].
random_brightness_coefficient = np.random.uniform() + 0.5
# Scale pixel values up or down for channel 1 (Lightness)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * random_brightness_coefficient
# Make sure the color value does not exceed 255.
img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
#
# Shadows
#
def add_shadow(img, nshadows=1):
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# Add shadows to an initially empty mask image.
mask = np.zeros_like(img)
# Generate a list of shadow polygons.
shadow_list = generate_shadow_coordinates(img.shape, nshadows)
# Add all shadow polygons to the empty mask; single 255 denotes only red channel.
for shadow in shadow_list:
cv2.fillPoly(mask, shadow, 255)
# Use the mask to adjust pixels in the original image.
# If red channel is hot, the image "Lightness" channel's brightness is lowered.
img_HLS[:, :, 1][mask[:, :, 0] == 255] = img_HLS[:, :, 1][mask[:, :, 0] == 255] * 0.5
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_shadow_coordinates(imshape, nshadows=1):
shadow_list = []
for _ in range(nshadows):
shadow = []
# Dimensionality of the shadow polygon.
for _ in range(np.random.randint(3, 15)):
shadow.append((imshape[1] * np.random.uniform(), imshape[0] // 3 + imshape[0] * np.random.uniform()))
# Add vertices for a single shadow polygon.
shadow = np.array([shadow], dtype=np.int32)
shadow_list.append(shadow)
# List of shadow vertices.
return shadow_list
#
# Snow
#
def add_snow(img, snow_brightness=2.5, snow_point=140):
"""
Brighten the darkest parts of the image.
Increase 'snow_point' for more snow.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS = np.array(img_HLS, dtype=np.float64)
# Scale pixel values up for channel 1 (Lightness)
img_HLS[:, :, 1][img_HLS[:, :, 1] < snow_point] *= snow_brightness
# Make sure the color value does not exceed 255.
img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
#
# Rain
#
def add_rain(img):
# Generate rain drops as lines.
slant_extreme = 10
slant = np.random.randint(-slant_extreme, slant_extreme)
drop_length = 10
drop_width = 2
drop_color = (200, 200, 200) # a shade of gray
rain_drops = generate_random_lines(img.shape, slant, drop_length)
# Add rain drops to the image.
for drop in rain_drops:
cv2.line(img, (drop[0], drop[1]),
(drop[0] + slant, drop[1] + drop_length),
drop_color, drop_width)
img = cv2.blur(img, (7, 7)) # Rainy views are blurry.
# Darken the image a bit - rainy days are usually shady.
brightness_coefficient = 0.7
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# Scale pixel values down for channel 1 (Lightness)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * brightness_coefficient
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_random_lines(imshape, slant, drop_length, ndrops=500):
""" For heavy rain, try increasing 'ndrops'. """
drops = []
for _ in range(ndrops):
x = np.random.randint(slant, imshape[1]) if slant < 0 else \
np.random.randint(0, imshape[1] - slant)
y = np.random.randint(0, imshape[0] - drop_length)
drops.append((x, y))
return drops
#
# Fog
#
def add_fog(img, hw=100):
"""
Fog intensity is an important parameter to train a car for how much throttle it should give.
For coding such a function, you can take random patches from all over the image, and increase
the image’s lightness within those patches. With a simple blur, this gives a nice hazy effect.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * 0.8
haze_list = generate_random_blur_coordinates(img.shape, hw)
for haze_points in haze_list:
# # Make sure the color value does not exceed 255.
# img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
img_HLS = add_blur(img_HLS, haze_points[0], haze_points[1], hw)
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_random_blur_coordinates(imshape, hw):
blur_points = []
midx = imshape[1] // 2 - hw - 100
midy = imshape[0] // 2 - hw - 100
# radially generating coordinates
index = 1
while (midx > -100 or midy > -100):
for _ in range(250 * index):
x = np.random.randint(midx, imshape[1] - midx - hw)
y = np.random.randint(midy, imshape[0] - midy - hw)
blur_points.append((x, y))
midx -= 250 * imshape[1] // sum(imshape)
midy -= 250 * imshape[0] // sum(imshape)
index += 1
return blur_points
# def add_blur(img, x, y, hw):
# # Increase 'L' channel by 1.
# img[y:y + hw, x:x + hw, 1] = img[y:y + hw, x:x + hw, 1] + 1
# # Make sure the adjusted value does not exceed 255.
# img[:, :, 1][img[:, :, 1] > 255] = 255
# img = np.array(img, dtype=np.uint8)
# # Blur
# img[y:y + hw, x:x + hw, 1] = cv2.blur(img[y:y + hw, x:x + hw, 1], (5, 5))
# return img
def add_blur(img, x, y, hw):
# Create a grid of wrapped indices since numpy arrays do not handle
# slicing with negative values and wrap-around without some help.
wrappedRowIndices = np.arange(y, y + hw) % img.shape[0]
wrappedColIndices = np.arange(x, x + hw) % img.shape[1]
index_grid = np.ix_(wrappedRowIndices, wrappedColIndices, [1])
# Increase 'L' channel by 1.
img[index_grid] = img[index_grid] + 1
# Make sure the adjusted value does not exceed 255.
img[:, :, 1][img[:, :, 1] > 255] = 255
img = np.array(img, dtype=np.uint8)
# Blur
blur_patch = cv2.blur(img[index_grid], (5, 5)).reshape(hw, hw, 1)
img[index_grid] = blur_patch
return img
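# Editor's addition -- a hedged end-to-end sketch assuming a reasonably large RGB photo;
# 'road.jpg' and the output filenames are placeholders and do not ship with this module.
if __name__ == '__main__':
    bgr = cv2.imread('road.jpg')
    if bgr is None:
        raise SystemExit('road.jpg not found - point this sketch at any photo')
    image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    augmentations = {
        'bright': add_brightness(image.copy()),
        'shadow': add_shadow(image.copy(), nshadows=3),
        'snow': add_snow(image.copy()),
        'rain': add_rain(image.copy()),  # add_rain draws on the array it is given
        'fog': add_fog(image.copy()),
    }
    for name, augmented in augmentations.items():
        cv2.imwrite('augmented_{}.jpg'.format(name),
                    cv2.cvtColor(augmented, cv2.COLOR_RGB2BGR))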
|
the-stack_0_2945 | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import misc
import argparse
import os
import model
def train(args):
if args.dataset.lower() == 'celeba':
train_loader, _, _ = misc.load_celebA(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'lsun':
train_loader, val_loader, _ = misc.load_LSUN(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'imagenet':
train_loader, val_loader, _ = misc.load_imagenet(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'mnist':
train_loader, val_loader, _ = misc.load_mnist(args.batch_s, args.img_s)
img_c = 1
else:
raise NotImplementedError
fm_gen = [args.base_fm_n*pow(2,args.layer_n-1-l) for l in range(args.layer_n)]
fm_disc = [args.base_fm_n*pow(2,l) for l in range(args.layer_n)]
gen = model.Generator(args.z_dim, img_c, fm_gen).cuda()
gen.apply(model.init_weights)
disc = model.Discriminator(img_c, fm_disc).cuda()
disc.apply(model.init_weights)
criterion = nn.BCELoss()
label_real = 1
label_fake = 0
optim_gen = optim.Adam(gen.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
optim_disc = optim.Adam(disc.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
if args.resume:
filename = args.ckpt_dir + args.resume
if os.path.isfile(filename):
print("==> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch'] + 1
gen.load_state_dict(checkpoint['state_dict_gen'])
disc.load_state_dict(checkpoint['state_dict_disc'])
optim_gen.load_state_dict(checkpoint['optimizer_gen'])
optim_disc.load_state_dict(checkpoint['optimizer_disc'])
print("==> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("==> no checkpoint found at '{}'".format(filename))
else:
start_epoch = 0
if not os.path.isdir(args.img_dir):
os.system('mkdir ' + args.img_dir)
if not os.path.isdir(args.ckpt_dir):
os.system('mkdir ' + args.ckpt_dir)
#########################################
#### Train
## 1. Update Discriminator: maximize log(D(x)) + log(1-D(G(z)))
# 1-1. with real image x
# 1-2. with fake image G(z)
## 2. Update Generator: maximize log(D(G(z)))
for e in range(args.epochs):
epoch = start_epoch + e
loss_meter_gen = AverageMeter()
loss_meter_disc = AverageMeter()
out_meter_disc_f = AverageMeter()
out_meter_disc_r = AverageMeter()
out_meter_disc_g = AverageMeter()
for i, data in enumerate(train_loader):
img_real, _ = data
img_real = img_real.cuda()
batch_s = img_real.size(0)
optim_disc.zero_grad()
# 1-1. with real image x
            label_r = torch.full((batch_s, 1), label_real, dtype=torch.float).cuda()
out_disc_r = disc(img_real).view(batch_s, -1)
error_disc_r = criterion(out_disc_r, label_r)
error_disc_r.backward()
# 1-2. with fake image G(z)
img_fake = gen(torch.randn(batch_s, args.z_dim, 1, 1).cuda())
            label_f = torch.full((batch_s, 1), label_fake, dtype=torch.float).cuda()
out_disc_f = disc(img_fake.detach()).view(batch_s, -1)
error_disc_f = criterion(out_disc_f, label_f)
error_disc = error_disc_r + error_disc_f
error_disc_f.backward()
optim_disc.step()
# 2. Update Generator
for g_iter in range(3):
img_fake = gen(torch.randn(batch_s, args.z_dim, 1, 1).cuda())
out_disc_g = disc(img_fake).view(batch_s, -1)
error_gen = criterion(out_disc_g, label_r)
optim_gen.zero_grad()
error_gen.backward()
optim_gen.step()
loss_meter_gen.update(error_gen.item(), batch_s)
loss_meter_disc.update(error_disc.item(), batch_s)
out_meter_disc_f.update(torch.sum(out_disc_f).item(), batch_s)
out_meter_disc_r.update(torch.sum(out_disc_r).item(), batch_s)
out_meter_disc_g.update(torch.sum(out_disc_g).item(), batch_s)
if i % args.log_term == 0:
print('epoch: %d, batch: %d \t Loss(D/G): %.4f / %.4f \t D(R/F/G): %.4f / %.4f / %.4f'
% (epoch, i, loss_meter_disc.avg, loss_meter_gen.avg,
out_meter_disc_r.avg/batch_s, out_meter_disc_f.avg/batch_s, out_meter_disc_g.avg/batch_s))
fd = open('save_log.txt', 'a')
fd.write('epoch: %d, batch: %d \t Loss(D/G): /%.4f / %.4f/ || D(R/F/G): /%.4f / %.4f / %.4f/ \n'
% (epoch, i, loss_meter_disc.avg, loss_meter_gen.avg,
out_meter_disc_r.avg, out_meter_disc_f.avg, out_meter_disc_g.avg))
fd.close()
misc.plot_samples_from_images(img_fake, batch_s, args.img_dir, 'img_e{}b{}.jpg'.format(epoch, i))
torch.save({
'epoch': epoch,
'state_dict_gen': gen.state_dict(),
'state_dict_disc': disc.state_dict(),
'optimizer_gen': optim_gen.state_dict(),
'optimizer_disc': optim_disc.state_dict()
},
args.ckpt_dir + 'checkpoint_e{}b{}.pt'.format(epoch, i))
loss_meter_gen = AverageMeter()
loss_meter_disc = AverageMeter()
out_meter_disc_f = AverageMeter()
out_meter_disc_r = AverageMeter()
out_meter_disc_g = AverageMeter()
def test(args):
raise NotImplementedError
class AverageMeter(object):
'''
from https://github.com/pytorch/examples/blob/master/imagenet/main.py.
Computes and stores the average and current values
'''
def __init__(self):
self.reset()
def reset(self):
self.val = 0.0
self.avg = 0.0
self.sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
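# Editor's addition -- a hedged illustration of the running-average bookkeeping above;
# the numbers are arbitrary.
def _example_average_meter():
    meter = AverageMeter()
    meter.update(2.0, n=4)  # a batch of 4 samples with mean loss 2.0
    meter.update(1.0, n=1)  # a single sample with loss 1.0
    return meter.avg        # (2.0 * 4 + 1.0 * 1) / 5 == 1.8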
def main(args):
if args.train:
train(args)
else:
test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pytorch implementation of DCGAN')
parser.add_argument('--base_fm_n', default=64, type=int, help='The number of base FM')
parser.add_argument('--learning_rate', '-lr', default=0.0002, type=float, help='Learning rate for Adam')
parser.add_argument('--beta1', default=0.5, type=float, help='Beta1 for Adam')
parser.add_argument('--epochs', default=1000, type=int, help='Total epoch')
parser.add_argument('--dataset', default='celeba', type=str, help='Dataset name: MNIST, CelebA, LSUN or imagenet')
parser.add_argument('--z_dim', default=100, type=int, help='Dimension of z')
parser.add_argument('--resume', default='', type=str, help='Name of previouse checkpoint file (defalut: None)')
parser.add_argument('--img_dir', default='/export/scratch/a/choi574/saved_model/gan_face/plots/', type=str, help='Directory to save test plots')
parser.add_argument('--ckpt_dir', default='/export/scratch/a/choi574/saved_model/gan_face/', type=str, help='Name of previouse checkpoint dir')
parser.add_argument('--batch_s', default=128, type=int, help='Size of batch')
parser.add_argument('--img_s', default=64, type=int, help='Size of Image')
parser.add_argument('--layer_n', default=4, type=int, help='The number of layers')
parser.add_argument('--train', default=True, type=misc.str2bool, help='Train or generate')
parser.add_argument('--log_term', default=10, type=int, help='log recording term (save every N batch)')
args = parser.parse_args()
main(args)
|
the-stack_0_2947 | """Read and write notebooks as regular .py files.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import new_code_cell, new_text_cell, new_worksheet, new_notebook
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class PyReaderError(Exception):
pass
class PyReader(NotebookReader):
def reads(self, s, **kwargs):
return self.to_notebook(s,**kwargs)
def to_notebook(self, s, **kwargs):
lines = s.splitlines()
cells = []
cell_lines = []
state = u'codecell'
for line in lines:
if line.startswith(u'# <nbformat>'):
pass
elif line.startswith(u'# <codecell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'codecell'
cell_lines = []
elif line.startswith(u'# <htmlcell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'htmlcell'
cell_lines = []
elif line.startswith(u'# <markdowncell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'markdowncell'
cell_lines = []
else:
cell_lines.append(line)
if cell_lines and state == u'codecell':
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
ws = new_worksheet(cells=cells)
nb = new_notebook(worksheets=[ws])
return nb
def new_cell(self, state, lines):
if state == u'codecell':
input = u'\n'.join(lines)
input = input.strip(u'\n')
if input:
return new_code_cell(input=input)
elif state == u'htmlcell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'html',source=text)
elif state == u'markdowncell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'markdown',source=text)
def _remove_comments(self, lines):
new_lines = []
for line in lines:
if line.startswith(u'#'):
new_lines.append(line[2:])
else:
new_lines.append(line)
text = u'\n'.join(new_lines)
text = text.strip(u'\n')
return text
def split_lines_into_blocks(self, lines):
if len(lines) == 1:
yield lines[0]
            return  # end the generator cleanly instead of raising StopIteration
import ast
source = '\n'.join(lines)
code = ast.parse(source)
starts = [x.lineno-1 for x in code.body]
for i in range(len(starts)-1):
yield '\n'.join(lines[starts[i]:starts[i+1]]).strip('\n')
yield '\n'.join(lines[starts[-1]:]).strip('\n')
class PyWriter(NotebookWriter):
def writes(self, nb, **kwargs):
lines = []
lines.extend([u'# <nbformat>2</nbformat>',''])
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == u'code':
input = cell.get(u'input')
if input is not None:
lines.extend([u'# <codecell>',u''])
lines.extend(input.splitlines())
lines.append(u'')
elif cell.cell_type == u'html':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <htmlcell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'markdown':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <markdowncell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
lines.append('')
return unicode('\n'.join(lines))
_reader = PyReader()
_writer = PyWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
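# Editor's addition -- a hedged round-trip sketch for this Python 2 era module; the source
# string is made up and only exercises the reader/writer pair defined above.
def _example_roundtrip():
    source = u'# <nbformat>2</nbformat>\n\n# <codecell>\n\nprint 1 + 1\n\n# <codecell>\n\nx = 42\n'
    nb = reads(source)
    return writes(nb)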
|
the-stack_0_2949 | import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import time
# Baseline: VGG-11 finetuning
def return_baseline():
net = torchvision.models.vgg11_bn(pretrained=True)
for param in net.parameters():
param.requires_grad = False
# Reshape to 5 classes...
num_in = net.classifier[6].in_features
net.classifier[6] = nn.Linear(num_in, 5)
return net
# Draw images
def show(images):
images = images * 255.0 # denormalize
np_images = images.numpy()
print(np_images.shape)
plt.imshow(np.transpose(np_images, (1, 2, 0)))
plt.show()
# Show random images
def visualize(loader, categories):
temp = iter(loader)
    images, labels = next(temp)
show(torchvision.utils.make_grid(images))
print(' '.join('%5s' % categories[labels[j]] for j in range(labels.size(0))))
# Load data
def init(batch_size):
transform_train = transforms.Compose(
[transforms.Resize((256, 256)),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (255.0, 255.0, 255.0))]) # !!! Order matters
transform_dev = transforms.Compose(
[transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (255.0, 255.0, 255.0))]) # !!! Order matters
train_set = torchvision.datasets.ImageFolder(root="../data/train", transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
shuffle=True, num_workers=4)
dev_set = torchvision.datasets.ImageFolder(root="../data/dev", transform=transform_dev)
dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=batch_size,
shuffle=False, num_workers=4)
print(train_set.classes)
print(dev_set.classes)
categories = ('cbb', 'cbsd', 'cgm', 'cmd', 'healthy')
return train_loader, dev_loader, categories
# Train data
def train(num_epochs, loader, device, optimizer, criterion, net):
for epoch in range(num_epochs):
running_loss = 0.0
time_now = time.time()
correct = 0
total = 0
for i, data in enumerate(loader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 100 == 99:
print('[%d, %d] loss: %.4f' % (epoch + 1, i + 1, running_loss / 100))
running_loss = 0.0
print('Epoch time: %.2fs' % (time.time() - time_now))
print('Train acc: %f' % (100 * correct / total))
# Test
def inference(loader, device, net):
correct = 0
total = 0
with torch.no_grad():
for data in loader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test acc: %f' % (100 * correct / total))
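# Editor's addition -- a hedged sketch of how the helpers above are typically wired
# together; the optimizer choice and hyper-parameters are assumptions, not part of the
# original script.
def run_baseline(num_epochs=5, batch_size=32):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_loader, dev_loader, _categories = init(batch_size)
    net = return_baseline().to(device)
    criterion = nn.CrossEntropyLoss()
    # Only the replaced classifier head has requires_grad=True, so optimize just that.
    optimizer = torch.optim.SGD(net.classifier[6].parameters(), lr=1e-3, momentum=0.9)
    train(num_epochs, train_loader, device, optimizer, criterion, net)
    inference(dev_loader, device, net)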
|
the-stack_0_2950 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import model_from_json
import math
from sklearn.metrics import mean_squared_error
import requests
from textblob import TextBlob
#dataset_main = pd.read_csv('Google_Stock_Price_Train.csv')
#dataset = dataset_main.iloc[0:1259, 1:2].values
file = open("Stocks/bmi.us.txt", "r")
dataset = [[]]
count = 0
for line in file:
tokens = line.split(',')
array = [0]
if count > 0 :
array[0] = float(tokens[1])
dataset.insert(count,array)
count = count + 1
#print (count)
dataset.pop(0)
#print (dataset)
sc = MinMaxScaler(feature_range = (0, 1))
dataset_scaled = sc.fit_transform(dataset)
def train():
#training_set = dataset.iloc[0:4001, 2:3].values
#training_set_scaled = sc.fit_transform(training_set)
plt.plot(dataset, color = 'blue', label = 'Price')
plt.title('Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
X_train = []
y_train = []
X_train = dataset_scaled[0:2899]
y_train = dataset_scaled[1:2900]
plt.plot(X_train, color = 'red', label = 'Scaled Price')
plt.title('Scaled Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
regressor = Sequential()
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 200, batch_size = 32)
model_json = regressor.to_json()
with open("modelTempantm.json", "w") as json_file:
json_file.write(model_json)
regressor.save_weights("modelTempantm.h5")
print("Saved model to disk")
def load():
test_set = dataset[2900:3000]
#test_set_scaled = sc.transform(test_set)
test_set_scaled = dataset_scaled[2900:3000]
json_file = open('modelTempantm.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("modelTempantm.h5")
print("Loaded model from disk")
test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
predicted_temprature = loaded_model.predict(test_set_reshaped)
predicted_temprature = sc.inverse_transform(predicted_temprature)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 5
plt.rcParams["figure.figsize"] = fig_size
plt.plot(predicted_temprature, color = 'blue', label = 'Predicted Price')
plt.plot(test_set, color = 'red', label = 'Real Price')
plt.title('Price Prediction')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
rmse = math.sqrt(mean_squared_error(test_set, predicted_temprature)) / 10
print (rmse)
def prediction():
#test_set = dataset_main.iloc[4001:4101, 2:3].values
#test_set_scaled = sc.transform(test_set)
test_set_scaled = dataset_scaled[3000:3010]
json_file = open('modelTempantm.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("modelTempantm.h5")
test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
predicted_temprature = loaded_model.predict(test_set_reshaped)
predicted_temprature = sc.inverse_transform(predicted_temprature)
#print(predicted_temprature)
return predicted_temprature
def senti():
url = ('https://newsapi.org/v2/everything?q=%20bmi%20stock%20market&apiKey=6e593f373865401e803d6874594f9063')
response = requests.get(url)
#print (response.json())
parsed_json = response.json()
#print(parsed_json['status'])
array = parsed_json['articles']
polarity = 0.0;
count = 0;
for i in array:
#print(i['description'])
blob = TextBlob(i['description'])
count = count + 1
polarity = polarity + blob.sentiment.polarity
polarity = polarity / count
#print(polarity)
return polarity
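# Editor's note (hedged): TextBlob polarity scores lie in [-1.0, 1.0], so the averaged
# value returned by senti() stays in that range; run() below maps it onto the demand
# buckets printed for the user.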
def run():
print('Prediction of bmi Stock Price in Next 10 Days :')
p = prediction()
s = senti()
print("Date Price")
d = 10
m = 1
y = 2019
    for i in range(0, 10):
if (d == 31):
d = 1;
m += 1;
if (m == 13):
m = 1;
print(str(d) + "-" + str(m) + "-"+ str(y)+": "+ str(p[i][0]))
d += 1
print('news polarity : ' + str(s))
if s > 0.5 :
print('User Demand Is Very High')
elif s > 0:
print('User Demand Is High')
elif s < -0.5:
print('User Demand Is Very Low')
elif s < 0:
print('User Demand IS Low')
|
the-stack_0_2951 | import io
import xlsxwriter
from . import models
from sales import models as s_models, exports as s_exports
from datetime import datetime
import decimal
def date_parse(str, fmt='%Y-%m-%d %H:%M:%S'):
if str == None:
return None
return datetime.strptime(str, fmt)
def cell(i, j):
char = "A"
char = chr(ord(char[0]) + j - 1)
return f'{char}{i}'
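# Editor's note (hedged): cell() maps 1-based (row, column) indices to an A1-style Excel
# reference, e.g. cell(1, 1) -> 'A1' and cell(3, 4) -> 'D3'; it only covers columns A-Z,
# which is enough for the 21-column report written below.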
def date_fmt(date):
if date == None:
return None
return date.strftime("%d/%m/%Y")
# def get_refs(sale):
# exit_ref = None
# c2_ref = None
# assessment_ref = None
# exit_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_EXIT, sale=sale).first()
# if exit_doc:
# exit_ref = exit_doc.ref_number
# assessment_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_ASSESSMENT, sale=sale).first()
# if assessment_doc:
# assessment_ref = assessment_doc.ref_number
# c2_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_C2, sale=sale).first()
# if c2_doc:
# c2_ref = c2_doc.ref_number
# return [c2_ref, assessment_ref, exit_ref]
def export_report_inv_details(request, sales):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
main = workbook.add_worksheet("Report")
    headers = ['ID', 'TRANS_DATE', 'CUSTOMER', 'DELIVERY NOTE', 'VEH#',
               'TAX INVOICE', 'SO#', 'PRODUCT', 'QTY(TONS)', 'DESTINATION',
               'VEH# TRAILER', 'AGENT', 'C2', 'ASSESSMENT', 'EXIT', 'RATE/T',
               'TOTAL VALUE EX VAT', 'VAT AMOUNT 18%', 'TOTAL VALUE INC VAT',
               'INV NUMBER', 'ASSIGN#']
rows = []
for prj in sales:
comm_amt = prj.quantity2 * prj.invoice.commission
row = []
row.append(prj.id)
row.append(date_fmt(date_parse(prj.transaction_date)))
row.append(prj.customer_name)
row.append(prj.delivery_note)
row.append(prj.vehicle_number)
row.append(prj.tax_invoice)
row.append(prj.sales_order)
row.append(prj.product_name)
row.append(float(prj.quantity2))
row.append(prj.destination)
row.append(prj.vehicle_number_trailer)
row.append(prj.agent.code if prj.agent else 'None')
row.extend(s_exports.get_refs(prj))
row.append(float(prj.invoice.commission))
row.append(float(comm_amt))
row.append(float(comm_amt * decimal.Decimal(0.18)))
row.append(float(comm_amt*decimal.Decimal(1.18)))
row.append(prj.invoice.number)
row.append(prj.assign_no)
rows.append(row)
for j, col in enumerate(headers, start=1):
main.write(f'{cell(1, j)}', col)
for i, row in enumerate(rows, start=2):
for j, col in enumerate(row, start=1):
main.write(f'{cell(i, j)}', col)
workbook.close()
xlsx_data = output.getvalue()
return xlsx_data
def export_invoices(request, invoices):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
main = workbook.add_worksheet("Report")
headers = ['ID', 'INVOICE NO', 'PHY INVOICE NO', 'RATE', 'AGENT',
'QUANTITY(TONS)', 'VALUE(TZS)', 'VALUE(VAT INCL.)', 'STATUS']
rows = []
def get_phy_num(inv):
d = inv.docs.first()
ref_num = int(d.ref_number) if d else None
return ref_num
for prj in invoices:
row = []
row.append(prj.id)
row.append(prj.number)
row.append(get_phy_num(prj))
row.append(prj.commission)
row.append(prj.agent.code)
row.append(prj.quantity)
row.append(prj.value)
row.append(prj.value * decimal.Decimal(1.18))
row.append('Pending' if prj.status == 0 else 'Completed')
rows.append(row)
for j, col in enumerate(headers, start=1):
main.write(f'{cell(1, j)}', col)
for i, row in enumerate(rows, start=2):
for j, col in enumerate(row, start=1):
main.write(f'{cell(i, j)}', col)
workbook.close()
xlsx_data = output.getvalue()
return xlsx_data
|
the-stack_0_2952 |
import sys
import tarfile
from urllib.request import urlretrieve
import logging
import time
from pathlib import Path
from collections import defaultdict
logger = logging.getLogger(__name__)
MODEL_DIRECTORY = Path(__file__).parent / 'models'
MODELS = {
'en': (
'chainer',
'tri_headfirst',
'1mxl1HU99iEQcUYhWhvkowbE4WOH0UKxv',
MODEL_DIRECTORY / 'config_en.json'
),
'en[elmo]': (
'allennlp',
'lstm_parser_elmo',
'1UldQDigVq4VG2pJx9yf3krFjV0IYOwLr',
MODEL_DIRECTORY / 'config_en.json'
),
'en[rebank]': (
'allennlp',
'lstm_parser_char_rebanking',
'1Az840uCW8QuAkNCZq_Y8VOkW5j0Vtcj9',
MODEL_DIRECTORY / 'config_rebank.json'
),
'en[elmo_rebank]': (
'allennlp',
'lstm_parser_elmo_rebanking',
'1deyCjSgCuD16WkEhOL3IXEfQBfARh_ll',
MODEL_DIRECTORY / 'config_rebank.json'
),
'ja': (
'chainer',
'ja_headfinal',
'1bblQ6FYugXtgNNKnbCYgNfnQRkBATSY3',
MODEL_DIRECTORY / 'config_ja.json'
)
}
AVAILABLE_MODEL_VARIANTS = defaultdict(list)
for model in MODELS:
if '[' in model and ']' in model:
assert model[-1] == ']'
lang, variant = model[:-1].split('[')
AVAILABLE_MODEL_VARIANTS[lang].append(variant)
SEMANTIC_TEMPLATES = {
'en': MODEL_DIRECTORY / 'semantic_templates_en_event.yaml',
'ja': MODEL_DIRECTORY / 'semantic_templates_ja_event.yaml'
}
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = min(int(count * block_size * 100 / total_size), 100)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def download(lang, variant):
model_name = f'{lang}[{variant}]' if variant else lang
framework, basename, url, _ = MODELS[model_name]
from google_drive_downloader import GoogleDriveDownloader as gdd
logging.info(f'start downloading from {url}')
filename = (MODEL_DIRECTORY / basename).with_suffix('.tar.gz')
gdd.download_file_from_google_drive(file_id=url,
dest_path=filename,
unzip=False,
overwrite=True)
if framework == 'chainer':
logging.info(f'extracting files')
tf = tarfile.open(filename)
tf.extractall(MODEL_DIRECTORY)
logging.info(f'finished')
def load_model_directory(model_name):
framework, basename, _, config = MODELS[model_name]
model_path = MODEL_DIRECTORY / basename
if framework == 'allennlp':
model_path = model_path.with_suffix('.tar.gz')
if not model_path.exists():
lang, variant = model_name[:-1].split('[')
raise RuntimeError(f'please download the model by doing \'depccg_{lang} download VARIANT\'.')
return model_path, config
def model_is_available(model_name):
return model_name in list(MODELS.keys())
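# Editor's addition -- a hedged usage sketch; 'en[elmo]' is just one registered variant and
# the model must already have been fetched with download() before load_model_directory()
# can resolve it.
def _example_resolve_model(model_name='en[elmo]'):
    if not model_is_available(model_name):
        raise ValueError('unknown model: {}'.format(model_name))
    model_path, config_path = load_model_directory(model_name)
    return model_path, config_path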
|
the-stack_0_2953 | from pytorch_lightning.callbacks import ModelCheckpoint
import os
from argparse import ArgumentParser
import os
import gc
import datetime
import numpy as np
import pandas as pd
import numpy as np
import torch
import pytorch_lightning as pl
from lightning_module import LightningModel
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
SEED = 1
torch.manual_seed(SEED)
np.random.seed(SEED)
def main(hparams):
"""
testing routine
Args:
hparams: checkpoint of the model to be tested and gpu, parallel backend etc.,
defined in the argument parser in if __name__ == '__main__':
Returns:
"""
checkpoint_path=hparams.ckp
model = LightningModel.load_from_checkpoint(
checkpoint_path=checkpoint_path,
tags_csv= hparams.hparams,
)
trainer = pl.Trainer(
gpus=[hparams.first_gpu+el for el in range(hparams.gpus)],
distributed_backend=hparams.distributed_backend,
)
trainer.test(model)
if __name__ == '__main__':
root_dir = os.path.dirname(os.path.realpath(__file__))
parent_parser = ArgumentParser(add_help=False)
parent_parser.add_argument(
'--gpus',
type=int,
default=4,
help='how many gpus'
)
parent_parser.add_argument(
'--distributed_backend',
type=str,
default='ddp',
help='supports three options dp, ddp, ddp2'
)
parent_parser.add_argument(
'--amp_optimization',
type=str,
default='00',
help="mixed precision format, default 00 (32), 01 mixed, 02 closer to 16, should not be used during testing"
)
parent_parser.add_argument(
'--first-gpu',
type=int,
default=0,
help='gpu number to use [first_gpu, ..., first_gpu+gpus]'
)
parent_parser.add_argument(
'--ckp',
type=str,
default='',
help='ckp path, if left empty no checkpoint is used'
)
parent_parser.add_argument(
'--hparams',
type=str,
default='',
help='path for hparams of ckp if left empty no checkpoint is used'
)
parent_parser.add_argument("--test",
action="store_true",
help="whether to train or test"
)
# each LightningModule defines arguments relevant to it
parser = LightningModel.add_model_specific_args(parent_parser)
hyperparams = parser.parse_args()
print(hyperparams)
main(hyperparams)
|
the-stack_0_2954 | # -*- coding: utf-8 -*-
# file: file_utils.py
# time: 2021/7/13 0020
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import copy
import json
import os
import pickle
import urllib.request
import torch
from findfile import find_files, find_dir, find_cwd_file
from google_drive_downloader import GoogleDriveDownloader as gdd
from pyabsa.core.atepc.dataset_utils.atepc_utils import split_text
from termcolor import colored
from pyabsa import __version__
# convert atepc_datasets in this repo for inferring_tutorials
from pyabsa.functional.dataset import DatasetItem
from pyabsa.utils.pyabsa_utils import save_args
def generate_inference_set_for_apc(dataset_path):
if isinstance(dataset_path, DatasetItem):
dataset_path = dataset_path.dataset_name
elif not os.path.exists(dataset_path):
dataset_path = os.getcwd()
train_datasets = find_files(dataset_path, ['dataset', 'train', 'apc'], exclude_key='.inference')
test_datasets = find_files(dataset_path, ['dataset', 'test', 'apc'], exclude_key='.inference')
for file in train_datasets + test_datasets:
try:
fin = open(file, 'r', newline='\n', encoding='utf-8')
lines = fin.readlines()
fin.close()
path_to_save = file + '.inference'
fout = open(path_to_save, 'w', encoding='utf-8', newline='\n', errors='ignore')
for i in range(0, len(lines), 3):
sample = lines[i].strip().replace('$T$', '[ASP]{}[ASP]'.format(lines[i + 1].strip()))
fout.write(sample + ' !sent! ' + lines[i + 2].strip() + '\n')
fout.close()
except:
print('Unprocessed file:', file)
print('save in: {}'.format(path_to_save))
print('process finished')
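# Hedged illustration of the transformation above (the sentence is made up): each APC
# sample spans three lines -- "the $T$ was great", "battery life", "Positive" -- and the
# emitted .inference line becomes "the [ASP]battery life[ASP] was great !sent! Positive".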
def is_similar(s1, s2):
count = 0.0
for token in s1.split(' '):
if token in s2:
count += 1
if count / len(s1.split(' ')) >= 0.8 and count / len(s2.split(' ')) >= 0.8:
return True
else:
return False
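# Hedged illustration: is_similar('the battery is great', 'the battery is great !')
# returns True, since at least 80% of the tokens of each string occur in the other.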
def assemble_aspects(fname):
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(len(lines)):
if i % 3 == 0 or i % 3 == 1:
lines[i] = ' '.join(split_text(lines[i].strip())).replace('$ t $', '$T$')
else:
lines[i] = lines[i].strip()
def unify_same_samples(same_samples):
text = same_samples[0][0].replace('$T$', same_samples[0][1])
polarities = [-999] * len(text.split())
tags = ['O'] * len(text.split())
samples = []
for sample in same_samples:
# print(sample)
polarities_tmp = copy.deepcopy(polarities)
try:
asp_begin = (sample[0].split().index('$T$'))
asp_end = sample[0].split().index('$T$') + len(sample[1].split())
for i in range(asp_begin, asp_end):
polarities_tmp[i] = sample[2]
if i - sample[0].split().index('$T$') < 1:
tags[i] = 'B-ASP'
else:
tags[i] = 'I-ASP'
samples.append([text, tags, polarities_tmp])
except:
print('Ignore Error:', sample[0])
return samples
samples = []
aspects_in_one_sentence = []
for i in range(0, len(lines), 3):
lines[i] = lines[i].replace('$T$', ' $T$ ').replace(' ', ' ')
if len(aspects_in_one_sentence) == 0:
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
continue
if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
else:
samples.extend(unify_same_samples(aspects_in_one_sentence))
aspects_in_one_sentence = []
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
samples.extend(unify_same_samples(aspects_in_one_sentence))
return samples
def split_aspects(sentence):
single_aspect_with_contex = []
aspect_num = len(sentence[1].split("|"))
aspects = sentence[1].split("|")
polarity = sentence[2].split("|")
pre_position = 0
aspect_context = sentence[0]
for i in range(aspect_num):
aspect_context = aspect_context.replace("$A$", aspects[i], 1)
single_aspect_with_contex.append(
(aspect_context[pre_position:aspect_context.find("$A$")], aspects[i], polarity[i]))
pre_position = aspect_context.find(aspects[i]) + len(aspects[i]) + 1
return single_aspect_with_contex
def convert_atepc(fname):
print('converting:', fname)
dist_fname = fname.replace('apc_datasets', 'atepc_datasets') + '.atepc'
lines = []
samples = assemble_aspects(fname)
for sample in samples:
for token_index in range(len(sample[1])):
token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
    # Before writing, check whether the target file already exists; if so, delete it
if os.path.exists(dist_fname):
os.remove(dist_fname)
fout = open(dist_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
# Slice the aspect terms out of the dataset
def convert_apc_set_to_atepc_set(path):
if isinstance(path, DatasetItem):
path = path.dataset_name
if not os.path.exists(path):
files = find_files(os.getcwd(), [path, 'dataset', 'apc'], exclude_key='.inference')
else:
files = find_files(path, '', exclude_key='infer')
print('Find datasets files at {}:'.format(path))
for f in files:
print(f)
for target_file in files:
if not (target_file.endswith('.inference') or target_file.endswith('.atepc')):
try:
convert_atepc(target_file)
except:
                print('failed to process: {}'.format(target_file))
else:
print('Ignore ', target_file)
print('finished')
# Slice the aspect terms out of the dataset
def refactor_chinese_dataset(fname, train_fname, test_fname):
lines = []
samples = assemble_aspects(fname)
positive = 0
negative = 0
sum = 0
# refactor testset
for sample in samples[:int(len(samples) / 5)]:
for token_index in range(len(sample[1])):
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
print(train_fname + f"sum={sum} positive={positive} negative={negative}")
if os.path.exists(test_fname):
os.remove(test_fname)
fout = open(test_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
positive = 0
negative = 0
sum = 0
# refactor trainset
for sample in samples[int(len(samples) / 5):]:
for token_index in range(len(sample[1])):
tokens = sample[0].split()
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
print(train_fname + f"sum={sum} positive={positive} negative={negative}")
if os.path.exists(train_fname):
os.remove(train_fname)
fout = open(train_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
def detect_error_in_dataset(dataset):
f = open(dataset, 'r', encoding='utf8')
lines = f.readlines()
for i in range(0, len(lines), 3):
# print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
if i + 3 < len(lines):
if is_similar(lines[i], lines[i + 3]) and len((lines[i] + " " + lines[i + 1]).split()) != len(
(lines[i + 3] + " " + lines[i + 4]).split()):
print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))
def save_model(opt, model, tokenizer, save_path):
if not opt.save_mode:
return
# Save a trained model, configuration and tokenizer
if hasattr(model, 'module') or hasattr(model, 'core'):
# print("save model from data-parallel!")
model_to_save = model.module
else:
# print("save a single cuda model!")
model_to_save = model
if opt.save_mode == 1 or opt.save_mode == 2:
if not os.path.exists(save_path):
os.makedirs(save_path)
f_config = open(save_path + opt.model_name + '.config', mode='wb')
f_tokenizer = open(save_path + opt.model_name + '.tokenizer', mode='wb')
pickle.dump(opt, f_config)
pickle.dump(tokenizer, f_tokenizer)
f_config.close()
f_tokenizer.close()
save_args(opt, save_path + opt.model_name + '.args.txt')
if opt.save_mode == 1:
torch.save(model_to_save.state_dict(), save_path + opt.model_name + '.state_dict') # save the state dict
elif opt.save_mode == 2:
torch.save(model.cpu(), save_path + opt.model_name + '.model') # save the state dict
elif opt.save_mode == 3:
# save the fine-tuned bert model
model_output_dir = save_path + 'fine-tuned-pretrained-model'
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
output_model_file = os.path.join(model_output_dir, 'pytorch_model.bin')
output_config_file = os.path.join(model_output_dir, 'config.json')
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.bert.config.to_json_file(output_config_file)
tokenizer.tokenizer.save_vocabulary(model_output_dir)
else:
raise ValueError('Invalid save_mode: {}'.format(opt.save_mode))
model.to(opt.device)
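# Hedged sketch of reloading what ``save_mode == 1`` writes above; the model class and
# its constructor signature are assumptions that depend on the caller.
def _example_load_state_dict_checkpoint(save_path, model_name, model_class):
    with open(save_path + model_name + '.config', mode='rb') as f_config:
        opt = pickle.load(f_config)
    with open(save_path + model_name + '.tokenizer', mode='rb') as f_tokenizer:
        tokenizer = pickle.load(f_tokenizer)
    model = model_class(opt)  # assumed to accept the pickled option namespace
    model.load_state_dict(torch.load(save_path + model_name + '.state_dict'))
    return model, tokenizer, opt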
def check_update_log():
print(colored('check update log at https://github.com/yangheng95/PyABSA/blob/release/release-note.json', 'red'))
def query_remote_version():
try:
dataset_url = 'https://raw.githubusercontent.com/yangheng95/ABSADatasets/v1.2/datasets/__init__.py'
content = urllib.request.urlopen(dataset_url, timeout=5)
content = content.read().decode("utf-8").split('\'')
version = content[-2]
except Exception as e:
return 'N.A.'
return version
def query_local_version():
try:
fin = open(find_cwd_file(['__init__.py', 'integrated_datasets']))
local_version = fin.read().split('\'')[-2]
fin.close()
except:
return 'N.A.'
return local_version
def check_dataset(): # retry_count is for unstable conn to GitHub
try:
local_version = query_local_version()
remote_version = query_remote_version()
print('Remote ABSADataset version: {} Local ABSADatasets version: {}'.format(remote_version, local_version))
if remote_version == 'N.A.':
print('Unknown remote version for ABSADatasets, please check the latest version of ABSADatasets')
elif local_version == 'N.A.':
print('Unknown local version for ABSADatasets, please check the latest version of ABSADatasets')
elif remote_version > local_version:
print(colored('There is a new version of ABSADatasets({}), please remove the downloaded datasets to automatically download the new version.'.format(remote_version), 'green'))
except Exception as e:
print(colored('ABSADatasets version check failed: {}, please check the latest datasets on GitHub manually.'.format(e), 'red'))
|
the-stack_0_2957 | # Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Plotting support for experimental runs"""
import typing as t
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from exot.experiment._base import Run
from exot.experiment.frequency_sweep import FrequencySweepRun
from exot.experiment.performance import PerformanceRun
from exot.util.attributedict import AttributeDict
from exot.util.plotting import add_spine, remove_spine, rugplot
from exot.util.scinum import is_fitted, unpack_array
from ._base import Plotter
__all__ = ("RunPlotter", "PerformanceRunPlotter", "FrequencySweepRunPlotter")
class RunPlotter(Plotter):
"""Plotting support for Run instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
PLOT_FILENAMES = {}
def __init__(self, run, *args, **kwargs):
if not run.digested and run.ingested:
raise ValueError("Plotter requires ingested and digested Run's")
self._run = run
if "save_path" not in kwargs:
kwargs["save_path"] = self.run.path
super().__init__(*args, **kwargs)
@property
def run(self) -> Run:
return self._run
def _raw_plot_helper(
self,
source: pandas.DataFrame,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
):
SUBPLOT_HEIGHT = 1 # inches
timestamps = source.iloc[:, 0]
data = source.iloc[:, 1:]
dims = data.shape[1]
dims_to_plot = data.shape[1]
if dim_count is not None:
if dim_count > dims:
raise ValueError(f"dim_count ({dim_count}) > dims ({dims})")
dims_to_plot = dim_count
start = start if start else timestamps.iloc[0]
end = end if end else timestamps.iloc[-1]
interval = timestamps.between(start, end)
timestamps = timestamps[interval]
data = data[interval]
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims_to_plot,
1,
figsize=(self._width, 1 + dims_to_plot * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="col",
squeeze=False,
)
lower_ylim, upper_ylim = np.quantile(data, [0.01, 0.99])
for i, axis in enumerate(axes[:, 0]):
axis.plot(
timestamps, data.iloc[:, i], marker="+", markersize=2, linewidth=0.5, alpha=0.5
)
if lower_ylim < upper_ylim:
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.get_xaxis().get_major_formatter().set_useOffset(False)
axis.set_xlim(timestamps.iloc[0], timestamps.iloc[-1])
axis.set_ylabel("{}\n{}\n{} ({})".format(*data.columns[i].split(":")), color="gray")
annotations = None
if "io" in self.run.intermediates:
if np.isclose(timestamps.iloc[0], 0.0) and "src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.src_log.iloc[[0, -1], 0]
elif (
not np.isclose(timestamps.iloc[0], 0.0)
) and "raw_src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.raw_src_log.iloc[[0, -1], 0]
if annotations is not None:
for axis in axes.ravel():
axis.vlines(
annotations,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
linestyle="--",
)
sns.despine()
axes[-1, 0].set_xlabel(source.columns[0], color="gray")
_title = (
"Processed raw data stream"
if np.isclose(timestamps.iloc[0], 0.0)
else "Raw data stream"
)
if dims_to_plot != dims:
_title += " [{} of {} dimensions]".format(dims_to_plot, dims)
plt.suptitle(_title, y=1.01, verticalalignment="bottom")
f.tight_layout()
return f
def plot_rawstream(
self,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
**kwargs,
):
f = self._raw_plot_helper(
self.run.i_rawstream, start=start, end=end, dim_count=dim_count
)
self._plot_save_helper(f, **kwargs)
def plot_rdpstream(
self,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
**kwargs,
):
f = self._raw_plot_helper(
self.run.i_rdpstream, start=start, end=end, dim_count=dim_count
)
self._plot_save_helper(f, **kwargs)
class FrequencySweepRunPlotter(RunPlotter):
"""Plotting support for FrequencySweepRun instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
def __init__(self, run: FrequencySweepRun, *args, **kwargs):
if not isinstance(run, FrequencySweepRun):
raise TypeError("FrequencySweepRunPlotter accepts only FrequencySweepRun instances")
super().__init__(run, *args, **kwargs)
def plot_spectrum(self, window=8192):
pass
class PerformanceRunPlotter(RunPlotter):
"""Plotting support for PerformanceRun instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
def __init__(self, run: PerformanceRun, *args, **kwargs):
if not isinstance(run, PerformanceRun):
raise TypeError("PerformanceRunPlotter accepts only PerformanceRun instances")
super().__init__(run, *args, **kwargs)
def plot_slicing(
self, start: int = 0, count: int = 10, dim_count: t.Optional[int] = None, **kwargs
):
SUBPLOT_HEIGHT = 1 # inches
samples_per_symbol = (
self.run.i_lnestream.shape[1]
if self.run.i_lnestream.ndim == 2
else self.run.i_lnestream.shape[2]
)
subsymbol_count = getattr(self.run.parent.layers.lne, "subsymbol_count", 1)
count = min([count, self.run.i_symstream.size])
start_idx = start
start_sample = start * samples_per_symbol
end_idx = start_idx + count
end_sample = start_sample + samples_per_symbol * (count)
selection_idx = slice(start_idx, start_idx + count)
selection_sample = slice(start_sample, end_sample)
selection_gt = slice(
start_idx * subsymbol_count, (start_idx + count + 1) * subsymbol_count
)
selection_slicing = self.run.intermediates.rdp.slicing[
slice(start_idx, start_idx + count + 1)
]
selection_raw = self.run.i_rdpstream.iloc[:, 0].between(
selection_slicing[0], selection_slicing[-1]
)
annotations = None
if "src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.src_log.iloc[:, 0]
annotations = annotations[
annotations.between(0.99 * selection_slicing[0], 1.01 * selection_slicing[-1])
]
# Create plotting data, figures, and plot data
raw: pandas.DataFrame = self.run.i_rdpstream[selection_raw]
data: np.ndarray
gt: np.ndarray = self.run.o_lnestream[selection_gt]
# Handle 2-d and 3-d data
if self.run.i_lnestream.ndim == 2:
dims = 1
data = np.vstack(self.run.i_lnestream).reshape(
self.run.i_lnestream.shape[0] * samples_per_symbol, 1
)[selection_sample]
elif self.run.i_lnestream.ndim == 3:
dims = self.run.i_lnestream.shape[1]
data = (
self.run.i_lnestream.transpose(1, 0, 2)
.reshape(dims, self.run.i_lnestream.size // dims)
.T[selection_sample, :]
)
dims_to_plot = dims
if dim_count is not None:
if dim_count > dims:
raise ValueError(f"dim_count ({dim_count}) > dims ({dims})")
dims_to_plot = dim_count
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims_to_plot,
3,
figsize=(self._width, 1 + dims_to_plot * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="col",
squeeze=False,
)
if dims_to_plot == 1:
gt = gt.reshape(-1, 1)
else:
# if there are more than one data dimensions, that likely means that we're dealing with
# MIMO symbols, which need to be "unpacked".
gt = np.flip(unpack_array(gt, n=dims), axis=1)
# Handle printing symbolstreams with -1 saturating values
if -1 in gt:
gt = np.vectorize(lambda v: 1 if v == -1 else v)(gt)
# Plot raw, ground truth, and samples
for i, axis_group in enumerate(axes):
axis_group[0].plot(raw.iloc[:, 0], raw.iloc[:, i + 1], alpha=1.0, linestyle="-")
# gt_t is ground truth timing, same as raw timing
# gt_d is ground truth "data"
if subsymbol_count != 1:
gt_t, gt_d = (
np.linspace(selection_slicing[0], selection_slicing[-1], gt.shape[0] - 1),
gt[:-1, i],
)
else:
gt_t, gt_d = (selection_slicing, np.array(gt[:, i]))
gt_t, gt_d = gt_t[: gt_d.size], gt_d[: gt_t.size]
axis_group[1].plot(gt_t, gt_d, marker=".", drawstyle="steps-post")
axis_group[2].plot(
data[:, i],
alpha=min(1.0, 100 / samples_per_symbol),
linestyle=":",
marker="+",
markersize=2,
linewidth=0.5,
)
sns.despine()
# Column 1, raw data
axes[-1, 0].set_ylim(
0.975 * np.quantile(raw.iloc[:, 1:], 0.01),
1.025 * np.quantile(raw.iloc[:, 1:], 0.99),
)
axes[-1, 0].set_xlabel(raw.columns[0], color="gray")
for i, axis in enumerate(axes[:, 0].ravel()):
_ = raw.columns[i + 1].split(":")
axis.set_ylabel("{}\n{}\n{} ({})".format(*_), color="gray")
# Column 2, ground truth
axes[-1, 1].set_ylim(np.nanmin(gt_d) - 0.5, np.nanmax(gt_d) + 0.5)
axes[-1, 1].set_yticks(np.unique(gt_d))
axes[-1, 1].set_xlabel(raw.columns[0], color="gray")
for i, axis in enumerate(axes[:, 1].ravel()):
axis.set_ylabel("Data[{}]".format(i), color="gray")
# Column 3, sampled data
xticks = np.arange(0, samples_per_symbol * (count + 1), samples_per_symbol)
xticklabels = np.arange(
start_sample, end_sample + samples_per_symbol, samples_per_symbol
)
xlabel = "Sample #"
axes[-1, 2].set_ylim(0.975 * np.quantile(data, 0.1), 1.025 * np.quantile(data, 0.9))
axes[-1, 2].set_xticks(xticks)
axes[-1, 2].set_xticklabels(xticklabels, rotation=45)
axes[-1, 2].set_xlabel(xlabel, color="gray")
# Plot symbol boundaries in real time space
for axis in axes[:, [0, 1]].ravel():
axis.vlines(
selection_slicing,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="--",
color="k",
)
if annotations is not None:
for axis in axes[:, [0, 1]].ravel():
axis.vlines(
annotations,
0.0,
0.75,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="-.",
color="r",
)
# Plot symbol boundaries in sample space
for axis in axes[:, 2].ravel():
axis.vlines(
xticks,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="--",
color="k",
)
# Align labels
f.align_ylabels(axes[:, :])
f.align_xlabels(axes[-1, :])
# Set titles
axes[0, 0].set_title("Raw data", fontstyle="italic", color="gray")
axes[0, 1].set_title("Ground truth", fontstyle="italic", color="gray")
axes[0, 2].set_title("Sample stream", fontstyle="italic", color="gray")
_title = "Symbol stream for symbols {} to {}".format(start_idx, start_idx + count)
if dims_to_plot != dims:
_title += " [{} of {} dimensions]".format(dims_to_plot, dims)
plt.suptitle(_title, y=1.01, verticalalignment="bottom")
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_symstream(
self, start: t.Optional[int] = None, count: t.Optional[int] = 10, **kwargs
):
SUBPLOT_HEIGHT = 1.5 # inches
start = start if start else 0
end = start + count if count else len(self.run.o_symstream) - start
dims = self.run.i_rdpstream.shape[1] - 1
_out = self.run.o_symstream[slice(start, end)]
_in = self.run.i_symstream[slice(start, end)]
_x = np.arange(start, end)
# if dims != 1, then we're dealing with MIMO
if dims != 1:
_out = np.flip(unpack_array(_out, n=dims), axis=1)
_in = np.flip(unpack_array(_in, n=dims), axis=1)
f, axes = plt.subplots(
dims,
1,
figsize=(self._width, 1 + dims * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey=True,
sharex=True,
)
if dims == 1:
axes = np.array([axes])
_out = _out.reshape(-1, 1)
_in = _in.reshape(-1, 1)
def minmax(x):
return (x.min(), x.max())
for i, axis in enumerate(axes):
_xlim, _ylim = minmax(np.hstack([_out, _in]))
_lower_margin, _h = 0.15, 0.35
axis.plot(_x, _out[:, i], marker="+", color="C0", drawstyle="steps-post")
axis.set_xlim(_x.min() - 0.5, _x.max() + 0.5)
axis.set_ylim(_out.min() - _lower_margin, _out.max() + _h)
axis.set_ylabel("Output[{}]".format(i))
axis.yaxis.label.set_color("C0")
axis.tick_params(axis="y", color="C0")
twin = axis.twinx()
twin.plot(_x, _in[:, i], marker="x", color="C1", drawstyle="steps-post")
twin.set_ylim(_in.min() - _h, _in.max() + _lower_margin)
twin.set_ylabel("Input[{}]".format(i))
twin.yaxis.label.set_color("C1")
twin.tick_params(axis="y", color="C1")
twin.spines["left"].set_color("C0")
twin.spines["right"].set_color("C1")
axis.grid(axis="y", color="C0", dashes=(5, 5), alpha=0.5)
twin.grid(axis="y", color="C1", dashes=(5, 5), alpha=0.5)
axes[-1].xaxis.set_major_locator(plt.MultipleLocator(base=1.0))
axes[-1].set_xlabel("Symbol #", color="gray")
if _x.size >= 50:
plt.setp(axes[-1].xaxis.get_majorticklabels(), rotation=90)
if _x.size >= 60:
pass
for axis in axes:
sns.despine(ax=axis, top=True, bottom=False, right=False, left=False)
plt.suptitle(
"Symbol stream for symbols {} to {}".format(*minmax(_x)),
y=1.01,
verticalalignment="bottom",
)
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_eye_diagram(self, **kwargs):
SUBPLOT_HEIGHT = self._width // 3 # inches
f, axis = plt.subplots(
1, 1, figsize=(self._width, SUBPLOT_HEIGHT), dpi=self._screen_dpi
)
data: np.ndarray
# Handle 2-d and 3-d data
if self.run.i_lnestream.ndim == 2:
data = self.run.i_lnestream.T[:, : self.run.i_symstream.size]
elif self.run.i_lnestream.ndim == 3:
data = np.vstack(self.run.i_lnestream).T[:, : self.run.i_symstream.size]
axis.plot(data, color="C0", linestyle="-", marker=".", alpha=(10 / data.shape[1]))
sns.despine()
lower_ylim, upper_ylim = np.quantile(data, [0.05, 0.95])
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.set_xlabel("Sample #", color="gray")
ylabel = self.run.i_rdpstream.columns[1].split(":")
ylabel = map(ylabel.__getitem__, [0, 1, -1])
ylabel = "\n".join(ylabel)
axis.set_ylabel(ylabel, color="gray")
plt.suptitle(
"Eye diagram for a total of {} symbols".format(data.shape[1]),
verticalalignment="bottom",
)
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_symbol_space(self, **kwargs):
symbol_space = self.run.intermediates.lne.symbol_space
decision_device = self.run.intermediates.lne.decision_device["decision_device"][0]
if not is_fitted(decision_device):
raise RuntimeError("decision device must be fitted for symbol space plotting")
sca = decision_device.named_steps.get("standardscaler", None)
pca = decision_device.named_steps.get("pca", None)
cla = decision_device.steps[-1][1]
X = sca.transform(symbol_space) if sca else symbol_space
        X = pca.transform(X) if pca else X
if X.shape[1] == 1:
# plotting_data
f, axes = plt.subplots(
4,
1,
figsize=(self._width, 8),
dpi=self._screen_dpi,
sharex=True,
gridspec_kw={"height_ratios": [2, 3, 2, 1]},
)
X = X.ravel()
pred = self.run.i_bitstream.ravel()
gt = self.run.o_bitstream.ravel()
if X.size == pred.size:
# Jitter the symbol space slightly to avoid covariance calculation errors
# when all data points are the same. Also, jitter more heavily for the swarm/scatter
# plot representation to improve readability.
plotting_data = pandas.DataFrame(
{
"X": X + 1e-6 * np.random.randn(*X.shape),
"Jittered": pred + 0.1 * np.random.randn(*pred.shape),
"Prediction": pred,
"Error": pred[: gt.size] != gt[: pred.size],
}
)
sns.scatterplot(
x="X",
y="Jittered",
hue="Prediction",
style="Error",
legend="brief",
alpha=0.3,
style_order=[False, True],
palette=sns.color_palette("pastel", n_colors=2),
data=plotting_data.query("Error == False"),
ax=axes[1],
)
if plotting_data.query("Error == True").size > 0:
n_colors = plotting_data.query("Error == True")["Prediction"].unique().size
sns.scatterplot(
x="X",
y="Jittered",
hue="Prediction",
style="Error",
legend=None,
palette=sns.color_palette(palette=None, n_colors=n_colors),
style_order=[False, True],
data=plotting_data.query("Error == True"),
ax=axes[1],
)
sns.distplot(plotting_data.query("Prediction == 0").X, ax=axes[0], color="C0")
sns.rugplot(
plotting_data.query("Prediction == 0").X, alpha=0.5, ax=axes[0], color="C0"
)
sns.distplot(plotting_data.query("Prediction == 1").X, ax=axes[0], color="C1")
sns.rugplot(
plotting_data.query("Prediction == 1").X, alpha=0.5, ax=axes[0], color="C1"
)
if plotting_data.query("Error == True").size > 0:
# ValueError's can be thrown when only a single error exists
try:
sns.distplot(
plotting_data.query("Prediction == 0").query("Error == True").X,
ax=axes[2],
color="C0",
)
rugplot(
plotting_data.query("Prediction == 0").query("Error == True").X,
alpha=0.5,
ax=axes[2],
color="C0",
top=True,
)
except ValueError:
pass
try:
sns.distplot(
plotting_data.query("Prediction == 1").query("Error == True").X,
ax=axes[2],
color="C1",
)
rugplot(
plotting_data.query("Prediction == 1").query("Error == True").X,
alpha=0.5,
ax=axes[2],
color="C1",
top=True,
)
except ValueError:
pass
axes[2].set_ylim(*reversed(axes[2].get_ylim()))
for axis in axes:
remove_spine(axis, "right")
for axis in axes[[1, 2]]:
remove_spine(axis, "bottom")
for axis in axes[[0, 1, -1]]:
remove_spine(axis, "top")
add_spine(axes[3], "bottom", ticks_only=True)
axes[0].grid(dashes=(5, 5), alpha=0.5, axis="x")
axes[1].grid(dashes=(5, 5), alpha=0.5, axis="x")
axes[2].grid(dashes=(5, 5), alpha=0.5, axis="x")
for axis in axes[:-1]:
axis.set_xlabel(None)
axis.set_ylabel(None)
axes[1].yaxis.set_ticks(np.unique(plotting_data.Prediction))
axes[0].set_ylabel("Measurement\ndistribution", color="gray")
axes[1].set_ylabel("Predicted\nsymbol", color="gray")
axes[2].set_ylabel("Error\ndistribution", color="gray")
f.align_ylabels(axes[:])
else:
# No known layer uses different encoding at the moment
pass
_x = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
_d = (
decision_device.decision_function(_x)
if hasattr(decision_device, "decision_function")
else decision_device.predict_proba(_x)
)
axes[-1].plot(_x, _d)
axes[-1].grid(dashes=(5, 5), alpha=0.5)
axes[-1].set_xlim(0.975 * X.min(), 1.025 * X.max())
# Labels
xlabel = self.run.i_rdpstream.columns[1].split(":")
xlabel = map(xlabel.__getitem__, [0, 1, -1])
xlabel = "{}, {} ({})".format(*xlabel)
axes[-1].set_xlabel(xlabel, color="gray")
ylabel = (
"Decision\nfunction"
if hasattr(decision_device, "decision_function")
else "Prediction\nprobability"
)
axes[-1].set_ylabel(ylabel, color="gray")
else:
pred = self.run.i_symstream
gt = self.run.o_symstream
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
resolution = 0.1
XX, YY = np.meshgrid(
np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution)
)
plotting_data = pandas.DataFrame(
{
"Re": X[:, 0],
"Im": X[:, 1],
"Symbol": [f"Symbol {x:2b}" for x in pred],
"Error": pred[: gt.size] != gt[: pred.size],
}
)
f, axis = plt.subplots(
1, 1, figsize=(self._width, self._width), dpi=self._screen_dpi
)
axis = sns.scatterplot(
x="Re", y="Im", hue="Symbol", style="Error", data=plotting_data, ax=axis
)
axis.set_ylabel("Quadrature")
axis.set_xlabel("In-phase")
axis.set_aspect("equal", "box")
axis.grid(dashes=(5, 5), alpha=0.5)
try:
params = AttributeDict()
if hasattr(cla, "decision_function"):
Z = cla.decision_function(np.c_[XX.ravel(), YY.ravel()])
step = 1
params.levels = np.arange(-1, 1 + step, step)
params.linewidths = 1.5 - np.abs(params.levels)
else:
Z = cla.predict_proba(np.c_[XX.ravel(), YY.ravel()])
step = 0.5
params.levels = np.arange(-1, 1 + step, step)
params.linewidths = 1.5 - np.abs(params.levels)
for dim in range(Z.shape[1]):
ZZ = Z[:, dim].reshape(XX.shape)
contours = plt.contour(
XX,
YY,
ZZ,
colors=[sns.color_palette()[dim]],
linestyles=["--", ":"][dim % 2],
**params,
)
plt.gca().clabel(contours, inline=1, fontsize=10)
except Exception:
pass
plt.suptitle("Symbol space", y=0.95, verticalalignment="bottom")
self._plot_save_helper(f, **kwargs)
def plot_error(self, roll: t.Optional[int] = None, **kwargs):
SUBPLOT_HEIGHT = 2
f, axes = plt.subplots(
2,
1,
figsize=(self._width, 1 + 2 * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey=False,
sharex=False,
)
bit_mismatch_length = self.run.o_bitstream.size - self.run.i_bitstream.size
sym_mismatch_length = self.run.o_symstream.size - self.run.i_symstream.size
bit_errors = (
self.run.i_bitstream[: self.run.o_bitstream.size]
!= self.run.o_bitstream[: self.run.i_bitstream.size]
)
sym_errors = (
self.run.i_symstream[: self.run.o_symstream.size]
!= self.run.o_symstream[: self.run.i_symstream.size]
)
bit_x = np.arange(0, bit_errors.size)
sym_x = np.arange(0, sym_errors.size)
bit_mismatch = (
np.arange(bit_errors.size, bit_errors.size + bit_mismatch_length)
if bit_mismatch_length != 0
else None
)
sym_mismatch = (
np.arange(sym_errors.size, sym_errors.size + sym_mismatch_length)
if sym_mismatch_length != 0
else None
)
bit_roll = roll if roll else bit_errors.size // 10
sym_roll = roll if roll else sym_errors.size // 10
bit_errors_series = (
pandas.Series(bit_errors)
.rolling(window=bit_roll, min_periods=1, center=True)
.mean()
)
sym_errors_series = (
pandas.Series(sym_errors)
.rolling(window=sym_roll, min_periods=1, center=True)
.mean()
)
axes[1].plot(bit_x, bit_errors_series)
axes[0].plot(sym_x, sym_errors_series)
        # Mark any trailing unmatched bits/symbols at an error rate of 1.0
        if bit_mismatch is not None:
            axes[1].plot(bit_mismatch, [1.0] * len(bit_mismatch), linestyle="--")
        if sym_mismatch is not None:
            axes[0].plot(sym_mismatch, [1.0] * len(sym_mismatch), linestyle="--")
axes[1].set_ylim(0, 0.5)
axes[0].set_ylim(0, 1)
axes[1].set_xlim(0, bit_errors.size - 1 + bit_mismatch_length)
axes[0].set_xlim(0, sym_errors.size - 1 + sym_mismatch_length)
axes[1].set_title(
"Windowed bit error rate (window={})".format(bit_roll),
fontstyle="italic",
color="gray",
)
axes[1].set_xlabel("Bit #", color="gray")
axes[1].set_ylabel("Bit error rate", color="gray")
axes[0].set_title(
"Windowed symbol error rate (window={})".format(sym_roll),
fontstyle="italic",
color="gray",
)
axes[0].set_xlabel("Symbol #", color="gray")
axes[0].set_ylabel("Symbol error rate", color="gray")
plt.suptitle("Error rates", verticalalignment="bottom")
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_timing(self, **kwargs):
SUBPLOT_HEIGHT = 2
f, axes = plt.subplots(
2,
2,
figsize=(self._width, 1 * 2 * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey="row",
sharex=False,
gridspec_kw={"height_ratios": [1, 2]},
)
raw_timing = self.run.i_rdpstream.iloc[:, 0]
raw_timing_delta = raw_timing.diff()
if self.run.intermediates.rdp.timestamps.ndim < 3:
rdp_timing = pandas.Series(np.hstack(self.run.intermediates.rdp.timestamps))
else:
rdp_timing = pandas.Series(
np.hstack(self.run.intermediates.rdp.timestamps[:, 0, :])
)
rdp_timing_delta = rdp_timing.diff()
axes[0, 0].plot(np.linspace(0, 1, raw_timing.size), raw_timing)
axes[0, 1].plot(np.linspace(0, 1, rdp_timing.size), rdp_timing)
axes[1, 0].plot(np.linspace(0, 1, raw_timing_delta.size), raw_timing_delta)
axes[1, 1].plot(np.linspace(0, 1, rdp_timing_delta.size), rdp_timing_delta)
for axis in axes.ravel():
axis.set_xticks([])
axes[0, 0].set_ylabel("timestamp (s)", color="gray")
axes[1, 0].set_ylabel("timestamp diff (s)", color="gray")
axes[0, 0].set_title(
"Sample-wise timestamp differences\nRaw data", fontstyle="italic", color="gray"
)
axes[0, 1].set_title(
"Sample-wise timestamp differences\nInterpolated data",
fontstyle="italic",
color="gray",
)
sns.despine()
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_synchronisation(self, **kwargs):
SUBPLOT_HEIGHT = 2 # inches
timestamps = self.run.i_rdpstream.iloc[:, 0]
data = self.run.i_rdpstream.iloc[:, 1:]
dims = data.shape[1]
start = timestamps.iloc[0]
end = self.run.intermediates.rdp.slicing[10]
interval = timestamps.between(start, end)
ZOOM_BEFORE = 3
ZOOM_AFTER = 5
origin, *edges = self.run.intermediates.rdp.edge_detection
slicing = self.run.intermediates.rdp.slicing
zoom_start = slicing[0] - ZOOM_BEFORE * (slicing[1] - slicing[0])
zoom_end = slicing[0] + ZOOM_AFTER * (slicing[1] - slicing[0])
zoom_interval = timestamps.between(zoom_start, zoom_end)
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims,
2,
figsize=(self._width, 1 + dims * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="row",
squeeze=False,
gridspec_kw={"width_ratios": [3, 2]},
)
lower_ylim, upper_ylim = np.quantile(data, [0.01, 0.99])
for i, axis in enumerate(axes[:, 0]):
axis.plot(
timestamps[interval],
data[interval].iloc[:, i],
marker="+",
markersize=2,
linewidth=0.5,
alpha=0.5,
)
if lower_ylim < upper_ylim:
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.set_ylabel(
"{}\n{}\n{} ({})".format(*data[interval].columns[i].split(":")), color="gray"
)
axis.vlines(
origin,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.5,
alpha=0.7,
linestyle="--",
color="k",
)
axes[0, 0].vlines(
edges,
0,
1,
transform=axes[0, 0].get_xaxis_transform(),
linewidth=1.5,
alpha=0.7,
linestyle="--",
color="C1",
)
for i, axis in enumerate(axes[:, 1]):
axis.plot(
timestamps[zoom_interval],
data[zoom_interval].iloc[:, i],
marker="+",
markersize=2,
linewidth=0.5,
alpha=0.5,
)
axis.tick_params(axis="x", rotation=45)
axis.vlines(
slicing[0 : ZOOM_AFTER + 1],
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.7,
linestyle=":",
color="C0",
)
axis.vlines(
origin,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.7,
linestyle="--",
color="k",
)
sns.despine()
axes[-1, 0].set_xlabel(self.run.i_rdpstream.columns[0], color="gray")
axes[-1, 1].set_xlabel(self.run.i_rdpstream.columns[0], color="gray")
f.align_xlabels(axes[-1, :])
fmt = lambda x: np.format_float_scientific(x, precision=3)
axes[0, 0].set_title(
"Preprocessed data\nin interval {} to {}".format(*map(fmt, [start, end])),
fontstyle="italic",
color="gray",
)
axes[0, 1].set_title(
"Preprocessed data\nin interval {} to {}".format(*map(fmt, [zoom_start, zoom_end])),
fontstyle="italic",
color="gray",
)
plt.suptitle("Synchronisation", y=1.01, verticalalignment="bottom")
f.tight_layout()
plt.subplots_adjust(hspace=0.5)
self._plot_save_helper(f, **kwargs)
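# Hedged usage sketch (how a caller might drive the plotters above; obtaining an
# ingested and digested PerformanceRun is experiment-specific and not shown here):
def _example_plot_performance_run(run):
    plotter = PerformanceRunPlotter(run)
    plotter.plot_symstream(start=0, count=10)
    plotter.plot_error()
    plotter.plot_symbol_space()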
|
the-stack_0_2958 | """
Unit tests for Python ICE-CASCADE hillslope erosion-deposition forward-time
centered-space model component
References:
(1) Holman, J. P. (2002). Heat transfer (pp. 75)
"""
import unittest
import numpy as np
from py_ice_cascade import hillslope
class ftcs_TestCase(unittest.TestCase):
"""Tests for hillslope ftcs model component"""
# arbitrary valid values for input arguments
hh = np.random.rand(10,10)
dd = 1.0
mm = np.ones((10,10))
kon = 1.0
koff = 0.0
bb = ['constant']*4
def test_input_valid_bc(self):
"""Allow all supported BC names, and fail for others"""
hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'closed', 'open', 'mirror'])
hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff,
['cyclic', 'cyclic', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['ooga_booga', 'cyclic', 'constant', 'constant'])
def test_input_cyclic_bc(self):
"""Unmatched cyclic BCs should throw an error"""
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['cyclic', 'constant', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'cyclic', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'constant', 'cyclic', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'constant', 'constant', 'cyclic'])
def test_consistent_dims(self):
"""Unequal array dims for height and mask throws error"""
self.assertRaises(ValueError, hillslope.ftcs_model, np.random.rand(11,11), self.mm, self.dd, self.kon, self.koff, self.bb)
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, np.random.rand(11,11), self.dd, self.kon, self.koff, self.bb)
def test_protect_model_dims(self):
"""Attempt to set model grid with incorrect size array throw error"""
model = hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff, self.bb)
self.assertRaises(ValueError, model.set_height, np.random.rand(11,11))
self.assertRaises(ValueError, model.set_mask, np.random.rand(11,11))
def test_steady_bc_constant(self):
"""Compare against exact solution for sinusoid y=max and zero at other bnd"""
# parameters
h0 = 1.0
nx = 100
ny = 50
lx = 1.0
delta = lx/(nx-1)
ly = delta*(ny-1)
t_end = 0.25
epsilon = 0.001
# Case 1:
# # exact solution
xx = np.linspace(0, lx, nx, dtype=np.double).reshape(( 1,nx))
yy = np.linspace(0, ly, ny, dtype=np.double).reshape((ny, 1))
h_exact = h0/np.sinh(np.pi*ly/lx)*np.sin(np.pi*xx/lx)*np.sinh(np.pi*yy/lx)
# # numerical solution
h_init = np.zeros((ny, nx))
h_init[-1,:] = h0*np.sin(np.pi*xx/lx)
kappa = 1.0
mask = np.ones((ny,nx))
bcs = ['constant']*4
model = hillslope.ftcs_model(h_init, mask, delta, kappa, kappa, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
# Case 2: rotate 90 degrees
# # exact solution
h_exact = np.rot90(h_exact)
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['constant']*4
model = hillslope.ftcs_model(h_init, mask, delta, kappa, kappa, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
def test_steady_layered_kappa(self):
"""Compare against exact solution for diffusion in 2 layered material"""
# parameters
nx = 100
ny = 5
lx = 1.0
delta = lx/(nx-1)
xx = np.linspace(0, lx, nx, dtype=np.double).reshape((1,nx))*np.ones((ny,1))
l0 = 0.5*(xx[0,50]+xx[0,51]) # transition at midpoint
l1 = lx-l0
h0 = 1.0
h1 = 0.0
k0 = 1.0
k1 = 0.5
t_end = 1.5
epsilon = 0.001
# Case 1:
# # exact solution (resistance = l/k in series)
qq = (h0-h1)/(l0/k0+l1/k1)
        hb = h0-qq*l0/k0 # or: hb = h1+qq*l1/k1
xx = np.linspace(0, lx, nx, dtype=np.double).reshape((1,nx))*np.ones((ny,1))
h_exact = np.where(xx <= l0, h0+(hb-h0)/l0*xx, hb+(h1-hb)/l1*(xx-l0))
# # numerical solution
h_init = np.zeros((ny, nx))
h_init[:,0] = h0
h_init[:,-1] = h1
mask = np.where(xx <= l0, True, False)
bcs = ['closed', 'closed', 'constant', 'constant']
model = hillslope.ftcs_model(h_init, mask, delta, k0, k1, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
# Case 2: rotate 90 degrees
# # exact solution
h_exact = np.rot90(h_exact)
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['constant', 'constant', 'closed', 'closed']
model = hillslope.ftcs_model(h_init, mask, delta, k0, k1, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
def test_mass_conservation(self):
"""Confirm mass conservation with closed and cyclic BCs"""
# parameters
nx = ny = 100
delta = 1.0/(nx-1)
h_init = np.linspace(0.0, 1.0, nx).reshape(1,nx)*np.linspace(0.0, 1.0, ny).reshape(ny,1)
h_init += 0.1*(np.random.rand(ny, nx)-0.5)
mask = np.where(np.random.rand(ny, nx)>0.5, True, False)
kappa1 = 1.0
kappa0 = 0.5
t_end = 0.25
epsilon = 0.0001
# Case 1
# # exact solution
h_total = np.sum(h_init)
# # numerical solution
bcs = ['cyclic', 'cyclic', 'closed', 'closed']
model = hillslope.ftcs_model(h_init, mask, delta, kappa1, kappa0, bcs)
model.run(t_end)
# # check error
h_error = np.abs(h_total-np.sum(model.get_height()))
self.assertTrue(h_error < epsilon)
# Case 2: rotate 90 deg
# # exact solution
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['closed', 'closed', 'cyclic', 'cyclic']
model = hillslope.ftcs_model(h_init, mask, delta, kappa1, kappa0, bcs)
model.run(t_end)
# # check error
h_error = np.abs(h_total-np.sum(model.get_height()))
self.assertTrue(h_error < epsilon)
if __name__ == '__main__':
unittest.main()
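# Hedged usage sketch of the model exercised above (mirrors the constructor and method
# calls in these tests; the grid shape, spacing and kappa values are arbitrary):
#   model = hillslope.ftcs_model(np.zeros((50, 100)), np.ones((50, 100)), 0.01, 1.0, 0.5,
#                                ['constant', 'constant', 'closed', 'closed'])
#   model.run(0.25)
#   height = model.get_height()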
|
the-stack_0_2959 | # -*- coding: utf-8 -*-
import copy
from pathlib import Path
from collections import OrderedDict, namedtuple
import numpy as np
from parfive import Downloader
import astropy.table
import astropy.units as u
import parfive
import sunpy
from sunpy import config
from sunpy.net.base_client import BaseClient
from sunpy.net.vso.attrs import Time, Wavelength, _Range
from sunpy.time import TimeRange
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['QueryResponse', 'GenericClient']
class QueryResponseBlock:
"""
Represents url, source along with other information
"""
def __init__(self, map0, url, time=None):
"""
Parameters
----------
map0 : Dict with relevant information
url : Uniform Resource Locator
"""
self._map = map0
self.source = map0.get('source', "Data not Available")
self.provider = map0.get('provider', "Data not Available")
self.physobs = map0.get('physobs', "Data not Available")
self.instrument = map0.get('instrument', "Data not Available")
self.url = url
self.time = TimeRange(map0.get('Time_start'),
map0.get('Time_end')) if time is None else time
self.wave = map0.get('wavelength', np.NaN)
def iter_urls(amap, url_list, time):
"""Helper Function"""
for aurl, t in zip(url_list, time):
tmp = QueryResponseBlock(amap, aurl, t)
yield tmp
class QueryResponse(list):
"""
Container of QueryResponseBlocks
"""
def __init__(self, lst):
super().__init__(lst)
@classmethod
def create(cls, amap, lst, time=None):
if time is None:
time = [None] * len(lst)
return cls(iter_urls(amap, lst, time))
def time_range(self):
"""
Returns the time-span for which records are available
"""
return TimeRange(min(qrblock.time.start for qrblock in self),
max(qrblock.time.end for qrblock in self))
def response_block_properties(self):
"""
Returns a set of class attributes on all the response blocks.
"""
s = {a if not a.startswith('_') else None for a in dir(self[0])}
for resp in self[1:]:
s = s.intersection({a if not a.startswith('_') else None for a in dir(resp)})
s.remove(None)
return s
def __repr__(self):
return repr(type(self)) + repr(self._build_table())
def __str__(self):
return str(self._build_table())
def _repr_html_(self):
return self._build_table()._repr_html_()
def _build_table(self):
columns = OrderedDict((('Start Time', []), ('End Time', []),
('Source', []), ('Instrument', []),
('Wavelength', [])))
for i, qrblock in enumerate(self):
columns['Start Time'].append(
(qrblock.time.start).strftime(TIME_FORMAT))
columns['End Time'].append(
(qrblock.time.end).strftime(TIME_FORMAT))
columns['Source'].append(qrblock.source)
columns['Instrument'].append(qrblock.instrument)
columns['Wavelength'].append(str(u.Quantity(qrblock.wave)))
return astropy.table.Table(columns)
class GenericClient(BaseClient):
"""
Base class for simple web clients for the data retriever module. This class
is mainly designed for downloading data from FTP and HTTP type data
sources, although should in theory be general enough to get data from any
web service.
    This class has two user-facing methods,
    `~sunpy.net.dataretriever.client.GenericClient.search` and
    `~sunpy.net.dataretriever.client.GenericClient.fetch`; the former generates a
set of results for files available through the service the client is
querying and the latter downloads that data.
The `~sunpy.net.dataretriever.client.GenericClient.search` method takes a
set of `sunpy.net.attrs` objects and then converts these into a call to
`~sunpy.net.dataretriever.client.GenericClient._get_url_for_timerange`. It
does this through the `map\\_` dictionary which represents the
    `~sunpy.net.attrs` objects as plain key-value pairs.
"""
def __init__(self):
self.map_ = {}
def _makeargs(self, *args):
"""
Construct the `map\\_` internal representation of the query.
This `map\\_` dictionary is passed through to the
`_get_url_for_timerange` method to get the URL results.
Parameters
----------
\\*args: `tuple`
The query attributes.
"""
for elem in args:
if isinstance(elem, Time):
self.map_['TimeRange'] = TimeRange(elem.start, elem.end)
self.map_['Time_start'] = elem.start
self.map_['Time_end'] = elem.end
elif isinstance(elem, _Range):
a_min = elem.min
a_max = elem.max
if a_min == a_max:
self.map_[elem.__class__.__name__.lower()] = a_min
else:
if isinstance(elem, Wavelength):
prefix = 'wave'
else:
prefix = ''
minmax = namedtuple("minmax", "{0}min {0}max".format(prefix))
self.map_[elem.__class__.__name__.lower()] = minmax(a_min, a_max)
else:
if hasattr(elem, 'value'):
self.map_[elem.__class__.__name__.lower()] = elem.value
else:
# This will only get hit if the attr is something like
# Extent, which is a unique subclass of Attr. Currently no
# unidown Clients support this, so we skip this line.
# Anything that hits this will require special code to
# convert it into the map_ dict.
raise ValueError(
"GenericClient can not add {} to the map_ dictionary to pass "
"to the Client.".format(elem.__class__.__name__)) # pragma: no cover
self._makeimap()
@classmethod
def _get_url_for_timerange(cls, timerange, **kwargs):
"""
Method which generates URL results from a timerange and the `map\\_`
dictionary.
Parameters
----------
timerange: `sunpy.time.TimeRange`
The timerange to extract the URLs for.
\\*\\*kwargs: `dict`
Any extra keywords to refine the search. Generated from the
attributes passed to
`~sunpy.net.dataretriever.client.GenericClient.search`.
"""
raise NotImplementedError
def _makeimap(self):
"""
Add client specific information to the _map dict.
Normally this is extra metadata which is not downloaded, but known
a priori.
"""
raise NotImplementedError
@classmethod
def _can_handle_query(cls, *query):
"""
Method the
`sunpy.net.fido_factory.UnifiedDownloaderFactory`
class uses to dispatch queries to this Client.
"""
raise NotImplementedError
def _get_full_filenames(self, qres, filenames, path):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
filenames : list
List of base filenames (ex - "xyz.txt")
path : str
Path to download files to
Returns
-------
List of full pathnames for each file (download_directory + filename)
"""
# Create function to compute the filepath to download to if not set
default_dir = Path(sunpy.config.get("downloads", "download_dir"))
paths = []
for i, filename in enumerate(filenames):
fname = Path(filename)
if path is None:
fname = default_dir / '{file}'
elif '{file}' not in str(path):
fname = path / '{file}'
temp_dict = qres[i]._map.copy()
temp_dict['file'] = str(filename)
fname = fname.expanduser()
fname = Path(str(fname).format(**temp_dict))
paths.append(fname)
return paths
def _get_time_for_url(self, urls):
"""
This method allows clients to customise the timerange displayed for
each URL.
It should return a sunpy.time.TimeRange object per URL.
"""
return NotImplemented
def search(self, *args, **kwargs):
"""
Query this client for a list of results.
Parameters
----------
\\*args: `tuple`
`sunpy.net.attrs` objects representing the query.
"""
GenericClient._makeargs(self, *args, **kwargs)
kwergs = copy.copy(self.map_)
kwergs.update(kwargs)
urls = self._get_url_for_timerange(
self.map_.get('TimeRange'), **kwergs)
if urls:
times = self._get_time_for_url(urls)
if times and times is not NotImplemented:
return QueryResponse.create(self.map_, urls, times)
return QueryResponse.create(self.map_, urls)
def fetch(self, qres, path=None, overwrite=False,
progress=True, downloader=None, wait=True):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
path : `str` or `pathlib.Path`, optional
Path to the download directory, or file template including the
``{file}`` string which will be replaced with the filename.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bar will be shown.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if `downloader` is not `None`.
Returns
-------
results: `parfive.Results`
"""
if path is not None:
path = Path(path)
urls = [qrblock.url for qrblock in qres]
filenames = [url.split('/')[-1] for url in urls]
paths = self._get_full_filenames(qres, filenames, path)
dl_set = True
if not downloader:
dl_set = False
downloader = Downloader(progress=progress, overwrite=overwrite)
for url, filename in zip(urls, paths):
downloader.enqueue_file(url, filename=filename)
if dl_set and not wait:
return
return downloader.download()
def _link(self, map_):
"""Helper Function"""
paths = []
for k, v in map_.items():
paths.append(map_[k]['path'])
return paths
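# Hedged sketch of a minimal concrete client (illustrative only): the source name,
# instrument and URL pattern below are placeholders, not a real data service.
class _ExampleClient(GenericClient):
    @classmethod
    def _get_url_for_timerange(cls, timerange, **kwargs):
        # One file per query window; a real client would enumerate files per day.
        return ['http://example.invalid/data/{}.fits'.format(
            timerange.start.strftime('%Y%m%d'))]
    def _makeimap(self):
        self.map_['source'] = 'EXAMPLE'
        self.map_['instrument'] = 'example'
        self.map_['physobs'] = 'intensity'
        self.map_['provider'] = 'EXAMPLE'
    @classmethod
    def _can_handle_query(cls, *query):
        return any(isinstance(q, Time) for q in query)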
|
the-stack_0_2960 | """Produce custom labelling for a colorbar.
Contributed by Scott Sinclair
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from numpy.random import randn
# Make plot with vertical (default) colorbar
fig, ax = plt.subplots()
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest', cmap=cm.coolwarm)
ax.set_title('Gaussian noise with vertical colorbar')
# Add colorbar, make sure to specify tick locations to match desired ticklabels
cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
cbar.ax.set_yticklabels(['< -1', '0', '> 1'])  # vertically oriented colorbar
# Make plot with horizontal colorbar
fig, ax = plt.subplots()
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest', cmap=cm.afmhot)
ax.set_title('Gaussian noise with horizontal colorbar')
cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation='horizontal')
cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])  # horizontal colorbar
plt.show()
|
the-stack_0_2961 | # -*- coding: utf-8 -*-
'''
Manage Elasticsearch Domains
=================
.. versionadded:: 2016.11.0
Create and destroy Elasticsearch domains. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure domain exists:
boto_elasticsearch_domain.present:
- DomainName: mydomain
        - profile: user-credentials
- ElasticsearchVersion: "2.3"
- ElasticsearchClusterConfig:
InstanceType": "t2.micro.elasticsearch"
InstanceCount: 1
DedicatedMasterEnabled: False
ZoneAwarenessEnabled: False
- EBSOptions:
EBSEnabled: True
VolumeType: "gp2"
VolumeSize: 10
Iops: 0
- AccessPolicies:
Version: "2012-10-17"
Statement:
- Effect: "Allow"
- Principal:
AWS: "*"
- Action:
- "es:*"
- Resource: "arn:aws:es:*:111111111111:domain/mydomain/*
- Condition:
IpAddress:
"aws:SourceIp":
- "127.0.0.1",
- "127.0.0.2",
- SnapshotOptions:
AutomatedSnapshotStartHour: 0
- AdvancedOptions:
            rest.action.multi.allow_explicit_index: "true"
- Tags:
a: "b"
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
# Import Salt libs
import salt.utils.json
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_elasticsearch_domain' if 'boto_elasticsearch_domain.exists' in __salt__ else False
def _compare_json(current, desired):
return __utils__['boto3.json_objs_equal'](current, desired)
def present(name, DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion="1.5"):
'''
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
'DedicatedMasterEnabled': False,
'InstanceCount': 1,
'InstanceType': 'm3.medium.elasticsearch',
'ZoneAwarenessEnabled': False
}
if EBSOptions is None:
EBSOptions = {
'EBSEnabled': False,
}
if SnapshotOptions is None:
SnapshotOptions = {
'AutomatedSnapshotStartHour': 0
}
if AdvancedOptions is None:
AdvancedOptions = {
'rest.action.multi.allow_explicit_index': 'true'
}
if Tags is None:
Tags = {}
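    # AccessPolicies may arrive as a JSON string; normalize it to a dict so it can be compared against the domain's current config.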
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies)
except ValueError as e:
ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(six.text_type(e))
return ret
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion), # future lint: disable=blacklisted-function
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False
ret['comment'] = (
'Failed to update domain: version cannot be modified '
'from {0} to {1}.'.format(
_status.get('ElasticsearchVersion'),
str(ElasticsearchVersion) # future lint: disable=blacklisted-function
)
)
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
_describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None)
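    # Compare each desired config section against the described domain; sections that differ are recorded in ret['changes'] and queued in comm_args for the update call.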
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
'EBSOptions': EBSOptions,
'AccessPolicies': AccessPolicies,
'SnapshotOptions': SnapshotOptions,
'AdvancedOptions': AdvancedOptions}
for k, v in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
region=region, key=key,
keyid=keyid, profile=profile,
**comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret
return ret
def absent(name, DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Ensure domain with passed properties is absent.
name
The name of the state definition.
DomainName
Name of the domain.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_elasticsearch_domain.exists'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
if r and not r['exists']:
ret['comment'] = 'Domain {0} does not exist.'.format(DomainName)
return ret
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be removed.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.delete'](DomainName,
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'domain': DomainName}
ret['changes']['new'] = {'domain': None}
ret['comment'] = 'Domain {0} deleted.'.format(DomainName)
return ret
|
the-stack_0_2964 | import os
from datetime import datetime
import pandas as pd
import src.config.constants as constants
import src.munging as process_data
import src.common as common
if __name__ == "__main__":
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
logger = common.get_logger("blend")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
# File with public score 7.84988
# https://www.kaggle.com/pavfedotov/blending-tool-tps-aug-2021
df_sub_ext = pd.read_csv(f"{constants.PUB_SUBMISSION_DIR}/0.part")
# https://www.kaggle.com/vaby667/84996to-improve-your-ranking
# PL : 0.84996
df_2 = pd.read_csv(
f"{constants.PUB_SUBMISSION_DIR}/file1_7.84996_file2_7.84996_blend.csv"
)
    # LGB Benchmark with StratifiedKFold (10) with frequency encoding params from Kaggle, seed 20 (0.9, 0.1)
df_lgb_log_loss_top_10 = pd.read_csv(
f"{constants.SUBMISSION_DIR}/sub_lgb_SKF_freq_params_f_kaggle_0817_1247_7.84284.csv"
)
    # Giving more importance to the external submissions
sample_submission_df.loss = (
0.5 * df_sub_ext.loss + 0.49 * df_2.loss + 0.01 * df_lgb_log_loss_top_10.loss
).values
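    # The blend weights (0.5 + 0.49 + 0.01) sum to 1.0, so the blended loss stays on the same scale as the individual submissions.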
file_name = f"sub_{MODEL_NAME}_{RUN_ID}.csv"
logger.info(f"Saving to submission file {constants.SUBMISSION_DIR}/{file_name}")
sample_submission_df.to_csv(f"{constants.SUBMISSION_DIR}/{file_name}")
logger.info(pd.read_csv(f"{constants.SUBMISSION_DIR}/{file_name}"))
|
the-stack_0_2966 | import ast
from typing import Any, List
from vyper.parser.context import Context
from vyper.parser.expr import Expr
from vyper.parser.function_definitions.utils import (
get_default_names_to_set,
get_nonreentrant_lock,
get_sig_statements,
make_unpacker,
)
from vyper.parser.lll_node import LLLnode
from vyper.parser.parser_utils import getpos, make_setter
from vyper.parser.stmt import parse_body
from vyper.signatures import FunctionSignature, sig_utils
from vyper.signatures.function_signature import VariableRecord
from vyper.types.types import BaseType, ByteArrayLike, get_size_of_type
from vyper.utils import MemoryPositions
def get_internal_arg_copier(total_size: int, memory_dest: int) -> List[Any]:
"""
Copy arguments.
For internal functions, MSTORE arguments and callback pointer from the stack.
:param total_size: total size to copy
:param memory_dest: base memory position to copy to
:return: LLL list that copies total_size of memory
"""
copier: List[Any] = ["seq"]
for pos in range(0, total_size, 32):
copier.append(["mstore", memory_dest + pos, "pass"])
return copier
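# For example, get_internal_arg_copier(total_size=64, memory_dest=320) produces
# ['seq', ['mstore', 320, 'pass'], ['mstore', 352, 'pass']]: two 32-byte stack items
# stored into consecutive memory slots.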
def parse_internal_function(
code: ast.FunctionDef, sig: FunctionSignature, context: Context
) -> LLLnode:
"""
    Parse an internal function (FuncDef), and produce full function body.
    :param sig: the FunctionSignature
:param code: ast of function
:return: full sig compare & function body
"""
# Get nonreentrant lock
nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(sig, context.global_ctx)
# Create callback_ptr, this stores a destination in the bytecode for a internal
# function to jump to after a function has executed.
clampers: List[LLLnode] = []
# Allocate variable space.
context.memory_allocator.expand_memory(sig.max_copy_size)
_post_callback_ptr = f"{sig.name}_{sig.method_id}_post_callback_ptr"
context.callback_ptr = context.new_internal_variable(typ=BaseType("uint256"))
clampers.append(
LLLnode.from_list(
["mstore", context.callback_ptr, "pass"], annotation="pop callback pointer",
)
)
if sig.total_default_args > 0:
clampers.append(LLLnode.from_list(["label", _post_callback_ptr]))
# internal functions without return types need to jump back to
# the calling function, as there is no return statement to handle the
# jump.
if sig.output_type is None:
stop_func = [["jump", ["mload", context.callback_ptr]]]
else:
stop_func = [["stop"]]
# Generate copiers
if len(sig.base_args) == 0:
copier = ["pass"]
clampers.append(LLLnode.from_list(copier))
elif sig.total_default_args == 0:
copier = get_internal_arg_copier(
total_size=sig.base_copy_size, memory_dest=MemoryPositions.RESERVED_MEMORY
)
clampers.append(LLLnode.from_list(copier))
# Fill variable positions
for arg in sig.args:
if isinstance(arg.typ, ByteArrayLike):
mem_pos = context.memory_allocator.expand_memory(32 * get_size_of_type(arg.typ))
context.vars[arg.name] = VariableRecord(arg.name, mem_pos, arg.typ, False)
else:
context.vars[arg.name] = VariableRecord(
arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False,
)
# internal function copiers. No clamping for internal functions.
dyn_variable_names = [a.name for a in sig.base_args if isinstance(a.typ, ByteArrayLike)]
if dyn_variable_names:
i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
unpackers: List[Any] = []
for idx, var_name in enumerate(dyn_variable_names):
var = context.vars[var_name]
ident = f"_load_args_{sig.method_id}_dynarg{idx}"
o = make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var.pos)
unpackers.append(o)
if not unpackers:
unpackers = ["pass"]
# 0 added to complete full overarching 'seq' statement, see internal_label.
unpackers.append(0)
clampers.append(
LLLnode.from_list(
["seq_unchecked"] + unpackers,
typ=None,
annotation="dynamic unpacker",
pos=getpos(code),
)
)
# Function has default arguments.
if sig.total_default_args > 0: # Function with default parameters.
default_sigs = sig_utils.generate_default_arg_sigs(code, context.sigs, context.global_ctx)
sig_chain: List[Any] = ["seq"]
for default_sig in default_sigs:
sig_compare, internal_label = get_sig_statements(default_sig, getpos(code))
# Populate unset default variables
set_defaults = []
for arg_name in get_default_names_to_set(sig, default_sig):
value = Expr(sig.default_values[arg_name], context).lll_node
var = context.vars[arg_name]
left = LLLnode.from_list(
var.pos, typ=var.typ, location="memory", pos=getpos(code), mutable=var.mutable
)
set_defaults.append(make_setter(left, value, "memory", pos=getpos(code)))
current_sig_arg_names = [x.name for x in default_sig.args]
# Load all variables in default section, if internal,
# because the stack is a linear pipe.
copier_arg_count = len(default_sig.args)
copier_arg_names = current_sig_arg_names
# Order copier_arg_names, this is very important.
copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]
# Variables to be populated from calldata/stack.
default_copiers: List[Any] = []
if copier_arg_count > 0:
                # Get map of variables in calldata, with their offsets
offset = 4
calldata_offset_map = {}
for arg in default_sig.args:
calldata_offset_map[arg.name] = offset
offset += (
32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
)
# Copy set default parameters from calldata
dynamics = []
for arg_name in copier_arg_names:
var = context.vars[arg_name]
if isinstance(var.typ, ByteArrayLike):
_size = 32
dynamics.append(var.pos)
else:
_size = var.size * 32
default_copiers.append(
get_internal_arg_copier(memory_dest=var.pos, total_size=_size,)
)
# Unpack byte array if necessary.
if dynamics:
i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
for idx, var_pos in enumerate(dynamics):
ident = f"unpack_default_sig_dyn_{default_sig.method_id}_arg{idx}"
default_copiers.append(
make_unpacker(
ident=ident, i_placeholder=i_placeholder, begin_pos=var_pos,
)
)
default_copiers.append(0) # for over arching seq, POP
sig_chain.append(
[
"if",
sig_compare,
[
"seq",
internal_label,
LLLnode.from_list(
["mstore", context.callback_ptr, "pass"],
annotation="pop callback pointer",
pos=getpos(code),
),
["seq"] + set_defaults if set_defaults else ["pass"],
["seq_unchecked"] + default_copiers if default_copiers else ["pass"],
["goto", _post_callback_ptr],
],
]
)
# With internal functions all variable loading occurs in the default
# function sub routine.
_clampers = [["label", _post_callback_ptr]]
# Function with default parameters.
o = LLLnode.from_list(
[
"seq",
sig_chain,
[
"if",
0, # can only be jumped into
[
"seq",
["seq"]
+ nonreentrant_pre
+ _clampers
+ [parse_body(c, context) for c in code.body]
+ nonreentrant_post
+ stop_func,
],
],
],
typ=None,
pos=getpos(code),
)
else:
# Function without default parameters.
sig_compare, internal_label = get_sig_statements(sig, getpos(code))
o = LLLnode.from_list(
[
"if",
sig_compare,
["seq"]
+ [internal_label]
+ nonreentrant_pre
+ clampers
+ [parse_body(c, context) for c in code.body]
+ nonreentrant_post
+ stop_func,
],
typ=None,
pos=getpos(code),
)
return o
return o
|
the-stack_0_2968 | #!/usr/bin/env python3
import argparse
import socketserver
import signal
import sys
import handlers
from util import eprint
def get_arguments():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--port", "-p", help="Local port to bind to", default=2525, type=int)
parser.add_argument("--host", help="Hostname to bind to", default="localhost")
return parser.parse_args()
def main():
args = get_arguments()
# Enable socket reuse for quicker testing
socketserver.TCPServer.allow_reuse_address = True
with socketserver.ThreadingTCPServer((args.host, args.port), handlers.SMTPHandler) as server:
def close_handler(signal, frame):
eprint("Shutdown requested")
server.server_close()
eprint("Shutting down")
sys.exit(0)
signal.signal(signal.SIGINT, close_handler)
server.serve_forever()
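# Typical invocation (assuming this module is saved as, e.g., smtp_server.py): python3 smtp_server.py --host 0.0.0.0 -p 2525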
if __name__ == '__main__':
main()
|
the-stack_0_2970 | import os
from sent2vec.vectorizer import Vectorizer
from scipy import spatial
def compare_two_sentences(sentence_1, sentence_2):
sentences = [sentence_1, sentence_2]
vectorizer = Vectorizer()
vectorizer.bert(sentences)
vec_1, vec_2 = vectorizer.vectors
dist = spatial.distance.cosine(vec_1, vec_2)
return dist
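# The returned value is scipy's cosine distance: close to 0.0 for semantically similar sentences, growing towards 1.0 (and beyond) as the BERT vectors diverge.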
def dir_creator(dir_name):
# print('checking_dir:', dir_name)
try:
tmp_dir = os.getcwd()
os.chdir(dir_name)
for _ in dir_name.split('/')[:-1]:
os.chdir('..')
os.chdir(tmp_dir)
except FileNotFoundError:
if len(dir_name.split('/')) > 1:
tot_dir = ''
for dir in dir_name.split('/'):
tot_dir += dir
                try:
                    os.mkdir(tot_dir)
                except FileExistsError:
                    pass
tot_dir += '/'
else:
os.mkdir(dir_name)
|
the-stack_0_2972 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"], False, False, True)
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"], False, False, True)
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
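        # The proof cannot be generated yet: txid1 is still unconfirmed, so it is not part of any block.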
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"], False, False, True)
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
# Doesn't apply to Pion Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
|
the-stack_0_2973 | import itertools
import uuid
from dataclasses import dataclass, field
from typing import (
Generator,
Iterator,
Dict,
Sequence,
Optional,
TYPE_CHECKING,
Union,
Tuple,
List,
)
import numpy as np
import weaviate
from ..base.backend import BaseBackendMixin
from .... import Document
from ....helper import dataclass_from_dict
from ..registry import _REGISTRY
if TYPE_CHECKING:
from ....types import (
DocumentArraySourceType,
)
@dataclass
class WeaviateConfig:
"""This class stores the config variables to initialize
connection to the Weaviate server"""
n_dim: int
client: Union[str, weaviate.Client] = 'http://localhost:8080'
name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[WeaviateConfig, Dict]] = None,
**kwargs,
):
"""Initialize weaviate storage.
:param docs: the list of documents to initialize to
        :param config: the config object used to initialize connection to weaviate server
:param kwargs: extra keyword arguments
:raises ValueError: only one of name or docs can be used for initialization,
raise an error if both are provided
"""
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(WeaviateConfig, config)
from ... import DocumentArray
self._n_dim = config.n_dim
self._serialize_config = config.serialize_config
if config.name and config.name != config.name.capitalize():
raise ValueError(
'Weaviate class name has to be capitalized. '
'Please capitalize when declaring the name field in config.'
)
self._persist = bool(config.name)
if isinstance(config.client, str):
self._client = weaviate.Client(config.client)
else:
self._client = config.client
self._config = config
self._schemas = self._load_or_create_weaviate_schema()
self._offset2ids, self._offset2ids_wid = self._get_offset2ids_meta()
_REGISTRY[self.__class__.__name__][self._class_name].append(self)
# To align with Sqlite behavior; if `docs` is not `None` and table name
# is provided, :class:`DocumentArraySqlite` will clear the existing
# table and load the given `docs`
if _docs is None:
return
elif isinstance(
_docs, (DocumentArray, Sequence, Generator, Iterator, itertools.chain)
):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
    def _get_weaviate_class_name(self) -> str:
        """Generate the class/schema name using the ``uuid.uuid1`` function with some
formatting to tailor to weaviate class name convention
:return: string representing the name of weaviate class/schema name of
this :class:`DocumentArrayWeaviate` object
"""
return ''.join([i for i in uuid.uuid1().hex if not i.isdigit()]).capitalize()
def _get_schema_by_name(self, cls_name: str) -> Dict:
"""Return the schema dictionary object with the class name
Content of the all dictionaries by this method are the same except the name
of the weaviate's ``class``
:param cls_name: the name of the schema/class in weaviate
:return: the schema dictionary
"""
# TODO: ideally we should only use one schema. this will allow us to deal with
# consistency better
return {
'classes': [
{
'class': cls_name,
"vectorizer": "none",
'vectorIndexConfig': {'skip': False},
'properties': [
{
'dataType': ['blob'],
'name': '_serialized',
'indexInverted': False,
},
],
},
{
'class': cls_name + 'Meta',
"vectorizer": "none",
'vectorIndexConfig': {'skip': True},
'properties': [
{
'dataType': ['string[]'],
'name': '_offset2ids',
'indexInverted': False,
},
],
},
]
}
def _load_or_create_weaviate_schema(self):
"""Create a new weaviate schema for this :class:`DocumentArrayWeaviate` object
if not present in weaviate or if ``self._config.name`` is None. If ``self._config.name``
is provided and not None and schema with the specified name exists in weaviate,
then load the object with the given ``self._config.name``
:return: the schemas of this :class`DocumentArrayWeaviate` object and its meta
"""
if not self._config.name:
name_candidate = self._get_weaviate_class_name()
doc_schemas = self._get_schema_by_name(name_candidate)
while self._client.schema.contains(doc_schemas):
name_candidate = self._get_weaviate_class_name()
doc_schemas = self._get_schema_by_name(name_candidate)
self._client.schema.create(doc_schemas)
self._config.name = name_candidate
return doc_schemas
doc_schemas = self._get_schema_by_name(self._config.name)
if self._client.schema.contains(doc_schemas):
return doc_schemas
self._client.schema.create(doc_schemas)
return doc_schemas
    def _update_offset2ids_meta(self):
        """Update the offset2ids in weaviate to the current local version"""
if self._offset2ids_wid is not None and self._client.data_object.exists(
self._offset2ids_wid
):
self._client.data_object.update(
data_object={'_offset2ids': self._offset2ids},
class_name=self._meta_name,
uuid=self._offset2ids_wid,
)
else:
self._offset2ids_wid = str(uuid.uuid1())
self._client.data_object.create(
data_object={'_offset2ids': self._offset2ids},
class_name=self._meta_name,
uuid=self._offset2ids_wid,
)
def _get_offset2ids_meta(self) -> Tuple[List, str]:
"""Return the offset2ids stored in weaviate along with the name of the schema/class
in weaviate that stores meta information of this object
:return: a tuple with first element as a list of offset2ids and second element
being name of weaviate class/schema of the meta object
:raises ValueError: error is raised if meta class name is not defined
"""
if not self._meta_name:
raise ValueError('meta object is not defined')
resp = (
self._client.query.get(self._meta_name, ['_offset2ids', '_additional {id}'])
.do()
.get('data', {})
.get('Get', {})
.get(self._meta_name, [])
)
if not resp:
return [], None
elif len(resp) == 1:
return resp[0]['_offset2ids'], resp[0]['_additional']['id']
else:
raise ValueError('received multiple meta copies which is invalid')
@property
def name(self):
"""An alias to _class_name that returns the id/name of the class
in the weaviate of this :class:`DocumentArrayWeaviate`
:return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
"""
return self._class_name
@property
def _class_name(self):
"""Return the name of the class in weaviate of this :class:`DocumentArrayWeaviate
:return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
"""
if not self._schemas:
return None
return self._schemas['classes'][0]['class']
@property
def _meta_name(self):
"""Return the name of the class in weaviate that stores the meta information of
this :class:`DocumentArrayWeaviate`
:return: name of weaviate class/schema of class that stores the meta information
"""
# TODO: remove this after we combine the meta info to the DocumentArray class
if not self._schemas:
return None
return self._schemas['classes'][1]['class']
@property
def _class_schema(self) -> Optional[Dict]:
"""Return the schema dictionary of this :class:`DocumentArrayWeaviate`'s weaviate schema
:return: the dictionary representing this weaviate schema
"""
if not self._schemas:
return None
return self._schemas['classes'][0]
@property
def _meta_schema(self):
"""Return the schema dictionary of this weaviate schema that stores this object's meta
:return: the dictionary representing a meta object's weaviate schema
"""
if not self._schemas and len(self._schemas) < 2:
return None
return self._schemas['classes'][1]
def _doc2weaviate_create_payload(self, value: 'Document'):
"""Return the payload to store :class:`Document` into weaviate
:param value: document to create a payload for
:return: the payload dictionary
"""
if value.embedding is None:
embedding = np.zeros(self._n_dim)
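            # A zero vector of the configured dimensionality keeps the object insertable when no embedding was computed.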
else:
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(value.embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
if embedding.shape != (self._n_dim,):
raise ValueError(
f'All documents must have embedding of shape n_dim: {self._n_dim}, receiving shape: {embedding.shape}'
)
# Weaviate expects vector to have dim 2 at least
# or get weaviate.exceptions.UnexpectedStatusCodeException: models.C11yVector
# hence we cast it to list of a single element
if len(embedding) == 1:
embedding = [embedding[0]]
return dict(
data_object={'_serialized': value.to_base64(**self._serialize_config)},
class_name=self._class_name,
uuid=self._wmap(value.id),
vector=embedding,
)
def _wmap(self, doc_id: str):
"""the function maps doc id to weaviate id
:param doc_id: id of the document
:return: weaviate object id
"""
# appending class name to doc id to handle the case:
# daw1 = DocumentArrayWeaviate([Document(id=str(i), text='hi') for i in range(3)])
# daw2 = DocumentArrayWeaviate([Document(id=str(i), text='bye') for i in range(3)])
# daw2[0, 'text'] == 'hi' # this will be False if we don't append class name
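        # uuid5 is deterministic, so a given document id always maps to the same weaviate object id within this class.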
return str(uuid.uuid5(uuid.NAMESPACE_URL, doc_id + self._class_name))
def _get_storage_infos(self) -> Dict:
storage_infos = super()._get_storage_infos()
return {
'Backend': 'Weaviate (www.semi.technology/developers/weaviate)',
'Hostname': self._config.client,
'Schema Name': self._config.name,
'Serialization Protocol': self._config.serialize_config.get('protocol'),
**storage_infos,
}
|
the-stack_0_2974 | # coding: utf-8
import pprint
import re
import six
class UpdateDomainLoginPolicyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domain_id': 'str',
'body': 'UpdateDomainLoginPolicyRequestBody'
}
attribute_map = {
'domain_id': 'domain_id',
'body': 'body'
}
def __init__(self, domain_id=None, body=None):
"""UpdateDomainLoginPolicyRequest - a model defined in huaweicloud sdk"""
self._domain_id = None
self._body = None
self.discriminator = None
self.domain_id = domain_id
if body is not None:
self.body = body
@property
def domain_id(self):
"""Gets the domain_id of this UpdateDomainLoginPolicyRequest.
:return: The domain_id of this UpdateDomainLoginPolicyRequest.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this UpdateDomainLoginPolicyRequest.
:param domain_id: The domain_id of this UpdateDomainLoginPolicyRequest.
:type: str
"""
self._domain_id = domain_id
@property
def body(self):
"""Gets the body of this UpdateDomainLoginPolicyRequest.
:return: The body of this UpdateDomainLoginPolicyRequest.
:rtype: UpdateDomainLoginPolicyRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateDomainLoginPolicyRequest.
:param body: The body of this UpdateDomainLoginPolicyRequest.
:type: UpdateDomainLoginPolicyRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateDomainLoginPolicyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_2976 | import os
h, d, aim = 0, 0, 0
def forward(x):
global h, d, aim
h += x
d += aim * x
def down(x):
global aim
aim += x
def up(x):
global aim
aim -= x
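# "down"/"up" adjust the aim, while "forward" advances horizontally and increases depth by aim * distance; the loop below dispatches on the command name.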
if __name__ == '__main__':
with open(os.path.join('inputs', 'day2.txt')) as f:
moves = list(map(lambda x: x.split(' '), f.readlines()))
moves = list(map(lambda x: (x[0], int(x[1])), moves))
for q in moves:
locals()[q[0]](q[1])
print(h, d, h * d)
|
the-stack_0_2977 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
bphEfficiency = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/BPH/*"),
verbose = cms.untracked.uint32(0), # Set to 2 for all messages
resolution = cms.vstring(),
efficiency = cms.vstring(
"effic_muPhi 'mu efficiency vs phi; mu phi [rad]; efficiency' muPhi_numerator muPhi_denominator",
"effic_muEta 'mu efficiency vs eta; mu eta [rad]; efficiency' muEta_numerator muEta_denominator",
"effic_muPt 'mu efficiency vs pt; mu pt [GeV]; efficiency' muPt_numerator muPt_denominator",
"effic_mu_d0 'mu efficiency vs d0; mu d0 [cm]; efficiency' mu_d0_numerator mu_d0_denominator",
"effic_mu_z0 'mu efficiency vs z0; mu z0 [cm]; efficiency' mu_z0_numerator mu_z0_denominator",
),
# efficiencyProfile = cms.untracked.vstring(
# "effic_met_vs_LS 'MET efficiency vs LS; LS; PF MET efficiency' metVsLS_numerator metVsLS_denominator"
# ),
)
bphClient = cms.Sequence(
bphEfficiency
)
|
the-stack_0_2978 | from data.loveda import LoveDALoader
from utils.tools import *
from skimage.io import imsave
import os
def predict_test(model, cfg, ckpt_path=None, save_dir='./submit_test'):
os.makedirs(save_dir, exist_ok=True)
seed_torch(2333)
model_state_dict = torch.load(ckpt_path)
model.load_state_dict(model_state_dict, strict=True)
count_model_parameters(model)
model.eval()
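    # Inference only: the loop below runs under torch.no_grad() and writes each prediction out as a uint8 class mask.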
print(cfg.EVAL_DATA_CONFIG)
eval_dataloader = LoveDALoader(cfg.EVAL_DATA_CONFIG)
with torch.no_grad():
for ret, ret_gt in tqdm(eval_dataloader):
ret = ret.to(torch.device('cuda'))
cls = model(ret)
cls = cls.argmax(dim=1).cpu().numpy()
for fname, pred in zip(ret_gt['fname'], cls):
imsave(os.path.join(save_dir, fname), pred.astype(np.uint8))
torch.cuda.empty_cache()
if __name__ == '__main__':
ckpt_path = './log/CBST_2Urban.pth'
from module.Encoder import Deeplabv2
cfg = import_config('st.cbst.2urban')
model = Deeplabv2(dict(
backbone=dict(
resnet_type='resnet50',
output_stride=16,
pretrained=True,
),
multi_layer=False,
cascade=False,
use_ppm=True,
ppm=dict(
num_classes=cfg.NUM_CLASSES,
use_aux=False,
),
inchannels=2048,
num_classes=cfg.NUM_CLASSES
)).cuda()
predict_test(model, cfg, ckpt_path) |
the-stack_0_2981 | import logging
import logging.handlers
import argparse
import sys
import os
import time
from bluetooth import *
from . import gpioservice
from .powerControllerModule import PowerThread
from .configControllerModule import ConfigController
from . import stateControllerModule
from .libInstaller import LibInstaller
from subprocess import call
class LoggerHelper(object):
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
gpioservice.setup()
gpioservice.loadInitialData()
stateController = stateControllerModule.StateController()
powerThread = PowerThread()
client_sock = None
config = ConfigController()
def setup_logging():
# Default logging settings
LOG_FILE = "/var/log/bluetoothservice.log"
LOG_LEVEL = logging.INFO
# Define and parse command line arguments
argp = argparse.ArgumentParser(description="Raspberry PI Bluetooth Server")
argp.add_argument("-l", "--log", help="log (default '" + LOG_FILE + "')")
# Grab the log file from arguments
args = argp.parse_args()
if args.log:
LOG_FILE = args.log
# Setup the logger
logger = logging.getLogger(__name__)
# Set the log level
logger.setLevel(LOG_LEVEL)
# Make a rolling event log that resets at midnight and backs-up every 3 days
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE,
when="midnight",
backupCount=3)
# Log messages should include time stamp and log level
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
# Attach the formatter to the handler
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)
# Replace stdout with logging to file at INFO level
sys.stdout = LoggerHelper(logger, logging.INFO)
# Replace stderr with logging to file at ERROR level
sys.stderr = LoggerHelper(logger, logging.ERROR)
def loadPersistentData():
if config.loadPreferences():
if not config.isSQLiteInstalled():
libInstaller = LibInstaller()
libInstaller.installSQLite()
if config.setSQLiteInstalled():
restart(None, None)
else:
shutDown(None)
# Main loop
def main():
powerThread.start()
loadPersistentData()
print ("Starting main")
# We need to wait until Bluetooth init is done
time.sleep(5)
    # print ("Bluetooth initialised")
print (read_local_bdaddr())
# Make device visible
call("sudo hciconfig hci0 piscan", shell=True)
# Create a new server socket using RFCOMM protocol
server_sock = BluetoothSocket(RFCOMM)
# Bind to any port
server_sock.bind(("", PORT_ANY))
# Start listening
server_sock.listen(1)
# Get the port the server socket is listening
port = server_sock.getsockname()[1]
# The service UUID to advertise
uuid = "aaabf455-b0e1-4b88-b9c8-184e53f15663"
# Start advertising the service
advertise_service( server_sock, "TrainmoteServer",
service_id=uuid,
service_classes=[uuid, SERIAL_PORT_CLASS],
profiles=[SERIAL_PORT_PROFILE])
# Main Bluetooth server loop
client_sock = None
while True:
try:
# This will block until we get a new connection
if client_sock is None:
stateController.setState(stateControllerModule.STATE_NOT_CONNECTED)
print ("Waiting for connection on RFCOMM channel %d" % port)
client_sock, client_info = server_sock.accept()
print ("Accepted connection from ", client_info)
stateController.setState(stateControllerModule.STATE_CONNECTED)
# Read the data sent by the client
data = client_sock.recv(1024)
if len(data) == 0:
break
print ("Received [%s]" % data)
# Handle the request
response = gpioservice.receivedMessage(data)
client_sock.send(response)
print ("Sent back [%s]" % response)
# Check if respone is firmware update, load from git and restart script.
if 'PERFORM_GIT_UPDATE' in response and 'success' in response:
call('sudo sh ./scripts/updateScript.sh', shell=True)
restart(server_sock, client_sock)
break
except IOError:
            print ("Error occurred")
closeClientConnection(client_sock)
client_sock = None
pass
except KeyboardInterrupt:
closeClientConnection(client_sock)
shutDown(server_sock)
break
def restart(server_sock, client_sock):
closeClientConnection(client_sock)
shutDown(server_sock)
os.execv(sys.executable, ['python'] + sys.argv)
def shutDown(server_sock):
powerThread.kill.set()
powerThread.isTurningOff = True
powerThread.join()
stateController.setState(stateControllerModule.STATE_SHUTDOWN)
if server_sock is not None:
server_sock.close()
print ("Server going down")
stateController.stop()
def closeClientConnection(client_sock):
print ("Closing client socket")
if client_sock is not None:
client_sock.close()
client_sock = None
if __name__ == '__main__':
main() |
the-stack_0_2982 | import requests
def esmoneda(cripto, monedas):
    return cripto in monedas
def main():
    monedas_list = []
    data = requests.get("https://api.coinmarketcap.com/v2/listings/").json()
    for cripto in data["data"]:
        monedas_list.append(cripto["symbol"])
    monedas = tuple(monedas_list)
    moneda = input("Enter the symbol of the coin to verify: ")
    while not esmoneda(moneda, monedas):
        print("Invalid coin.")
        moneda = input("Enter the symbol of the coin: ")
    else:
        print("The coin", moneda, "is valid because it exists on coinmarketcap.com")
main()
|
the-stack_0_2983 | import frappe
def execute():
frappe.reload_doc("contacts", "doctype", "contact_email")
frappe.reload_doc("contacts", "doctype", "contact_phone")
frappe.reload_doc("contacts", "doctype", "contact")
contact_details = frappe.db.sql(
"""
SELECT
`name`, `email_id`, `phone`, `mobile_no`, `modified_by`, `creation`, `modified`
FROM `tabContact`
where not exists (select * from `tabContact Email`
where `tabContact Email`.parent=`tabContact`.name
and `tabContact Email`.email_id=`tabContact`.email_id)
""",
as_dict=True,
)
email_values = []
phone_values = []
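	# Collect child-table rows in memory and bulk-insert them in batches (every 10,000 contacts and on the final row).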
for count, contact_detail in enumerate(contact_details):
phone_counter = 1
is_primary = 1
if contact_detail.email_id:
email_values.append(
(
1,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.email_id,
"email_ids",
"Contact",
contact_detail.name,
1,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
if contact_detail.phone:
is_primary_phone = 1 if phone_counter == 1 else 0
phone_values.append(
(
phone_counter,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.phone,
"phone_nos",
"Contact",
contact_detail.name,
is_primary_phone,
0,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
phone_counter += 1
if contact_detail.mobile_no:
is_primary_mobile_no = 1 if phone_counter == 1 else 0
phone_values.append(
(
phone_counter,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.mobile_no,
"phone_nos",
"Contact",
contact_detail.name,
0,
is_primary_mobile_no,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
if email_values and (count % 10000 == 0 or count == len(contact_details) - 1):
frappe.db.sql(
"""
INSERT INTO `tabContact Email`
(`idx`, `name`, `email_id`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`,
`modified`, `modified_by`)
VALUES {}
""".format(
", ".join(["%s"] * len(email_values))
),
tuple(email_values),
)
email_values = []
if phone_values and (count % 10000 == 0 or count == len(contact_details) - 1):
frappe.db.sql(
"""
INSERT INTO `tabContact Phone`
(`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary_phone`, `is_primary_mobile_no`, `creation`,
`modified`, `modified_by`)
VALUES {}
""".format(
", ".join(["%s"] * len(phone_values))
),
tuple(phone_values),
)
phone_values = []
frappe.db.add_index("Contact Phone", ["phone"])
frappe.db.add_index("Contact Email", ["email_id"])
|
the-stack_0_2985 | # Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine)
|