ext | sha | content
---|---|---|
py
|
1a5d302ab5c78d30819ec0739f84a71e2d30d9e8
|
class Map:
class _Node:
def __init__(self, key=None, value=None, next_node=None):
self.key = key
self.value = value
self.next = next_node
def __init__(self):
self._dummy_head = self._Node()
self._size = 0
def get_size(self):
return self._size
def is_empty(self):
return self._size == 0
def get_node(self, key):
cur = self._dummy_head.next
while cur is not None:
if cur.key == key:
return cur
cur = cur.next
return
def contains(self, key):
return self.get_node(key) is not None
def get(self, key):
node = self.get_node(key)
return None if node is None else node.value
def add(self, key, val):
node = self.get_node(key)
if node is None:
self._dummy_head.next = self._Node(
key=key, value=val, next_node=self._dummy_head.next
)
self._size += 1
else:
node.value = val
def set(self, key, val):
self.add(key, val)
def remove(self, key):
prev = self._dummy_head
while prev.next is not None:
if prev.next.key == key:
break
prev = prev.next
if prev.next is not None:
ret = prev.next.value
prev.next = prev.next.next
self._size -= 1
return ret
return
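

# Illustrative usage sketch (added example, not part of the original file):
# exercises the linked-list backed Map defined above.
if __name__ == '__main__':
    m = Map()
    m.add('a', 1)
    m.add('b', 2)
    m.set('a', 10)                 # overwrite an existing key
    assert m.get('a') == 10
    assert m.contains('b')
    assert m.get_size() == 2
    assert m.remove('b') == 2      # remove returns the removed value
    assert m.get('b') is None
    assert m.is_empty() is False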
|
py
|
1a5d3042701b2eed38fc1db504ee1cf809e74b67
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.create_storagepool_tier_response import CreateStoragepoolTierResponse # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestCreateStoragepoolTierResponse(unittest.TestCase):
"""CreateStoragepoolTierResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCreateStoragepoolTierResponse(self):
"""Test CreateStoragepoolTierResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.create_storagepool_tier_response.CreateStoragepoolTierResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py
|
1a5d30d05cf79bf0ebe0a31015642b9b9c04be06
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import aws_cdk.core as cdk
import aws_cdk.aws_s3 as s3
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_glue as glue
import aws_cdk.aws_iam as iam
import aws_cdk.aws_kms as kms
import aws_cdk.aws_s3_deployment as s3_deployment
from .configuration import (
AVAILABILITY_ZONE_1, SUBNET_ID_1,
S3_ACCESS_LOG_BUCKET, S3_KMS_KEY, S3_CONFORMED_BUCKET, S3_PURPOSE_BUILT_BUCKET, SHARED_SECURITY_GROUP_ID,
get_environment_configuration, get_logical_id_prefix, get_resource_name_prefix
)
class GlueStack(cdk.Stack):
def __init__(
self,
scope: cdk.Construct,
construct_id: str,
target_environment: str,
**kwargs
) -> None:
"""
CloudFormation stack to create Glue Jobs, Connections,
Script Bucket, Temporary Bucket, and an IAM Role for permissions.
@param scope cdk.Construct: Parent of this stack, usually an App or a Stage, but could be any construct.
@param construct_id str:
The construct ID of this stack. If stackName is not explicitly defined,
this id (and any parent IDs) will be used to determine the physical ID of the stack.
@param target_environment str: The target environment for stacks in the deploy stage
@param kwargs:
"""
super().__init__(scope, construct_id, **kwargs)
self.mappings = get_environment_configuration(target_environment)
logical_id_prefix = get_logical_id_prefix()
resource_name_prefix = get_resource_name_prefix()
existing_access_logs_bucket_name = cdk.Fn.import_value(self.mappings[S3_ACCESS_LOG_BUCKET])
access_logs_bucket = s3.Bucket.from_bucket_attributes(
self,
'ImportedBucket',
bucket_name=existing_access_logs_bucket_name
)
s3_kms_key_parameter = cdk.Fn.import_value(self.mappings[S3_KMS_KEY])
s3_kms_key = kms.Key.from_key_arn(self, 'ImportedKmsKey', s3_kms_key_parameter)
shared_security_group_parameter = cdk.Fn.import_value(self.mappings[SHARED_SECURITY_GROUP_ID])
glue_connection_subnet = cdk.Fn.import_value(self.mappings[SUBNET_ID_1])
glue_connection_availability_zone = cdk.Fn.import_value(self.mappings[AVAILABILITY_ZONE_1])
conformed_bucket_name = cdk.Fn.import_value(self.mappings[S3_CONFORMED_BUCKET])
conformed_bucket = s3.Bucket.from_bucket_name(
self,
id='ImportedConformedBucket',
bucket_name=conformed_bucket_name
)
purposebuilt_bucket_name = cdk.Fn.import_value(self.mappings[S3_PURPOSE_BUILT_BUCKET])
purposebuilt_bucket = s3.Bucket.from_bucket_name(
self,
id='ImportedPurposeBuiltBucket',
bucket_name=purposebuilt_bucket_name
)
shared_security_group = ec2.SecurityGroup.from_security_group_id(
self,
'ImportedSecurityGroup',
shared_security_group_parameter
)
subnet = ec2.Subnet.from_subnet_attributes(
self,
'ImportedSubnet',
subnet_id=glue_connection_subnet,
availability_zone=glue_connection_availability_zone
)
glue_scripts_bucket = self.glue_scripts_bucket(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
access_logs_bucket
)
glue_scripts_temp_bucket = self.glue_scripts_temporary_bucket(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
access_logs_bucket
)
glue_role = self.get_role(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
)
job_connection = glue.Connection(
self,
f'{target_environment}{logical_id_prefix}RawToConformedWorkflowConnection',
type=glue.ConnectionType.NETWORK,
connection_name=f'{target_environment.lower()}-{resource_name_prefix}-raw-to-conformed-connection',
security_groups=[shared_security_group],
subnet=subnet
)
self.raw_to_conformed_job = glue.CfnJob(
self,
f'{target_environment}{logical_id_prefix}RawToConformedJob',
name=f'{target_environment.lower()}-{resource_name_prefix}-raw-to-conformed-job',
command=glue.CfnJob.JobCommandProperty(
name='glueetl',
python_version='3',
script_location=f's3://{glue_scripts_bucket.bucket_name}/etl/etl_raw_to_conformed.py'
),
connections=glue.CfnJob.ConnectionsListProperty(
connections=[job_connection.connection_name],
),
default_arguments={
'--enable-glue-datacatalog': '""',
'--target_database_name': 'datablog_arg',
'--target_bucket': conformed_bucket.bucket_name,
'--target_table_name': 'datablog_nyc_raw',
'--TempDir': f's3://{glue_scripts_temp_bucket.bucket_name}/etl/raw-to-conformed',
},
execution_property=glue.CfnJob.ExecutionPropertyProperty(
max_concurrent_runs=1,
),
glue_version='2.0',
max_retries=0,
number_of_workers=5,
role=glue_role.role_arn,
worker_type='G.1X',
)
self.conformed_to_purpose_built_job = glue.CfnJob(
self,
f'{target_environment}{logical_id_prefix}ConformedToPurposeBuiltJob',
name=f'{target_environment.lower()}-{resource_name_prefix}-conformed-to-purpose-built-job',
command=glue.CfnJob.JobCommandProperty(
name='glueetl',
python_version='3',
script_location=f's3://{glue_scripts_bucket.bucket_name}/etl/etl_conformed_to_purposebuilt.py'
),
connections=glue.CfnJob.ConnectionsListProperty(
connections=[job_connection.connection_name],
),
default_arguments={
'--enable-glue-datacatalog': '""',
'--target_database_name': 'datablog_conformed_arg',
'--target_bucketname': purposebuilt_bucket.bucket_name,
'--target_table_name': 'datablog_nyc_purposebuilt',
'--txn_bucket_name': glue_scripts_bucket.bucket_name,
'--txn_sql_prefix_path': '/etl/transformation-sql/',
'--TempDir': f's3://{glue_scripts_temp_bucket.bucket_name}/etl/conformed-to-purpose-built'
},
execution_property=glue.CfnJob.ExecutionPropertyProperty(
max_concurrent_runs=1,
),
glue_version='2.0',
max_retries=0,
number_of_workers=5,
role=glue_role.role_arn,
worker_type='G.1X',
)
def glue_scripts_bucket(
self,
target_environment,
logical_id_prefix: str,
resource_name_prefix: str,
s3_kms_key: kms.Key,
access_logs_bucket: s3.Bucket
) -> s3.Bucket:
"""
Creates S3 Bucket that contains glue scripts used in Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to use for encryption of data at rest
@param access_logs_bucket s3.Bucket: The access logs target for this bucket
"""
bucket_name = f'{target_environment.lower()}-{resource_name_prefix}-{self.account}-etl-scripts'
bucket = s3.Bucket(
self,
f'{target_environment}{logical_id_prefix}RawGlueScriptsBucket',
bucket_name=bucket_name,
access_control=s3.BucketAccessControl.PRIVATE,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
bucket_key_enabled=s3_kms_key is not None,
encryption=s3.BucketEncryption.KMS,
encryption_key=s3_kms_key,
public_read_access=False,
removal_policy=cdk.RemovalPolicy.DESTROY,
versioned=True,
object_ownership=s3.ObjectOwnership.OBJECT_WRITER,
server_access_logs_bucket=access_logs_bucket,
server_access_logs_prefix=bucket_name,
)
# Dynamically upload resources to the script target
s3_deployment.BucketDeployment(
self,
'DeployGlueJobScript',
# This path is relative to the root of the project
sources=[s3_deployment.Source.asset('./lib/glue_scripts')],
destination_bucket=bucket,
destination_key_prefix='etl',
)
return bucket
def glue_scripts_temporary_bucket(
self, target_environment, logical_id_prefix: str, resource_name_prefix: str,
s3_kms_key: kms.Key, access_logs_bucket: s3.Bucket
) -> s3.Bucket:
"""
Creates S3 Bucket used as a temporary file store in Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to use for encryption of data at rest
@param access_logs_bucket s3.Bucket: The access logs target for this bucket
"""
bucket_name = f'{target_environment.lower()}-{resource_name_prefix}-{self.account}-glue-temporary-scripts'
bucket = s3.Bucket(
self,
f'{target_environment}{logical_id_prefix}RawGlueScriptsTemporaryBucket',
bucket_name=bucket_name,
access_control=s3.BucketAccessControl.PRIVATE,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
bucket_key_enabled=s3_kms_key is not None,
encryption=s3.BucketEncryption.KMS if s3_kms_key else s3.BucketEncryption.S3_MANAGED,
encryption_key=s3_kms_key if s3_kms_key else None,
public_read_access=False,
removal_policy=cdk.RemovalPolicy.DESTROY,
versioned=True,
object_ownership=s3.ObjectOwnership.OBJECT_WRITER,
server_access_logs_bucket=access_logs_bucket,
server_access_logs_prefix=bucket_name,
)
return bucket
def get_role(
self,
target_environment: str,
logical_id_prefix: str,
resource_name_prefix: str,
s3_kms_key: kms.Key,
) -> iam.Role:
"""
Creates the role used during Glue Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to provide permissions to
@returns iam.Role: The role that was created
"""
return iam.Role(
self,
f'{target_environment}{logical_id_prefix}RawGlueRole',
role_name=f'{target_environment.lower()}-{resource_name_prefix}-raw-glue-role',
assumed_by=iam.ServicePrincipal('glue.amazonaws.com'),
inline_policies=[
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
's3:ListBucketVersions',
's3:ListBucket',
's3:GetBucketNotification',
's3:GetBucketLocation',
],
resources=[
'arn:aws:s3:::*'
]
)
]),
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
                        's3:ReplicateObject',
's3:PutObject',
's3:GetObject',
's3:DeleteObject',
],
resources=[
'arn:aws:s3:::*/*'
]
)
]),
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
's3:ListAllMyBuckets',
],
resources=[
'*'
]
)
]),
# NOTE: This is required due to bucket level encryption on S3 Buckets
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
'kms:*',
],
resources=[
s3_kms_key.key_arn,
]
)
]),
],
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSGlueServiceRole'),
]
)
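

# Illustrative usage sketch (added example; the app, account, and region values are
# hypothetical). The stack is meant to be instantiated from a CDK app or stage, and
# it expects the referenced CloudFormation exports (buckets, KMS key, VPC values)
# to already exist:
#
#   app = cdk.App()
#   GlueStack(
#       app,
#       'DevGlueStack',
#       target_environment='Dev',
#       env=cdk.Environment(account='111111111111', region='us-east-1'),
#   )
#   app.synth()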
|
py
|
1a5d31359a9b756c60a22ec287c9d4552d5932d3
|
from django.apps import AppConfig
class AppBlogConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'App_Blog'
|
py
|
1a5d31a3f7b45bd12677779a90f7a569de9b778d
|
"""
udp_server
"""
from socket import *
# Create a UDP socket
udp_socket = socket(AF_INET, SOCK_DGRAM)
# Bind to an address and port
udp_socket.bind(('0.0.0.0', 8888))
"""
功能: 接收UDP消息
参数: 每次最多接收多少字节
返回值: data 接收到的内容
addr 消息发送方地址
"""
data, addr = udp_socket.recvfrom(1024 * 1024)
print(addr)
print(data.decode())  # data is sent and received as byte strings
udp_socket.sendto(b'thanks', addr)
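
# Illustrative matching client sketch (added example; assumes the server above is
# reachable at 127.0.0.1:8888 and that the client runs in a separate process):
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#   client = socket(AF_INET, SOCK_DGRAM)
#   client.sendto(b'hello', ('127.0.0.1', 8888))
#   reply, _ = client.recvfrom(1024)   # expect b'thanks' back from the server
#   client.close()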
|
py
|
1a5d325b1a30ea5146e1713f245c5b67bfdfc524
|
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""led_controller controller."""
from controller import Robot
robot = Robot()
timestep = int(robot.getBasicTimeStep())
led = robot.getLED('led')
led.set(True)
positionSensor = robot.getPositionSensor('emergency button sensor')
positionSensor.enable(timestep)
released = True
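# The emergency-button position sensor reads negative values when pressed.
# Toggle the LED once per press: trigger below -0.010 m, re-arm above -0.002 m
# (a small hysteresis band so one press produces exactly one toggle).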
while robot.step(timestep) != -1:
value = positionSensor.getValue()
if value > -0.002:
released = True
if released and value < -0.010:
released = False
led.set(not led.get())
|
py
|
1a5d32b7be4afbefd8f5a2401d7c9bc9efa5fb98
|
"""This module contains the main functions used to load the required data from disk for training."""
import functools
import gzip
import pickle
import os
import numpy as np
import torch
from sketchgraphs_models import distributed_utils
from sketchgraphs_models.nn import data_util
from sketchgraphs_models.graph import dataset
from sketchgraphs.data import flat_array
def load_sequences_and_mappings(dataset_file, auxiliary_file, quantization, entity_features=True, edge_features=True):
data = flat_array.load_dictionary_flat(np.load(dataset_file, mmap_mode='r'))
if auxiliary_file is None:
root, _ = os.path.splitext(dataset_file)
auxiliary_file = root + '.stats.pkl.gz'
if entity_features or edge_features:
with gzip.open(auxiliary_file, 'rb') as f:
auxiliary_dict = pickle.load(f)
if entity_features:
entity_feature_mapping = dataset.EntityFeatureMapping(auxiliary_dict['node'])
else:
entity_feature_mapping = None
seqs = data['sequences']
weights = data['sequence_lengths']
if edge_features:
if isinstance(quantization['angle'], dataset.QuantizationMap):
angle_map = quantization['angle']
else:
angle_map = dataset.QuantizationMap.from_counter(auxiliary_dict['edge']['angle'], quantization['angle'])
if isinstance(quantization['length'], dataset.QuantizationMap):
length_map = quantization['length']
else:
length_map = dataset.QuantizationMap.from_counter(auxiliary_dict['edge']['length'], quantization['length'])
edge_feature_mapping = dataset.EdgeFeatureMapping(angle_map, length_map)
else:
edge_feature_mapping = None
return {
'sequences': seqs.share_memory_(),
'entity_feature_mapping': entity_feature_mapping,
'edge_feature_mapping': edge_feature_mapping,
'weights': weights
}
def load_dataset_and_weights_with_mapping(dataset_file, node_feature_mapping, edge_feature_mapping, seed=None):
data = flat_array.load_dictionary_flat(np.load(dataset_file, mmap_mode='r'))
seqs = data['sequences']
seqs.share_memory_()
ds = dataset.GraphDataset(seqs, node_feature_mapping, edge_feature_mapping, seed)
return ds, data['sequence_lengths']
def load_dataset_and_weights(dataset_file, auxiliary_file, quantization, seed=None,
entity_features=True, edge_features=True, force_entity_categorical_features=False):
data = load_sequences_and_mappings(dataset_file, auxiliary_file, quantization, entity_features, edge_features)
if data['entity_feature_mapping'] is None and force_entity_categorical_features:
# Create an entity mapping which only computes the categorical features (i.e. isConstruction and clockwise)
data['entity_feature_mapping'] = dataset.EntityFeatureMapping()
return dataset.GraphDataset(
data['sequences'], data['entity_feature_mapping'], data['edge_feature_mapping'], seed=seed), data['weights']
def make_dataloader_train(collate_fn, ds_train, weights, batch_size, num_epochs, num_workers, distributed_config=None):
sampler = torch.utils.data.WeightedRandomSampler(
weights, len(weights), replacement=True)
if distributed_config is not None:
sampler = distributed_utils.DistributedSampler(
sampler, distributed_config.world_size, distributed_config.rank)
batch_sampler = torch.utils.data.BatchSampler(
sampler, batch_size, drop_last=False)
dataloader_train = torch.utils.data.DataLoader(
ds_train,
collate_fn=collate_fn,
batch_sampler=data_util.MultiEpochSampler(batch_sampler, num_epochs),
num_workers=num_workers,
pin_memory=True)
batches_per_epoch = len(batch_sampler)
return dataloader_train, batches_per_epoch
def _make_dataloader_eval(ds_eval, weights, batch_size, num_workers, distributed_config=None):
sampler = torch.utils.data.WeightedRandomSampler(
weights, len(weights), replacement=True)
if distributed_config is not None:
sampler = distributed_utils.DistributedSampler(
sampler, distributed_config.world_size, distributed_config.rank)
dataloader_eval = torch.utils.data.DataLoader(
ds_eval,
collate_fn=functools.partial(
dataset.collate,
entity_feature_mapping=ds_eval.node_feature_mapping,
edge_feature_mapping=ds_eval.edge_feature_mapping),
sampler=sampler,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True)
return dataloader_eval
def initialize_datasets(args, distributed_config: distributed_utils.DistributedTrainingInfo = None):
"""Initialize datasets and dataloaders.
Parameters
----------
args : dict
Dictionary containing all the dataset configurations.
distributed_config : distributed_utils.DistributedTrainingInfo, optional
If not None, configuration options for distributed training.
Returns
-------
torch.data.utils.Dataloader
Training dataloader
torch.data.utils.Dataloader
If not None, testing dataloader
int
Number of batches per training epoch
dataset.EntityFeatureMapping
Feature mapping in use for entities
dataset.EdgeFeatureMapping
Feature mapping in use for constraints
"""
quantization = {'angle': args['num_quantize_angle'], 'length': args['num_quantize_length']}
dataset_train_path = args['dataset_train']
auxiliary_path = args['dataset_auxiliary']
ds_train, weights_train = load_dataset_and_weights(
dataset_train_path, auxiliary_path, quantization, args['seed'],
not args.get('disable_entity_features', False), not args.get('disable_edge_features', False),
args.get('force_entity_categorical_features', False))
batch_size = args['batch_size']
num_workers = args['num_workers']
if distributed_config:
batch_size = batch_size // distributed_config.world_size
num_workers = num_workers // distributed_config.world_size
collate_fn = functools.partial(
dataset.collate,
entity_feature_mapping=ds_train.node_feature_mapping,
edge_feature_mapping=ds_train.edge_feature_mapping)
dl_train, batches_per_epoch = make_dataloader_train(
collate_fn, ds_train, weights_train, batch_size, args['num_epochs'], num_workers, distributed_config)
if args['dataset_test'] is not None:
ds_test, weights_test = load_dataset_and_weights_with_mapping(
args['dataset_test'], ds_train.node_feature_mapping, ds_train.edge_feature_mapping, args['seed'])
dl_test = _make_dataloader_eval(
ds_test, weights_test, batch_size, num_workers, distributed_config)
else:
dl_test = None
return dl_train, dl_test, batches_per_epoch, ds_train.node_feature_mapping, ds_train.edge_feature_mapping
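

# Illustrative usage sketch (added example; the paths and numeric values below are
# hypothetical). The keys mirror those read by initialize_datasets() above:
#
#   args = {
#       'dataset_train': 'data/sg_train.npy',
#       'dataset_test': None,
#       'dataset_auxiliary': None,        # falls back to '<dataset_train>.stats.pkl.gz'
#       'num_quantize_angle': 256,
#       'num_quantize_length': 256,
#       'seed': 42,
#       'batch_size': 2048,
#       'num_workers': 4,
#       'num_epochs': 10,
#   }
#   dl_train, dl_test, batches_per_epoch, node_map, edge_map = initialize_datasets(args)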
|
py
|
1a5d33144ceaef0e03255b8db63ba662fd2c46e7
|
import graphene
from ...core.permissions import ShippingPermissions
from ..channel.types import ChannelContext
from ..core.fields import ChannelContextFilterConnectionField
from ..decorators import permission_required
from ..translations.mutations import ShippingPriceTranslate
from .bulk_mutations import ShippingPriceBulkDelete, ShippingZoneBulkDelete
from .filters import ShippingZoneFilterInput
from .mutations.channels import ShippingMethodChannelListingUpdate
from .mutations.shippings import (
ShippingPriceCreate,
ShippingPriceDelete,
ShippingPriceExcludeProducts,
ShippingPriceRemoveProductFromExclude,
ShippingPriceUpdate,
ShippingZoneCreate,
ShippingZoneDelete,
ShippingZoneUpdate,
)
from .resolvers import resolve_shipping_zones
from .types import ShippingZone
class ShippingQueries(graphene.ObjectType):
shipping_zone = graphene.Field(
ShippingZone,
id=graphene.Argument(
graphene.ID, description="ID of the shipping zone.", required=True
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="Look up a shipping zone by ID.",
)
shipping_zones = ChannelContextFilterConnectionField(
ShippingZone,
filter=ShippingZoneFilterInput(
description="Filtering options for shipping zones."
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="List of the shop's shipping zones.",
)
@permission_required(ShippingPermissions.MANAGE_SHIPPING)
def resolve_shipping_zone(self, info, id, channel=None):
instance = graphene.Node.get_node_from_global_id(info, id, ShippingZone)
return ChannelContext(node=instance, channel_slug=channel) if instance else None
@permission_required(ShippingPermissions.MANAGE_SHIPPING)
def resolve_shipping_zones(self, info, channel=None, **_kwargs):
return resolve_shipping_zones(channel)
class ShippingMutations(graphene.ObjectType):
shipping_method_channel_listing_update = ShippingMethodChannelListingUpdate.Field()
shipping_price_create = ShippingPriceCreate.Field()
shipping_price_delete = ShippingPriceDelete.Field()
shipping_price_bulk_delete = ShippingPriceBulkDelete.Field()
shipping_price_update = ShippingPriceUpdate.Field()
shipping_price_translate = ShippingPriceTranslate.Field()
shipping_price_exclude_products = ShippingPriceExcludeProducts.Field()
shipping_price_remove_product_from_exclude = (
ShippingPriceRemoveProductFromExclude.Field()
)
shipping_zone_create = ShippingZoneCreate.Field()
shipping_zone_delete = ShippingZoneDelete.Field()
shipping_zone_bulk_delete = ShippingZoneBulkDelete.Field()
shipping_zone_update = ShippingZoneUpdate.Field()
|
py
|
1a5d33884a03de94ddc481529c26f428d975a8d8
|
import json
import logging
import pytest
from model.msg_model import MsgModel
from util.auth_util import Identity
from util.auth_util import JWTAuthenticator
from util.auth_util import Role
from util.config_util import Config
logger = logging.getLogger(__name__)
class TestReadMsgs:
path = "/api/user/read_msgs"
@pytest.fixture(autouse=True)
def __setup__(self, client):
self.client = client
def trigger_run(self, role, payload):
headers = {}
if role:
auth_token = JWTAuthenticator.dump_access_token(
Config.auth_secret_key, Identity(user="xxx", role=role), exp=86400
)
headers = {"Authorization": f"bearer {auth_token}"}
return self.client.post(
url=self.path, data=json.dumps(payload), headers=headers
)
def test__ok(self):
# prepare fixture
MsgModel.leave_msg("hello")
MsgModel.leave_msg("world")
# user read two messages
resp = self.trigger_run(Role.USER, {})
assert resp.status_code == 200
assert resp.json() == {"msgs": ["hello", "world"]}
# admin also has permission to read
resp = self.trigger_run(Role.ADMIN, {"limit": 1})
assert resp.status_code == 200
assert resp.json() == {"msgs": ["hello"]}
def test__authentication_error(self):
resp = self.trigger_run(None, {"msg": "hello"})
assert resp.status_code == 401
assert resp.json() == {"code": "UNAUTHENTICATED", "msg": "JWT is missing"}
def test__limit_error(self):
resp = self.trigger_run(Role.USER, {"limit": 101})
assert resp.status_code == 400
assert resp.json() == {
"code": "INVALID_PARAM",
"msg": "Invalid body param: limit",
}
|
py
|
1a5d3390f02266c662ca3babf2eba335e5f18cbf
|
from corehq.apps.app_manager.suite_xml.contributors import PostProcessor
from corehq.apps.app_manager.suite_xml.post_process.workflow import (
CommandId,
WorkflowDatumMeta,
WorkflowHelper,
prepend_parent_frame_children,
)
from corehq.apps.app_manager.suite_xml.xml_models import (
Argument,
PushFrame,
SessionEndpoint,
Stack,
StackDatum,
)
from corehq.util.timer import time_method
class EndpointsHelper(PostProcessor):
"""
Generates "Session Endpoints" - user-defined labels for forms or modules.
They end up as entries in the suite file that declare stack operations
necessary to navigate to the form or module, as well as what arguments (eg:
case IDs) must be provided to get there.
"""
@time_method()
def update_suite(self):
for module in self.modules:
if module.session_endpoint_id:
self.suite.endpoints.append(self._make_session_endpoint(module))
if module.module_type != "shadow":
for form in module.get_suite_forms():
if form.session_endpoint_id:
self.suite.endpoints.append(self._make_session_endpoint(module, form))
def _make_session_endpoint(self, module, form=None):
if form is not None:
endpoint_id = form.session_endpoint_id
else:
endpoint_id = module.session_endpoint_id
stack = Stack()
children = self.get_frame_children(module, form)
argument_ids = self._get_argument_ids(children)
# Add a claim request for each endpoint argument.
# This assumes that all arguments are case ids.
for arg_id in argument_ids:
self._add_claim_frame(stack, arg_id, endpoint_id)
# Add a frame to navigate to the endpoint
frame = PushFrame()
stack.add_frame(frame)
for child in children:
if isinstance(child, CommandId):
frame.add_command(child.to_command())
elif child.id in argument_ids:
self._add_datum_for_arg(frame, child.id)
return SessionEndpoint(
id=endpoint_id,
arguments=[Argument(id=i) for i in argument_ids],
stack=stack,
)
def _get_argument_ids(self, frame_children):
return [
child.id for child in frame_children
if isinstance(child, WorkflowDatumMeta) and child.requires_selection
]
def _add_claim_frame(self, stack, arg_id, endpoint_id):
frame = PushFrame()
stack.add_frame(frame)
self._add_datum_for_arg(frame, arg_id)
frame.add_command(f"'claim_command.{endpoint_id}.{arg_id}'")
def _add_datum_for_arg(self, frame, arg_id):
frame.add_datum(
StackDatum(id=arg_id, value=f"${arg_id}")
)
def get_frame_children(self, module, form):
helper = WorkflowHelper(self.suite, self.app, self.app.get_modules())
frame_children = helper.get_frame_children(module, form)
if module.root_module_id:
frame_children = prepend_parent_frame_children(helper, frame_children, module.root_module)
return frame_children
|
py
|
1a5d34014e4ee9cfa248fc886aab0555d4662257
|
import os
import sys
import logging
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import lasio
test_dir = os.path.dirname(__file__)
stegfn = lambda vers, fn: os.path.join(os.path.dirname(__file__), "examples", vers, fn)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def test_read_v30_sample():
las = lasio.read(stegfn("3.0", "sample_3.0.las"))
assert las.version[0].mnemonic == "VERS"
assert las.version[0].value == 3.0
assert las.version[0].descr == "CWLS LOG ASCII STANDARD -VERSION 3.0"
assert len(las.data) == 3
assert len(las.data[0]) == 15
assert las.data[2][8] == '2850000000000.0'
assert las.data[2][9] == 'LOST INTERVAL '
def test_read_v30_sample_standard_sections():
"""
Verify 'Curves' does read '~Log_Definition'
Verify 'Curves' doesn't read 'Core_*' sections
    Verify 'Parameter' does read '~Log_Parameter'
    Verify 'Parameter' doesn't read 'Perforations_*' sections
"""
las = lasio.read(stegfn("3.0", "sample_3.0.las"))
assert las.curves.DEPT.unit == "M"
# ~Log_Definition, a LAS3.0 equivalent of ~Curves has its data installed in Curves.
assert las.sections["Curves"].DEPT.unit == "M"
# ~Log_Parameter, a LAS3.0 equivalent of ~Parameter has its data installed in Parameter.
assert "Log_Parameter" not in las.sections.keys()
assert len(las.sections["Parameter"]) == 71
assert las.sections["Perforations_Definition"][0].mnemonic == "PERFT:1"
|
py
|
1a5d3555f853c3927536db6db47e7cb2339c4b8a
|
from pathlib import Path
from urllib.request import urlopen
from .element import Element
import json
import shutil
import logging
logger = logging.getLogger(__name__)
def read_gltf(fin):
with open(fin, encoding='utf-8') as f:
gltf = json.load(f, object_hook=lambda d: Element(**d))
# buffers = []
# for buffer in gltf.buffers:
# buffers.append(read_buffer(buffer.uri))
# with open(Path(fin).parent / gltf.buffers[0].uri, "rb") as f:
# buffer = f.read()
buffer = read_buffer(gltf.buffers[0].uri, Path(fin).parent)
return gltf, buffer
def read_buffer(uri, parent):
if is_data_uri(uri):
with urlopen(uri) as response:
return response.read()
with open(parent / uri, "rb") as f:
return f.read()
def is_data_uri(uri):
return uri.startswith("data:")
def copy_textures(fin, fout, images):
if not images:
return
src_parent = Path(fin).parent
dest_parent = Path(fout).parent
if src_parent == dest_parent:
return
for image in images:
dest = dest_parent / image.uri
try:
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_parent / image.uri, dest)
except Exception as e:
logger.error(e)
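

# Illustrative usage sketch (added example; the file names are hypothetical):
#
#   gltf, buffer = read_gltf('model.gltf')
#   copy_textures('model.gltf', 'out/model.gltf', getattr(gltf, 'images', None))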
|
py
|
1a5d366e35c123fccced96dceb3211d7782286ea
|
import io
import json
import re
from operator import itemgetter
from typing import Any, Dict, List, Optional, Union, cast
import pytest # type: ignore
from PIL import Image # type: ignore
from looker_sdk.sdk.api40 import methods as mtds
from looker_sdk.sdk.api40 import models as ml
@pytest.fixture(scope="module")
def sdk(sdk40) -> mtds.Looker40SDK:
return sdk40
def test_crud_user(sdk: mtds.Looker40SDK):
"""Test creating, retrieving, updating and deleting a user."""
# Create user
user = sdk.create_user(
ml.WriteUser(first_name="John", last_name="Doe", is_disabled=False, locale="fr")
)
assert isinstance(user, ml.User)
assert isinstance(user.id, int)
assert user.first_name == "John"
assert user.last_name == "Doe"
assert not user.is_disabled
assert user.locale == "fr"
# sudo checks
user_id = user.id
sdk.login_user(user_id)
user = sdk.me()
assert user.first_name == "John"
assert user.last_name == "Doe"
sdk.logout()
user = sdk.me()
assert user.first_name != "John"
assert user.last_name != "Doe"
# Update user and check fields we didn't intend to change didn't change
update_user = ml.WriteUser(is_disabled=True, locale="uk")
sdk.update_user(user_id, update_user)
user = sdk.user(user_id)
assert user.first_name == "John"
assert user.last_name == "Doe"
assert user.locale == "uk"
assert user.is_disabled
# Update user and check fields we intended to wipe out are now None
# first way to specify nulling out a field
update_user = ml.WriteUser(first_name=ml.EXPLICIT_NULL)
# second way
update_user.last_name = ml.EXPLICIT_NULL
sdk.update_user(user_id, update_user)
user = sdk.user(user_id)
assert user.first_name == ""
assert user.last_name == ""
# Try adding email creds
sdk.create_user_credentials_email(
user_id, ml.WriteCredentialsEmail(email="[email protected]")
)
user = sdk.user(user_id)
assert isinstance(user.credentials_email, ml.CredentialsEmail)
assert user.credentials_email.email == "[email protected]"
# Delete user
resp = sdk.delete_user(user_id)
assert resp == ""
def test_me_returns_correct_result(sdk: mtds.Looker40SDK):
"""me() should return the current authenticated user"""
me = sdk.me()
assert isinstance(me, ml.User)
assert isinstance(me.credentials_api3, list)
assert len(me.credentials_api3) > 0
assert isinstance(me.credentials_api3[0], ml.CredentialsApi3)
def test_me_field_filters(sdk: mtds.Looker40SDK):
"""me() should return only the requested fields."""
me = sdk.me("id, first_name, last_name")
assert isinstance(me, ml.User)
assert isinstance(me.id, int)
assert isinstance(me.first_name, str)
assert me.first_name != ""
assert isinstance(me.last_name, str)
assert me.last_name != ""
assert not me.display_name
assert not me.email
assert not me.personal_space_id
@pytest.mark.usefixtures("test_users")
def test_bad_user_search_returns_no_results(sdk: mtds.Looker40SDK):
"""search_users() should return an empty list when no match is found."""
resp = sdk.search_users(first_name="Bad", last_name="News")
assert isinstance(resp, list)
assert len(resp) == 0
@pytest.mark.usefixtures("test_users")
def test_search_users_matches_pattern(
sdk: mtds.Looker40SDK, users: List[Dict[str, str]], email_domain: str
):
"""search_users should return a list of all matches."""
user = users[0]
# Search by full email
search_email = f'{user["first_name"]}.{user["last_name"]}{email_domain}'
search_results = sdk.search_users_names(pattern=search_email)
assert len(search_results) == 1
assert search_results[0].first_name == user["first_name"]
assert search_results[0].last_name == user["last_name"]
assert search_results[0].email == search_email
# Search by first name
search_results = sdk.search_users_names(pattern=user["first_name"])
assert len(search_results) > 0
assert search_results[0].first_name == user["first_name"]
# First name with spaces
u = sdk.create_user(ml.WriteUser(first_name="John Allen", last_name="Smith"))
if u.id:
search_results = sdk.search_users_names(pattern="John Allen")
assert len(search_results) == 1
assert search_results[0].first_name == "John Allen"
assert search_results[0].last_name == "Smith"
# Delete user
resp = sdk.delete_user(u.id)
assert resp == ""
@pytest.mark.usefixtures("test_users")
def test_it_matches_email_domain_and_returns_sorted(
sdk: mtds.Looker40SDK, email_domain: str, users: List[Dict[str, str]]
):
"""search_users_names() should search users matching a given pattern and return
sorted results if sort fields are specified.
"""
search_results = sdk.search_users_names(
pattern=f"%{email_domain}", sorts="last_name, first_name"
)
assert len(search_results) == len(users)
sorted_test_data: List[Dict[str, str]] = sorted(
users, key=itemgetter("last_name", "first_name")
)
for actual, expected in zip(search_results, sorted_test_data):
assert actual.first_name == expected["first_name"]
assert actual.last_name == expected["last_name"]
@pytest.mark.usefixtures("test_users")
def test_delim_sequence(
sdk: mtds.Looker40SDK, email_domain: str, users: List[Dict[str, str]]
):
search_results = sdk.search_users_names(pattern=f"%{email_domain}")
assert len(search_results) == len(users)
delim_ids = ml.DelimSequence([cast(int, u.id) for u in search_results])
all_users = sdk.all_users(ids=delim_ids)
assert len(all_users) == len(users)
def test_it_retrieves_session(sdk: mtds.Looker40SDK):
"""session() should return the current session."""
current_session = sdk.session()
assert current_session.workspace_id == "production"
def test_it_updates_session(sdk: mtds.Looker40SDK):
"""update_session() should allow us to change the current workspace."""
# Switch workspace to dev mode
sdk.update_session(ml.WriteApiSession(workspace_id="dev"))
current_session = sdk.session()
assert isinstance(current_session, ml.ApiSession)
assert current_session.workspace_id == "dev"
# Switch workspace back to production
current_session = sdk.update_session(ml.WriteApiSession(workspace_id="production"))
assert isinstance(current_session, ml.ApiSession)
assert current_session.workspace_id == "production"
TQueries = List[Dict[str, Union[str, List[str], Dict[str, str]]]]
def test_it_creates_and_runs_query(
sdk: mtds.Looker40SDK, queries_system_activity: TQueries
):
"""create_query() creates a query and run_query() returns its result."""
for q in queries_system_activity:
limit = cast(str, q["limit"]) or "10"
request = create_query_request(q, limit)
query = sdk.create_query(request)
assert isinstance(query, ml.Query)
assert query.id
assert isinstance(query.id, int)
assert query.id > 0
sql = sdk.run_query(query.id, "sql")
assert "SELECT" in sql
json_ = sdk.run_query(query.id, "json")
assert isinstance(json_, str)
json_ = json.loads(json_)
assert isinstance(json_, list)
assert len(json_) == int(limit)
row = json_[0]
if q.get("fields"):
for field in q["fields"]:
assert field in row.keys()
csv = sdk.run_query(query.id, "csv")
assert isinstance(csv, str)
assert len(re.findall(r"\n", csv)) == int(limit) + 1
def test_it_runs_inline_query(sdk: mtds.Looker40SDK, queries_system_activity: TQueries):
"""run_inline_query() should run a query and return its results."""
for q in queries_system_activity:
limit = cast(str, q["limit"]) or "10"
request = create_query_request(q, limit)
json_resp = sdk.run_inline_query("json", request)
assert isinstance(json_resp, str)
json_: List[Dict[str, Any]] = json.loads(json_resp)
assert len(json_) == int(limit)
row = json_[0]
if q.get("fields"):
for field in q["fields"]:
assert field in row.keys()
csv = sdk.run_inline_query("csv", request)
assert isinstance(csv, str)
assert len(re.findall(r"\n", csv)) == int(limit) + 1
# only do 1 image download since it takes a while
png = sdk.run_inline_query("png", request)
assert isinstance(png, bytes)
try:
Image.open(io.BytesIO(png))
except IOError:
raise AssertionError("png format failed to return an image")
@pytest.mark.usefixtures("remove_test_looks")
def test_crud_look(sdk: mtds.Looker40SDK, looks):
"""Test creating, retrieving, updating and deleting a look."""
for l in looks:
request = create_query_request(l["query"][0], "10")
query = sdk.create_query(request)
look = sdk.create_look(
ml.WriteLookWithQuery(
title=l.get("title"),
description=l.get("description"),
deleted=l.get("deleted"),
is_run_on_load=l.get("is_run_on_load"),
public=l.get("public"),
query_id=query.id,
space_id=l.get("space_id") or str(sdk.me().personal_space_id),
)
)
assert isinstance(look, ml.LookWithQuery)
assert look.title == l.get("title")
assert look.description == l.get("description")
assert look.deleted == l.get("deleted")
assert look.is_run_on_load == l.get("is_run_on_load")
# TODO this is broken for local dev but works for CI...
# assert look.public == l.get("public")
assert look.query_id == query.id
assert look.space_id == l.get("space_id") or sdk.me().home_space_id
assert look.user_id == l.get("user_id") or sdk.me().id
# Update
assert isinstance(look.id, int)
updated_look = sdk.update_look(look.id, ml.WriteLookWithQuery(deleted=True))
assert updated_look.deleted
assert updated_look.title == look.title
look = sdk.update_look(look.id, ml.WriteLookWithQuery(deleted=False))
assert not look.deleted
def test_search_looks_returns_looks(sdk: mtds.Looker40SDK):
"""search_looks() should return a list of looks."""
search_results = sdk.search_looks()
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look, ml.Look)
assert look.title != ""
assert look.created_at is not None
def test_search_looks_fields_filter(sdk: mtds.Looker40SDK):
"""search_looks() should only return the requested fields passed in the fields
argument of the request.
"""
search_results = sdk.search_looks(fields="id, title, description")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look, ml.Look)
assert look.title is not None
assert look.created_at is None
def test_search_looks_title_fields_filter(sdk: mtds.Looker40SDK):
"""search_looks() should be able to filter on title."""
search_results = sdk.search_looks(title="An SDK%", fields="id, title")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look.id, int)
assert look.id > 0
assert "SDK" in look.title
assert look.description is None
def test_search_look_and_run(sdk: mtds.Looker40SDK):
"""run_look() should return CSV and JSON
CSV will use column descriptions
JSON will use column names
JSON_LABEL will use column descriptions
"""
search_results = sdk.search_looks(title="An SDK Look", fields="id, title")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look.id, int)
assert look.id > 0
assert "SDK" in look.title
assert look.description is None
actual = sdk.run_look(look_id=look.id, result_format="csv")
assert "Dashboard Count" in actual
assert "Dashboard ID" in actual
actual = sdk.run_look(look_id=look.id, result_format="json")
assert "dashboard.count" in actual
assert "dashboard.id" in actual
actual = sdk.run_look(look_id=look.id, result_format="json_label")
assert "Dashboard Count" in actual
assert "Dashboard ID" in actual
def create_query_request(q, limit: Optional[str] = None) -> ml.WriteQuery:
return ml.WriteQuery(
model=q.get("model"),
view=q.get("view"),
fields=q.get("fields"),
pivots=q.get("pivots"),
fill_fields=q.get("fill_fields"),
filters=q.get("filters"),
filter_expression=q.get("filter_expressions"),
sorts=q.get("sorts"),
limit=q.get("limit") or limit,
column_limit=q.get("column_limit"),
total=q.get("total"),
row_total=q.get("row_total"),
subtotals=q.get("subtotal"),
runtime=q.get("runtime"),
vis_config=q.get("vis_config"),
filter_config=q.get("filter_config"),
visible_ui_sections=q.get("visible_ui_sections"),
dynamic_fields=q.get("dynamic_fields"),
client_id=q.get("client_id"),
query_timezone=q.get("query_timezone"),
)
@pytest.mark.usefixtures("remove_test_dashboards")
def test_crud_dashboard(sdk: mtds.Looker40SDK, queries_system_activity, dashboards):
"""Test creating, retrieving, updating and deleting a dashboard.
"""
qhash: Dict[Union[str, int], ml.Query] = {}
for idx, q in enumerate(queries_system_activity):
limit = "10"
request = create_query_request(q, limit)
key = q.get("id") or str(idx)
qhash[key] = sdk.create_query(request)
for d in dashboards:
dashboard = sdk.create_dashboard(
ml.WriteDashboard(
description=d.get("description"),
hidden=d.get("hidden"),
query_timezone=d.get("query_timezone"),
refresh_interval=d.get("refresh_interval"),
title=d.get("title"),
background_color=d.get("background_color"),
load_configuration=d.get("load_configuration"),
lookml_link_id=d.get("lookml_link_id"),
show_filters_bar=d.get("show_filters_bar"),
show_title=d.get("show_title"),
slug=d.get("slug"),
space_id=d.get("space_id") or sdk.me().home_space_id,
text_tile_text_color=d.get("text_tile_text_color"),
tile_background_color=d.get("tile_background_color"),
tile_text_color=d.get("tile_text_color"),
title_color=d.get("title_color"),
)
)
assert isinstance(dashboard, ml.Dashboard)
if d.get("background_color"):
assert d["background_color"] == dashboard.background_color
if d.get("text_tile_text_color"):
assert d["text_tile_text_color"] == dashboard.text_tile_text_color
if d.get("tile_background_color"):
assert d["tile_background_color"] == dashboard.tile_background_color
if d.get("tile_text_color"):
assert d["tile_text_color"] == dashboard.tile_text_color
if d.get("title_color"):
assert d["title_color"] == dashboard.title_color
# Update dashboard
assert isinstance(dashboard.id, str)
update_response = sdk.update_dashboard(
dashboard.id, ml.WriteDashboard(deleted=True)
)
assert update_response.deleted
assert update_response.title == dashboard.title
dashboard = sdk.update_dashboard(dashboard.id, ml.WriteDashboard(deleted=False))
assert isinstance(dashboard.id, str)
assert not dashboard.deleted
if d.get("filters"):
for f in d["filters"]:
filter = sdk.create_dashboard_filter(
ml.WriteCreateDashboardFilter(
dashboard_id=dashboard.id,
name=f.get("name"),
title=f.get("title"),
type=f.get("type"),
default_value=f.get("default_value"),
model=f.get("model"),
explore=f.get("explore"),
dimension=f.get("dimension"),
row=f.get("row"),
listens_to_filters=f.get("listens_to_filters"),
allow_multiple_values=f.get("allow_multiple_values"),
required=f.get("required"),
)
)
assert isinstance(filter, ml.DashboardFilter)
assert filter.name == f.get("name")
assert filter.title == f.get("title")
assert filter.type == f.get("type")
assert filter.default_value == f.get("default_value")
assert filter.model == f.get("model")
assert filter.explore == f.get("explore")
assert filter.dimension == f.get("dimension")
assert filter.row == f.get("row")
assert filter.allow_multiple_values == f.get(
"allow_multiple_values", False
)
assert filter.required == f.get("required", False)
if d.get("tiles"):
for t in d["tiles"]:
tile = sdk.create_dashboard_element(
ml.WriteDashboardElement(
body_text=t.get("body_text"),
dashboard_id=dashboard.id,
look=t.get("look"),
look_id=t.get("look_id"),
merge_result_id=t.get("merge_result_id"),
note_display=t.get("note_display"),
note_state=t.get("note_state"),
note_text=t.get("note_text"),
query=t.get("query"),
query_id=get_query_id(qhash, t.get("query_id")),
refresh_interval=t.get("refresh_interval"),
subtitle_text=t.get("subtitle_text"),
title=t.get("title"),
title_hidden=t.get("title_hidden"),
type=t.get("type"),
)
)
assert isinstance(tile, ml.DashboardElement)
assert tile.dashboard_id == dashboard.id
assert tile.title == t.get("title")
assert tile.type == t.get("type")
def get_query_id(
qhash: Dict[Union[str, int], ml.Query], id: Union[str, int]
) -> Optional[int]:
if isinstance(id, str) and id.startswith("#"):
id = id[1:]
# if id is invalid, default to first query. test data is bad
query = qhash.get(id) or list(qhash.values())[0]
query_id = query.id
elif (isinstance(id, str) and id.isdigit()) or isinstance(id, int):
query_id = int(id)
else:
query_id = None
return query_id
|
py
|
1a5d3685df7228838ed1fa81272e3e58876842cd
|
#!/usr/bin/python3
# Copyright 2018-2019 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
|
py
|
1a5d371336f5b1c2483562eefc81a7bd313eb0ec
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import include, url
# User-defined urlconf
urlpatterns_custom = [
url(r"^", include("gcloud.core.urls")),
url(r"^", include("gcloud.resources.urls")),
url(r"^apigw/", include("gcloud.apigw.urls")),
url(r"^common_template/", include("gcloud.commons.template.urls")),
url(r"^template/", include("gcloud.tasktmpl3.urls")),
url(r"^taskflow/", include("gcloud.taskflow3.urls")),
url(r"^appmaker/", include("gcloud.contrib.appmaker.urls")),
url(r"^develop/", include("gcloud.contrib.develop.urls")),
url(r"^pipeline/", include("pipeline_plugins.base.urls")),
url(r"^pipeline/", include("pipeline_plugins.components.urls")),
url(r"^pipeline/", include("pipeline_plugins.variables.urls")),
url(r"^analysis/", include("gcloud.contrib.analysis.urls")),
url(r"^periodictask/", include("gcloud.periodictask.urls")),
url(r"^weixin/", include("weixin.urls")),
url(r"^weixin/login/", include("weixin.core.urls")),
url(r"^admin/", include("gcloud.contrib.admin.urls")),
]
|
py
|
1a5d373b57f60c8bba72fb577b626676edf57bf0
|
from google.cloud import bigquery, storage
import argparse
from abc import ABCMeta
import yaml
import os
from ga_bq_pipeline.logger import Logger
from pathlib import Path
GOOGLE_APP_CREDENTIALS_ENV_NAME = 'GOOGLE_APPLICATION_CREDENTIALS'
GOOGLE_CREDENTIALS_PATH = '/google-keys/'
ROOT = str(Path(os.path.dirname(os.path.abspath(__file__))).parent.parent)
class ETL(metaclass=ABCMeta):
"""
Skeleton of a generic ETL pipeline.
    This class takes care of parsing and loading the environment and arguments.
    A concrete ETL MUST inherit from this class and implement the 3 abstract
    methods:
def pre_execution_cleansing(self):
pass
def pipeline(self)
pass
def post_execution_cleanup(self):
pass
    These methods are called by the execute() method and run in sequence.
"""
def __init__(self, app_name, conf_file_path, args_file_name, logger_name, env_name=None):
"""
Configure the ETL class
:param app_name: application name used for logging references
:param conf_file_path: environment configuration directory path
:param args_file_name: arguments definition file path
:param logger_name: logger name
"""
self.__get_arguments(args_file_name)
# configure the logging
self._logger = Logger(app_name, logger_name)
env_name = self.__args.get('environment', None) if env_name is None else env_name
# get the environment variables from env configuration file
self.__get_env_vars(conf_file_path, env_name)
prefix = ROOT if env_name in ['local', 'local-dev'] else ''
# Google key credentials file path
if not os.environ.get(GOOGLE_APP_CREDENTIALS_ENV_NAME):
os.environ[GOOGLE_APP_CREDENTIALS_ENV_NAME] = prefix + GOOGLE_CREDENTIALS_PATH + self.env['service_account']
@property
def logger(self):
"""
Get the logger
:return: logger
"""
return self._logger
@property
def args(self):
"""
Get the arguments
:return: arguments
"""
return self.__args
@property
def env(self):
"""
Get the environment
:return: environment variables
"""
return self.__env
@property
def service_account(self):
"""
Get the Service Account
:return: Service Account File Location
"""
return os.environ.get(GOOGLE_APP_CREDENTIALS_ENV_NAME)
@property
def bq_client(self):
"""
Creates a BigQuery Client
:return: BigQuery Client
"""
return bigquery.Client()
@property
def gs_client(self):
"""
Creates a Cloud Storage Client
:return: Cloud Storage Client
"""
return storage.Client()
@property
def bigquery(self):
"""
Get BigQuery properties
"""
return self.env['bigquery']
@property
def storage(self):
"""
        Get Cloud Storage properties
"""
return self.env['storage']
def __get_arguments(self, args_file_name):
"""
Get all arguments from the arg configuration file and parse them.
:param args_file_name: arguments definition file path
"""
if args_file_name is None:
self.__args = {}
return
try:
with open(args_file_name) as args_file:
args_data = yaml.load(args_file.read(), Loader=yaml.FullLoader)
except IOError as ex:
self.logger.critical('Fail to read configuration file: {0}'.format(ex))
return
try:
description = args_data['description']
except KeyError:
print("Argument description is required.")
return
parser = argparse.ArgumentParser(description=description)
try:
args = args_data['args']
except KeyError:
print("No arguments is found!")
return
for arg in args:
try:
short = args[arg]['short']
except KeyError:
print("Short name is required for an argument!")
return
arg_required = args[arg].get('required', False)
arg_choices = args[arg].get('choices', None)
arg_help = args[arg].get('help', None)
arg_type = int if args[arg].get('type', None) == 'int' else None
parser.add_argument(
'-{0}'.format(short),
'--{0}'.format(arg),
required=arg_required,
help=arg_help,
choices=arg_choices,
type=arg_type
)
self.__args = vars(parser.parse_args())
def __get_env_vars(self, env_path, env_name):
"""
Get the environment variables from env configuration file
:param env_path: environment configuration directory path
:param env_name: environment name
"""
conf_file_name = '{env_path}/{env_name}.yaml'.format(
env_path=env_path,
env_name=env_name
)
try:
with open(conf_file_name) as conf:
env = yaml.load(conf.read(), Loader=yaml.FullLoader)
except IOError as ex:
self.logger.critical('Fail to read environment variables: {0}'.format(ex))
return
self.__env = env
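

# Illustrative subclass sketch (added example; the names, paths, and hook bodies are
# hypothetical and only illustrate the three methods described in the ETL docstring):
#
#   class MyPipeline(ETL):
#       def pre_execution_cleansing(self):
#           pass
#
#       def pipeline(self):
#           pass  # e.g. load from Cloud Storage, transform, write to BigQuery
#
#       def post_execution_cleanup(self):
#           pass
#
#   etl = MyPipeline('my-app', ROOT + '/conf', 'conf/args.yaml', 'my-logger', env_name='local')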
|
py
|
1a5d39a1623a7bccf0c8f8c5dc6b3ee0ddd7ef65
|
from typing import NamedTuple
from src.message import Message
from src.lib import get_by_path
color = {
4293271831: "red",
4293467747: "pink",
4294278144: "orange",
4294953512: "yellow",
4280150454: "green",
4278248959: "water",
4280191205: "blue"
}
class MessageRendererTuple(NamedTuple):
message: str
authorName: str
authorExternalChannelId: str
timestampUsec: str
timestampText: str
message_type: str
purchaseAmountText: str
bodyBackgroundColor: str
id: str
class MessageRenderer:
def __init__(self, item: dict) -> None:
conv_c = {
"liveChatTextMessageRenderer": LiveChatText,
"liveChatPaidMessageRenderer": LiveChatPaid,
"liveChatMembershipItemRenderer": LiveChatMembership,
"liveChatViewerEngagementMessageRenderer": LiveChatViewerEngagementMessage,
"liveChatPaidStickerRenderer": LiveChatPaidSticker,
}
self.item = item
renderer_type = list(item.keys())[0]
if "showItemEndpoint" in list(item[renderer_type].keys()):
            message = f"[warn] showItemEndpoint [id] {item[list(item.keys())[0]]['id']}"
            raise KeyError(message)
else:
input = item[renderer_type]
self.renderer = conv_c[renderer_type](input)
class LiveChatText(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
str(Message(input["message"]["runs"])),
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatText",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
class LiveChatPaid(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
message = str(Message(input["message"]["runs"])
) if "message" in input.keys() else ""
params = [
message,
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatPaid",
input["purchaseAmountText"]["simpleText"],
color[input["bodyBackgroundColor"]],
input["id"]
]
return super().__new__(cls, *params)
class LiveChatMembership(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
"".join(x["text"] for x in input["headerSubtext"]["runs"]),
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatMembership",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
class LiveChatPaidSticker(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
message = get_by_path(
input,
[
"sticker",
"accessibility",
"accessibilityData",
"label"
]
)
params = [
message,
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatPaidSticker",
input["purchaseAmountText"]["simpleText"],
color[input["moneyChipBackgroundColor"]],
input["id"]
]
return super().__new__(cls, *params)
class LiveChatViewerEngagementMessage(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
"".join(x["text"] for x in input["message"]["runs"]),
"YOUTUBE",
"",
input["timestampUsec"],
"0:00",
"LiveChatViewerEngagementMessage",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
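

# Illustrative usage sketch (added example; the payload shape is inferred from
# LiveChatText above and may not match real live-chat data exactly):
#
#   item = {
#       "liveChatTextMessageRenderer": {
#           "message": {"runs": [{"text": "hello"}]},
#           "authorName": {"simpleText": "viewer"},
#           "authorExternalChannelId": "UCxxxxxxxxxxxxxxxxxxxxxx",
#           "timestampUsec": "1600000000000000",
#           "timestampText": {"simpleText": "0:01"},
#           "id": "abc123",
#       }
#   }
#   rendered = MessageRenderer(item).renderer   # -> a LiveChatText named tuple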
|
py
|
1a5d3a66c77006a1f9cc885eab3cb071088f8be9
|
# uncompyle6 version 2.9.10
# Python bytecode 2.6 (62161)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: c:\Temp\build\ZIBE\pyreadline\__init__.py
# Compiled at: 2012-12-13 16:07:50
import unicode_helper
import logger
import clipboard
import lineeditor
import modes
import console
from rlmain import *
import rlmain
|
py
|
1a5d3e660bc6175e090f9ff33f00273b180de0e3
|
from sim_common import *
ass_cards = MyCards([
Assist("劇場乳神", 1207, 213,
643, 312, 406, 438, 289,
skill=Skill(buffs=[Effect(Scope.my_team, Ability.dex, 0.2)], debuffs=[Effect(Scope.foes, Endurance.foe, 0.15)],
adj_buffs=[], )
),
Assist("奧娜", 1202, 238,
412, 232, 401, 663, 412,
skill=Skill(
buffs=[Effect(Scope.my_self, Ability.counter_rate, 0.3), Effect(Scope.my_self, Ability.guard_rate, 0.3)],
debuffs=[Effect(Scope.foes, Endurance.phy, 0.15), Effect(Scope.foes, Endurance.mag, 0.15)],
adj_buffs=[], )
),
Assist("情人埃伊娜", 946, 196,
299, 180, 304, 434, 315,
skill=Skill(buffs=[Effect(Scope.my_team, Damage.earth, 0.1), Effect(Scope.my_team, Damage.light, 0.1)],
debuffs=[Effect(Scope.foes, Endurance.earth, 0.05), Effect(Scope.foes, Endurance.light, 0.05)],
adj_buffs=[], )
),
Assist("新娘乳神", 1015, 158,
221, 168, 266, 249, 486,
skill=Skill(buffs=[Effect(Scope.my_team, Ability.mag, 0.15), Effect(Scope.my_team, Ability.crit_rate, 0.08)],
debuffs=[], adj_buffs=[], )
),
Assist("溫泉乳神", 942, 278,
293, 244, 406, 329, 591,
skill=Skill(buffs=[], debuffs=[Effect(Scope.foes, Ability.str, 0.15)], adj_buffs=[], )
),
Assist("洋裝埃伊娜", 1197, 215,
265, 227, 391, 393, 652,
skill=Skill(buffs=[Effect(Scope.my_team, Endurance.foes, 0.1), Effect(Scope.my_team, Endurance.phy, 0.15)],
debuffs=[], adj_buffs=[], )
),
Assist("伯爵希兒", 1188, 138,
323, 268, 297, 206, 389,
skill=Skill(buffs=[Effect(Scope.my_self, Ability.energy_bar, 0.66)], debuffs=[], adj_buffs=[], )
),
])
adv_cards = MyCards([
Adventurer("折紙", 4045, 414,
423, 548, 737, 929, 2045,
skills=[Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=34, buffs=[],
debuffs=[Effect(Scope.foe, Endurance.light, 0.35, 4)], adj_buffs=[], ),
Skill(Scope.foe, Power.mid, Damage.light, Attack.mag, mp=30,
buffs=[Effect(Scope.my_self, Ability.mag, 0.8, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foes, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=29, buffs=[],
debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag, temp_boost=True, is_special=True,
buffs=[], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.pene, 0.2),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.fairy,
),
Adventurer("情人艾斯", 4017, 421,
459, 560, 744, 902, 1802,
skills=[
Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=41, buffs=[], debuffs=[],
adj_buffs=[Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.mag)], ),
Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=44,
buffs=[Effect(Scope.my_team, Damage.light, 0.2, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.low, Damage.light, Attack.mag, mp=27,
buffs=[Effect(Scope.my_self, Ability.mag, 0.75, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag, temp_boost=True, is_special=True, buffs=[],
debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.pene, 0.2),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.rock,
),
Adventurer("春姬", 4140, 438,
1040, 514, 848, 838, 1647,
skills=[Skill(Scope.foes, Power.mid, Damage.fire, Attack.mag, mp=34, buffs=[],
debuffs=[Effect(Scope.foes, Ability.str, 0.4, 3), Effect(Scope.foes, Ability.mag, 0.4, 3)],
adj_buffs=[], ),
Skill(mp=12, buffs=[Effect(Scope.my_self, Recover.mp_imm, 0.15),
Effect(Scope.my_team, Ability.counter_rate, 0.3, 3),
Effect(Scope.my_team, Ability.pene_rate, 0.3, 3)], debuffs=[],
adj_buffs=[], ),
Skill(mp=141, buffs=[Effect(Scope.my_team, Recover.hp_imm, 0.3)], debuffs=[],
adj_buffs=[Effect(Scope.my_team, AdjBuff.extend_buff, 2, 0),
Effect(Scope.foes, AdjBuff.extend_debuff, 2, 0)], ),
Skill(is_special=True, buffs=[Effect(Scope.my_team, Recover.hp_imm, 0.8),
Effect(Scope.my_team, Recover.hp_turn, 0.4, 3),
Effect(Scope.my_team, Ability.str, 1.0, 3),
Effect(Scope.my_team, Ability.mag, 1.0, 3)], debuffs=[],
adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.guard, 0.3),
Effect(Scope.my_self, Endurance.wind, 0.35),
Effect(Scope.my_self, Ability.mag, 0.25),
Effect(Scope.my_self, Ability.agi, 0.25),
Effect(Scope.my_self, Ability.dex, 0.25),
Effect(Scope.my_self, Recover.hp_turn, 0.04),
Effect(Scope.my_self, Recover.mp_turn, 0.04)])],
counter_hp=True,
),
Adventurer("偶像莉涅", 2510 + 1084, 312 + 87,
721 + 201, 212 + 69, 413 + 81, 762 + 284, 727 + 304,
skills=[
Skill(Scope.foes, Power.super, Damage.light, Attack.phy, mp=59,
adj_buffs=[Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.str),
Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.mag)]),
Skill(is_fast=True, p=45, buffs=[Effect(Scope.my_team, Ability.energy_bar, 0.33, 4),
Effect(Scope.my_team, Ability.counter_rate, 0.20, 4),
Effect(Scope.my_team, Ability.crit_rate, 0.20, 4),
Effect(Scope.my_team, Ability.pene_rate, 0.20, 4)]),
Skill(Scope.foe, Power.high, Damage.light, Attack.phy, mp=25,
boost_by_buff=[Effect(Scope.my_self, Ability.crit_rate, 0.40)]),
]),
Adventurer("無人島春姬", 2103, 313,
209, 183, 397, 392, 849,
skills=[Skill(mp=52, buffs=[Effect(Scope.my_self, Ability.mag, 0.6, 4),
Effect(Scope.my_self, Ability.dex, 0.6, 4),
Effect(Scope.my_self, Damage.light, 0.6, 4),
Effect(Scope.my_team, Ability.mag, 0.3, 4),
Effect(Scope.my_team, Ability.dex, 0.3, 4),
Effect(Scope.my_team, Damage.light, 0.3, 4)], debuffs=[], adj_buffs=[], ),
Skill(mp=20, buffs=[], debuffs=[Effect(Scope.foe, Endurance.foe, 0.2, 4)],
adj_buffs=[Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.str),
Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.mag),
Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.agi)], ),
Skill(Scope.foe, Power.super, Damage.light, Attack.mag,
boost_by_buff=[Effect(Scope.my_self, Ability.mag, 0.4)], mp=136,
buffs=[Effect(Scope.my_team, Recover.hp_turn, 0.2, 1)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag,
boost_by_buff=[Effect(Scope.my_self, Ability.mag, 0.8)], is_special=True,
buffs=[Effect(Scope.my_team, Recover.hp_turn, 0.4, 3),
Effect(Scope.my_team, Damage.light, 0.8, 3)], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(
buffs=[Effect(Scope.my_self, Recover.hp_turn, 0.08), Effect(Scope.my_self, Recover.mp_turn, 0.08),
Effect(Scope.my_self, SuccessUp.counter, 0.5),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.undead,
),
Adventurer("18", 2506, 224,
1387, 599, 601, 416, 981,
skills=[Skill(is_fast=True, mp=47, buffs=[Effect(Scope.my_team, Endurance.foes, 0.35, 3),
Effect(Scope.my_team, Endurance.foe, 0.35, 3)], debuffs=[],
adj_buffs=[], ),
Skill(Scope.foe, Power.high, Damage.earth, Attack.phy, temp_boost=True, mp=30, buffs=[],
debuffs=[], adj_buffs=[], ),
Skill(Scope.foes, Power.super, Damage.earth, Attack.phy, temp_boost=True, mp=69, buffs=[],
debuffs=[], adj_buffs=[Effect(Scope.foes, AdjBuff.shorten_buff, 1, 0)], ),
Skill(Scope.foes, Power.ultra, Damage.earth, Attack.phy, temp_boost=True, is_special=True,
buffs=[], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.guard, 0.3),
Effect(Scope.my_self, Endurance.thunder, 0.35),
Effect(Scope.my_self, Ability.str, 0.4),
Effect(Scope.my_self, Ability.end, 0.4)])],
killer=Killer.dragon,
),
])
boss_cards = MyCards([
Adventurer("九魔姬", 100000000, 0,
0, 100, 0, 0, 1000,
skills=[Skill(Scope.foes, Power.low, Damage.dark, Attack.mag)],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, Endurance.fire, 0.1)])]
),
Adventurer("紅髮怪人", 100000000, 0,
100, 100, 0, 0, 0,
skills=[Skill(Scope.foes, Power.low, Damage.none, Attack.phy),
Skill(Scope.foes, Power.high, Damage.none, Attack.phy),
Skill(debuffs=[Effect(Scope.my_self, Endurance.mag, 0.7, 15)]),
Skill(adj_buffs=[Effect(Scope.my_self, AdjBuff.clear_debuff, 0, 0)]),
Skill(buffs=[Effect(Scope.my_self, Ability.str, 0.20, 3)]),
],
passive_skills=[Skill(debuffs=[Effect(Scope.my_self, Endurance.light, 0.7)])],
init_skill=Skill(debuffs=[Effect(Scope.my_self, Endurance.mag, 0.7, 15)], idx="init"),
),
])
ranker = Ranker()
boss1 = boss_cards.get("紅髮怪人")
enemy_team = Team(1, [boss1.set_steps([
[1, 1], # 1
[1, 1, 1], # 2
[1, 1, 1, 5], # 3
[1, 1, 2], # 4
[1, 1, 4], # 5
[1, 1, 1], # 6
[1, 1, 1, 5], # 7
[1, 1, 2, 3], # 8
[1, 1, 4], # 9
[1, 1, 1], # 10
[1, 1, 1, 5], # 11
[1, 1, 2, 3], # 12
[1, 1, 1], # 13
[1, 1, 1, 1], # 14
1, # 15
])
])
p1 = adv_cards.get("無人島春姬").set_assist(ass_cards.get("溫泉乳神"))
p2 = adv_cards.get("折紙").set_assist(ass_cards.get("奧娜"))
p3 = adv_cards.get("情人艾斯").set_assist(ass_cards.get("洋裝埃伊娜"))
p4 = adv_cards.get("偶像莉涅").set_one_shot().set_assist(ass_cards.get("新娘乳神"))
p5 = adv_cards.get("18").set_one_shot().set_assist(ass_cards.get("情人埃伊娜"))
p6 = adv_cards.get("春姬").set_assist(ass_cards.get("劇場乳神"))
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
my_team = Team(4, [p1.set_steps([1, 2, 3, 4, 3, 2, 3, 3, 3, 2, 3, 2, 4, 3, 3]),
p2.set_steps([2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1]),
p3.set_steps([3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1]),
p4.set_steps([2]),
p5.set_steps([x, 1]),
p6.set_steps([x, x, 1, 3, 3, 1, 3, 2, 3, 1, 3, 2, 3, 3, 1]),
]
)
battle = BattleStage(15)
battle.set_player_team(my_team).set_enemy_team(enemy_team)
battle.run()
rank = ranker.add(battle)
ranker.report(rank=rank, detail=True)
# ranker.report(limit=1, detail=False)
# ranker.report(rank=rank, detail=False)
# ranker.report()
|
py
|
1a5d3e9b2f509a5579b14f12d7c92cf56010c7d4
|
# Generated by Django 2.2.3 on 2019-07-24 12:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0003_auto_20190724_1411'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(default='Co chcesz napisać?'),
),
migrations.AlterField(
model_name='post',
name='overview',
field=models.TextField(max_length=300),
),
]
|
py
|
1a5d3ef6897abbdec7ac2d3d5707c1feca9e10bc
|
from google.appengine.ext import ndb
from user import User
class Comment(ndb.Model):
"""comment info"""
post_id = ndb.IntegerProperty(required = True)
content = ndb.StringProperty(required = True)
created = ndb.DateTimeProperty(auto_now_add = True)
author = ndb.StructuredProperty(User)
|
py
|
1a5d3f01772774eebb6a3075da4a5062d39c7de7
|
class RNAParams(object):
def __init__(self, args):
self.rna = args.rna
|
py
|
1a5d3f0f2caae6aa79bd51afc46653daea445887
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'passport_api.settings')
application = get_wsgi_application()
|
py
|
1a5d3f6bef3b1e3a1d33bfba1f56655309548dd5
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5c!5oa%m=_2e&1n*#odl6pzzt(s2^9tjg8x&(+fr*m3jm(!u%h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
py
|
1a5d3fb6ef687f3c49e025579145c0343c353f2f
|
import os
import shutil
import locale
from time import sleep
user = os.environ['USERPROFILE']
language = locale.getdefaultlocale()[0][0:2]
activeTime = 8.0
print(language)
def extension_type(event):
return event.src_path[event.src_path.rindex('.') + 1:]
def is_text_file(event):
if extension_type(event) == 'txt':
return True
return False
def is_pdf_file(event):
if extension_type(event) == 'pdf':
return True
return False
def is_mp3_file(event):
if extension_type(event) in ('mp3', "wav", "m4a", "flac", "aiff", "ogg"):
return True
return False
def is_image_file(event):
if extension_type(event) in ('png', 'jpg', 'jpeg', 'bmp', 'gif', 'raw', 'ico'):
return True
return False
def is_video_file(event):
if extension_type(event) in ('mov', 'mp4', 'avi', 'flv'):
return True
return False
def is_doc_file(event):
if extension_type(event) in ('doc', 'docx'):
return True
return False
def is_spreadsheet_file(event):
if extension_type(event) in ('xls', 'xlsx'):
return True
return False
def is_presentation_file(event):
if extension_type(event) in ('ppt', 'pptx'):
return True
return False
def is_compacted_file(event):
if extension_type(event) in ('rar', 'zip', '7z', 'iso'):
return True
return False
def is_code_file(event):
if extension_type(event) in ('py', "jl", 'cs', 'js', 'php', 'html', 'sql', 'css', 'c', 'h', 'cpp', 'java', 'asp', 'aspx', 'axd', 'asx', 'asmx', 'ashx', 'cfm', 'yaws', 'swf', 'htm', 'xhtml', 'jhtml', "jsp", "jspx", "wss", "do", "cmd", "action", "pl", "phtml", "php3", "php4", "rb", "rhtml", "shtml", "rss", "svg", ):
return True
return False
def is_invoice_file(event):
if extension_type(event) in ('xml',):  # one-element tuple; ('xml') alone is a plain string and would match substrings
return True
return False
def is_executable_file(event):
if extension_type(event) in ('exe', 'msi', 'run', 'deb'):
return True
return False
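# Illustrative sketch only (not used by this script as written): the checks above could
# be table-driven with a dict from category name to extension tuple. The names
# EXTENSION_CATEGORIES and category_of are hypothetical and introduced here.
EXTENSION_CATEGORIES = {
'text': ('txt',),
'pdf': ('pdf',),
'audio': ('mp3', 'wav', 'm4a', 'flac', 'aiff', 'ogg'),
'image': ('png', 'jpg', 'jpeg', 'bmp', 'gif', 'raw', 'ico'),
'video': ('mov', 'mp4', 'avi', 'flv'),
'compacted': ('rar', 'zip', '7z', 'iso'),
}
def category_of(event):
# Return the first category whose extension tuple contains the file's extension, else None.
ext = extension_type(event)
for name, extensions in EXTENSION_CATEGORIES.items():
if ext in extensions:
return name
return None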
def make_folder(foldername):
os.chdir('{}\\Downloads'.format(user))
if os.path.exists(foldername):
if language == 'en':
print('\nDestination folder already exists, skipping creation')
elif language == 'pt':
print('\nA pasta destino já existe, pulando criação')
return os.getcwd() + os.sep + str(foldername)
else:
os.mkdir(str(foldername))
return os.getcwd() + os.sep + str(foldername)
def move_to_new_corresponding_folder(event, path_to_new_folder):
try:
if language == 'en':
print('\nMoving file in {} seconds...'.format(activeTime))
sleep(activeTime / 2)
print('\nMoving file in {} seconds...'.format(activeTime / 2))
sleep(activeTime / 2)
shutil.move(event.src_path, path_to_new_folder)
print('\nFile moved successfully')
elif language == 'pt':
print('\nMovendo arquivo em {} segundos...'.format(activeTime))
sleep(activeTime / 2)
print('Movendo o arquivo em {} segundos...'.format(activeTime / 2))
sleep(activeTime / 2)
shutil.move(event.src_path, path_to_new_folder)
print('\nArquivo movido com sucesso')
except:
if language == 'en':
print('\nFile already exists in destination folder')
elif language == 'pt':
print('\nO arquivo já existe na pasta de destino')
pass
|
py
|
1a5d3fcc513d3832f68b2c5ad7b4a20ce24a9d42
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import LiveScenarioTest, ScenarioTest, ResourceGroupPreparer, record_only
from knack.util import CLIError
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class TestMonitorAutoscaleScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale')
def test_monitor_autoscale_basic(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3', checks=[
self.check('profiles[0].capacity.default', 3),
self.check('profiles[0].capacity.minimum', 3),
self.check('profiles[0].capacity.maximum', 3)
])
self.cmd('monitor autoscale list -g {rg}',
checks=self.check('length(@)', 1))
self.cmd('monitor autoscale show -g {rg} -n {vmss}')
# verify that count behaves correctly
self.cmd('monitor autoscale update -g {rg} -n {vmss} --count 2', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 2),
self.check('profiles[0].capacity.maximum', 2)
])
self.cmd('monitor autoscale update -g {rg} -n {vmss} --min-count 1 --count 2 --max-count 4', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 1),
self.check('profiles[0].capacity.maximum', 4)
])
self.cmd('monitor autoscale update -g {rg} -n {vmss} --max-count 5', checks=[
self.check('profiles[0].capacity.default', 2),
self.check('profiles[0].capacity.minimum', 1),
self.check('profiles[0].capacity.maximum', 5)
])
self.cmd('monitor autoscale delete -g {rg} -n {vmss}')
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rules')
def test_monitor_autoscale_rules(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --min-count 1 --count 3 --max-count 5')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}')
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "Percentage CPU > 75 avg 5m" --scale to 5', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --timegrain "avg 5m" --condition "Percentage CPU < 30 avg 10m" --scale in 50% --cooldown 10', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'LessThan'),
self.check('metricTrigger.threshold', 30),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT10M'),
self.check('metricTrigger.timeGrain', 'PT5M'),
self.check('scaleAction.cooldown', 'PT10M'),
self.check('scaleAction.direction', 'Decrease'),
self.check('scaleAction.type', 'PercentChangeCount'),
self.check('scaleAction.value', '50')
])
self.cmd('monitor autoscale rule create -g {rg} --autoscale-name {vmss} --timegrain "min 1m" --condition "Percentage CPU < 10 avg 5m" --scale to 1', checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'LessThan'),
self.check('metricTrigger.threshold', 10),
self.check('metricTrigger.statistic', 'Min'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '1')
])
# verify order is stable
list_1 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
with self.assertRaisesRegex(CLIError, 'Please double check the name of the autoscale profile.'):
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name falseprofile')
list_2 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_1) == 3 and len(list_2) == 3)
for x in range(len(list_1)):
self.assertTrue(list_1[x] == list_2[x])
# verify copy works
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test2 --start 2018-03-01 --end 2018-04-01 --min-count 1 --count 3 --max-count 5 --timezone "Pacific Standard Time"')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test3 --start 2018-05-01 --end 2018-06-01 --min-count 1 --count 2 --max-count 5 --timezone "Pacific Standard Time"')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n test1 --start 2018-01-01 --end 2018-02-01 --min-count 1 --count 2 --max-count 5 --timezone "Pacific Standard Time" --copy-rules default')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test1',
checks=self.check('length(@)', 3))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test2',
checks=self.check('length(@)', 0))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test3',
checks=self.check('length(@)', 0))
self.cmd('monitor autoscale rule copy -g {rg} --autoscale-name {vmss} --source-schedule test1 --dest-schedule test2 --index "*"')
self.cmd('monitor autoscale rule copy -g {rg} --autoscale-name {vmss} --source-schedule test2 --dest-schedule test3 --index 0')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test2',
checks=self.check('length(@)', 3))
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss} --profile-name test3',
checks=self.check('length(@)', 1))
# verify rule removal by index and remove all works
self.cmd('monitor autoscale rule delete -g {rg} --autoscale-name {vmss} --index 2')
list_3 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_3) == 2)
self.cmd('monitor autoscale rule delete -g {rg} --autoscale-name {vmss} --index "*"')
list_4 = self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}').get_output_in_json()
self.assertTrue(len(list_4) == 0)
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rule_with_dimensions')
def test_monitor_autoscale_rule_with_dimensions(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd(
'vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$ --instance-count 2')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --min-count 1 --count 3 --max-count 5')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}')
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {vmss} --condition "\'Mynamespace.abcd\' Percentage CPU > 75 avg 5m where VMName == cliname1 or cliname2" --scale to 5',
checks=[
self.check('metricTrigger.metricName', 'Percentage CPU'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 75),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT5M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'VMName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', 'cliname1'),
self.check('metricTrigger.dimensions[0].values[1]', 'cliname2'),
self.check('metricTrigger.metricNamespace', 'Mynamespace.abcd'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'None'),
self.check('scaleAction.type', 'ExactCount'),
self.check('scaleAction.value', '5')
])
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {vmss}', checks=[
self.check('length(@)', 3)
])
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_fixed')
def test_monitor_autoscale_fixed(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'sched': 'Christmas'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testadmin --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3')
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n {sched} --start 2018-12-24 --end 2018-12-26 --count 5 --timezone "Pacific Standard Time"', checks=[
self.check('capacity.default', 5),
self.check('capacity.minimum', 5),
self.check('capacity.maximum', 5),
self.check('fixedDate.end', '2018-12-26T00:00:00+00:00'),
self.check('fixedDate.start', '2018-12-24T00:00:00+00:00'),
self.check('fixedDate.timeZone', 'Pacific Standard Time'),
self.check('recurrence', None)
])
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 2))
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n {sched}')
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 1))
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_recurring')
def test_monitor_autoscale_recurring(self, resource_group):
import json
import time
sleep_time = 3
self.kwargs.update({
'vmss': 'vmss1'
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --admin-username testname --admin-password TestTest12#$')
self.kwargs['vmss_id'] = self.cmd('vmss show -g {rg} -n {vmss}').get_output_in_json()['id']
self.cmd('monitor autoscale create --resource {vmss_id} --count 3')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n weekend --recurrence week sat sun --count 1 --timezone "Pacific Standard Time"')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile create -g {rg} --autoscale-name {vmss} -n weekday --recurrence week mo tu we th fr --count 4 --timezone "Pacific Standard Time"')
time.sleep(sleep_time)
# 2 profiles + 2 "default" profiles + default "default" profile
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 5))
# should update all "default" profiles
value = 4
self.cmd('monitor autoscale update -g {{rg}} -n {{vmss}} --count {}'.format(value))
time.sleep(sleep_time)
schedules = self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}').get_output_in_json()
def _is_default(val):
if not val['fixedDate'] and not val['recurrence']:
return True
try:
json.loads(val['name'])
return True
except ValueError:
return False
for schedule in [x for x in schedules if _is_default(x)]:
self.assertTrue(int(schedule['capacity']['default']) == value)
self.assertTrue(int(schedule['capacity']['minimum']) == value)
self.assertTrue(int(schedule['capacity']['maximum']) == value)
# should delete the weekend profile and its matching default
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n weekend')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 3))
# should delete the weekday profile and its matching default
self.cmd('monitor autoscale profile delete -g {rg} --autoscale-name {vmss} -n weekday')
time.sleep(sleep_time)
self.cmd('monitor autoscale profile list -g {rg} --autoscale-name {vmss}',
checks=self.check('length(@)', 1))
# inexplicably fails on CI so making into a live test
class TestMonitorAutoscaleTimezones(LiveScenarioTest):
def test_monitor_autoscale_timezones(self):
self.cmd('monitor autoscale profile list-timezones',
checks=self.check('length(@)', 136))
self.cmd('monitor autoscale profile list-timezones -q pacific',
checks=self.check('length(@)', 6))
self.cmd('monitor autoscale profile list-timezones --offset +12',
checks=self.check('length(@)', 6))
self.cmd('monitor autoscale profile list-timezones -q pacific --offset -4',
checks=self.check('length(@)', 1))
class TestMonitorAutoscaleComplexRules(LiveScenarioTest):
def setUp(self):
super(TestMonitorAutoscaleComplexRules, self).setUp()
self.cmd('extension add -n spring-cloud')
def tearDown(self):
self.cmd('extension remove -n spring-cloud')
super(TestMonitorAutoscaleComplexRules, self).tearDown()
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_monitor_autoscale_rule_for_spring_cloud', location='westus2')
def test_monitor_autoscale_rule_for_spring_cloud(self, resource_group):
self.kwargs.update({
'sc': self.create_random_name('clitestsc', 15),
'rg': resource_group,
'scapp': 'app1',
'gitrepo': 'https://github.com/Azure-Samples/piggymetrics-config',
})
self.cmd('spring-cloud create -n {sc} -g {rg}')
self.cmd('spring-cloud config-server git set -n {sc} -g {rg} --uri {gitrepo}')
self.kwargs['deployment_id'] = self.cmd('spring-cloud app create -n {scapp} -s {sc} -g {rg}').get_output_in_json()['properties']['activeDeployment']['id']
self.cmd('monitor autoscale create -g {rg} --resource {deployment_id} --min-count 1 --count 1 --max-count 3')
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {sc}')
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == {scapp} and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', self.kwargs['scapp']),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == {scapp} and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', self.kwargs['scapp']),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd(
'monitor autoscale rule create -g {rg} --autoscale-name {sc} --condition "tomcat.global.request.total.count > 0 avg 3m where AppName == {scapp} and Deployment == default" --scale out 1',
checks=[
self.check('metricTrigger.metricName', 'tomcat.global.request.total.count'),
self.check('metricTrigger.metricNamespace', 'Microsoft.AppPlatform/Spring'),
self.check('metricTrigger.operator', 'GreaterThan'),
self.check('metricTrigger.threshold', 0),
self.check('metricTrigger.statistic', 'Average'),
self.check('metricTrigger.timeAggregation', 'Average'),
self.check('metricTrigger.timeWindow', 'PT3M'),
self.check('metricTrigger.timeGrain', 'PT1M'),
self.check('metricTrigger.dimensions[0].dimensionName', 'AppName'),
self.check('metricTrigger.dimensions[0].operator', 'Equals'),
self.check('metricTrigger.dimensions[0].values[0]', self.kwargs['scapp']),
self.check('metricTrigger.dimensions[1].dimensionName', 'Deployment'),
self.check('metricTrigger.dimensions[1].operator', 'Equals'),
self.check('metricTrigger.dimensions[1].values[0]', 'default'),
self.check('scaleAction.cooldown', 'PT5M'),
self.check('scaleAction.direction', 'Increase'),
self.check('scaleAction.type', 'ChangeCount'),
self.check('scaleAction.value', '1')
])
self.cmd('monitor autoscale rule list -g {rg} --autoscale-name {sc}', checks=[
self.check('length(@)', 3)
])
|
py
|
1a5d401c35d6d3c1dd171a7db9d0e94831d2ec9f
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='ScreenshotFormat',
version='1',
packages=setuptools.find_packages(),
author="Gomes Alexis",
author_email="[email protected]",
description="Python package to help create screenshot to upload on stores",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AlexisGomes/ScreenshotFormat",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
py
|
1a5d4032a1db46c723e7c05425fe51505e82fbd9
|
# -*- coding: utf-8 -*-
#
# MEOPAR MIDOSS project documentation Sphinx builder configuration file.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import datetime
# -- Project information -----------------------------------------------------
project = 'MEOPAR MIDOSS Project Docs'
author = (
'The MIDOSS Project Contributors, '
'the University of British Columbia, '
'and Dalhousie University')
copyright_years = (
"2018"
if datetime.date.today().year == 2018
else f"2018-{datetime.date.today():%Y}"
)
copyright = f"{copyright_years}, {author}"
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
]
intersphinx_mapping = {
"salishseanowcast": ("https://salishsea-nowcast.readthedocs.io/en/latest/", None),
}
# Private GitHub repositories that linkcheck will ignore
linkcheck_ignore = [
'https://github.com/MIDOSS/MIDOSS-MOHID-CODE',
'https://github.com/MIDOSS/MIDOSS-MOHID-grid',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/MEOPAR_favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
|
py
|
1a5d40afc123003a9edf13dfda027503cc7f8aef
|
import numpy as np
from sklearn.metrics import r2_score
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import Ridge
from train.build_model import *
np.random.seed(1337)
x, y = load_svmlight_file('data/reg_big.data')
x = np.asarray(x.todense())
tri, tei = split_testing_data_r(y)
xtr = x[tri]
ytr = y[tri]
xte = x[tei]
yte = y[tei]
alp = 1000
m = Ridge(alpha=alp)
m.fit(xtr, ytr)
r2_train = r2_score(ytr, m.predict(xtr))
r2_test = r2_score(yte, m.predict(xte))
print('Training R2 Score: {0}'.format(np.round(r2_train, 5)))
print('Testing R2 Score: {0}'.format(np.round(r2_test, 5)))
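# Hedged note, not part of the original script: alpha=1000 above is a hand-picked value.
# scikit-learn's RidgeCV can pick alpha by cross-validation instead; the grid below is an
# arbitrary example, not a recommendation derived from this data.
# from sklearn.linear_model import RidgeCV
# m_cv = RidgeCV(alphas=[0.1, 1.0, 10.0, 100.0, 1000.0]).fit(xtr, ytr)
# print('Selected alpha: {0}'.format(m_cv.alpha_))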
|
py
|
1a5d4119fe54218a65ea9939b2ff3e1712ed14f1
|
#!/usr/bin/env python
from tokio.cli.archive_lmtdb import main
if __name__ == "__main__":
main()
|
py
|
1a5d414e9a9e3620343f38c0e853e98b252fd38c
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import random
from smarts.core.utils.logging import surpress_stdout
import subprocess
import time
from typing import List, Sequence
import numpy as np
from shapely.geometry import Polygon, box as shapely_box
from shapely.affinity import rotate as shapely_rotate
import traci.constants as tc
from traci.exceptions import FatalTraCIError, TraCIException
from smarts.core import gen_id
from .colors import SceneColors
from .coordinates import Heading, Pose
from .provider import ProviderState, ProviderTLS, ProviderTrafficLight
from .vehicle import VEHICLE_CONFIGS, VehicleState
# We need to import .utils.sumo before we can use traci
from .utils.sumo import SUMO_PATH, traci
from .utils import networking
class SumoTrafficSimulation:
"""
Args:
net_file:
path to sumo .net.xml file
headless:
False to run with `sumo-gui`. True to run with `sumo`
time_resolution:
SUMO simulation is discretized into steps of `time_resolution` seconds
WARNING:
Since our interface (TraCI) to SUMO is delayed by one simulation step,
setting a higher time resolution may lead to unexpected artifacts
"""
def __init__(
self,
headless=True,
time_resolution=0.1,
num_clients=1,
num_external_sumo_clients=0,
sumo_port=None,
auto_start=True,
endless_traffic=True,
debug=True,
):
self._log = logging.getLogger(self.__class__.__name__)
self._debug = debug
self._scenario = None
self._log_file = None
self._time_resolution = time_resolution
self._headless = headless
self._cumulative_sim_seconds = 0
self._non_sumo_vehicle_ids = set()
self._sumo_vehicle_ids = set()
self._is_setup = False
self._last_trigger_time = -1000000
self._num_dynamic_ids_used = 0
self._traci_conn = None
self._sumo_proc = None
self._num_clients = 1 + num_external_sumo_clients
self._sumo_port = sumo_port
self._auto_start = auto_start
self._endless_traffic = endless_traffic
self._to_be_teleported = dict()
self._reserved_areas = dict()
def __repr__(self):
return f"""SumoTrafficSim(
_scenario={repr(self._scenario)},
_time_resolution={self._time_resolution},
_headless={self._headless},
_cumulative_sim_seconds={self._cumulative_sim_seconds},
_non_sumo_vehicle_ids={self._non_sumo_vehicle_ids},
_sumo_vehicle_ids={self._sumo_vehicle_ids},
_is_setup={self._is_setup},
_last_trigger_time={self._last_trigger_time},
_num_dynamic_ids_used={self._num_dynamic_ids_used},
_traci_conn={repr(self._traci_conn)}
)"""
def __str__(self):
return repr(self)
def _initialize_traci_conn(self, num_retries=5):
# TODO: inline sumo or process pool
# the retries are to deal with port collisions
# since the way we start sumo here has a race condition on
# each spawned process claiming a port
for _ in range(num_retries):
self._close_traci_and_pipes()
sumo_port = self._sumo_port
if sumo_port is None:
sumo_port = networking.find_free_port()
sumo_binary = "sumo" if self._headless else "sumo-gui"
sumo_cmd = [
os.path.join(SUMO_PATH, "bin", sumo_binary),
"--remote-port=%s" % sumo_port,
*self._base_sumo_load_params(),
]
self._log.debug("Starting sumo process:\n\t %s", sumo_cmd)
self._sumo_proc = subprocess.Popen(
sumo_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
time.sleep(0.05) # give SUMO time to start
try:
with surpress_stdout():
self._traci_conn = traci.connect(
sumo_port,
numRetries=100,
proc=self._sumo_proc,
waitBetweenRetries=0.05,
) # SUMO must be ready within 5 seconds
try:
assert (
self._traci_conn.getVersion()[0] >= 20
), "TraCI API version must be >= 20 (SUMO 1.5.0)"
# We will retry since this is our first sumo command
except FatalTraCIError:
logging.debug("Connection closed. Retrying...")
self._close_traci_and_pipes()
continue
except ConnectionRefusedError:
logging.debug(
"Connection refused. Tried to connect to unpaired TraCI client."
)
self._close_traci_and_pipes()
continue
# It is mandatory to set order when using multiple clients.
self._traci_conn.setOrder(0)
break
try:
self._traci_conn.getVersion()
except Exception as e:
logging.error(
f"""Failed to initialize SUMO
Your scenario might not be configured correctly or
you were trying to initialize many SUMO instances at
once and we were not able to assign unique port
numbers to all SUMO processes.
Check {self._log_file} for hints"""
)
raise e
self._log.debug("Finished starting sumo process")
def _base_sumo_load_params(self):
load_params = [
"--num-clients=%d" % self._num_clients,
"--net-file=%s" % self._scenario.net_filepath,
"--quit-on-end",
"--log=%s" % self._log_file,
"--error-log=%s" % self._log_file,
"--no-step-log",
"--no-warnings=1",
"--seed=%s" % random.randint(0, 2147483648),
"--time-to-teleport=%s" % -1,
"--collision.check-junctions=true",
"--collision.action=none",
"--lanechange.duration=3.0",
# TODO: `--lanechange.duration` or `--lateral-resolution`, in combination with `route_id`,
# causes lane change crashes as of SUMO 1.6.0.
# Controlling vehicles that have been added to the simulation with a route causes
# lane change related crashes.
# "--lateral-resolution=100", # smooth lane changes
"--step-length=%f" % self._time_resolution,
"--default.action-step-length=%f" % self._time_resolution,
"--begin=0", # start simulation at time=0
"--end=31536000", # keep the simulation running for a year
]
if self._auto_start:
load_params.append("--start")
if self._scenario.route_files_enabled:
load_params.append("--route-files={}".format(self._scenario.route_filepath))
return load_params
def setup(self, scenario) -> ProviderState:
self._log.debug("Setting up SumoTrafficSim %s" % self)
assert not self._is_setup, (
"Can't setup twice, %s, see teardown()" % self._is_setup
)
# restart sumo process only when map file changes
if self._scenario and self._scenario.net_file_hash == scenario.net_file_hash:
restart_sumo = False
else:
restart_sumo = True
self._scenario = scenario
self._log_file = scenario.unique_sumo_log_file()
if restart_sumo:
self._initialize_traci_conn()
else:
self._traci_conn.load(self._base_sumo_load_params())
assert self._traci_conn is not None, "No active traci conn"
self._traci_conn.simulation.subscribe(
[tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS]
)
for tls_id in self._traci_conn.trafficlight.getIDList():
self._traci_conn.trafficlight.subscribe(
tls_id, [tc.TL_RED_YELLOW_GREEN_STATE, tc.TL_CONTROLLED_LINKS]
)
# XXX: SUMO caches the previous subscription results. Calling `simulationStep`
# effectively flushes the results. We need to use epsilon instead of zero
# as zero will step according to a default (non-zero) step-size.
self.step({}, 1e-6, 0)
self._is_setup = True
return self._compute_provider_state()
def _close_traci_and_pipes(self):
if self._sumo_proc:
self._sumo_proc.stdin.close()
self._sumo_proc.stdout.close()
self._sumo_proc.stderr.close()
self._sumo_proc = None
if self._traci_conn:
self._traci_conn.close()
self._traci_conn = None
def teardown(self):
self._log.debug("Tearing down SUMO traffic sim %s" % self)
if not self._is_setup:
self._log.debug("Nothing to teardown")
return
assert self._is_setup
self._cumulative_sim_seconds = 0
self._non_sumo_vehicle_ids = set()
self._sumo_vehicle_ids = set()
self._is_setup = False
self._num_dynamic_ids_used = 0
self._to_be_teleported = dict()
self._reserved_areas = dict()
@property
def action_spaces(self):
# Unify interfaces with other providers
return {}
def reset(self):
# Unify interfaces with other providers
pass
def step(self, provider_actions, dt, elapsed_sim_time) -> ProviderState:
"""
Args:
dt: time (in seconds) to simulate during this simulation step
managed_vehicles: dict of {vehicle_id: (x, y, heading)}
!! The vehicle state should represent the state of the
!! vehicles at the start of the current simulation step
Returns:
ProviderState representing the state of the SUMO simulation
"""
# we tell SUMO to step through dt more seconds of the simulation
self._cumulative_sim_seconds += dt
self._traci_conn.simulationStep(self._cumulative_sim_seconds)
return self._compute_provider_state()
def sync(self, provider_state: ProviderState):
provider_vehicles = {v.vehicle_id: v for v in provider_state.vehicles}
external_vehicles = [v for v in provider_state.vehicles if v.source != "SUMO"]
external_vehicle_ids = {v.vehicle_id for v in external_vehicles}
# Represents current state
traffic_vehicle_states = self._traci_conn.vehicle.getAllSubscriptionResults()
traffic_vehicle_ids = set(traffic_vehicle_states)
# State / ownership changes
external_vehicles_that_have_left = (
self._non_sumo_vehicle_ids - external_vehicle_ids - traffic_vehicle_ids
)
external_vehicles_that_have_joined = (
external_vehicle_ids - self._non_sumo_vehicle_ids - traffic_vehicle_ids
)
vehicles_that_have_become_external = (
traffic_vehicle_ids & external_vehicle_ids - self._non_sumo_vehicle_ids
)
# XXX: They may have become internal because they've been relinquished or
# because they've been destroyed from a collision. Presently we're not
# differentiating and will take over as social vehicles regardless.
vehicles_that_have_become_internal = (
self._non_sumo_vehicle_ids - external_vehicle_ids
) & traffic_vehicle_ids
log = ""
if external_vehicles_that_have_left:
log += (
f"external_vehicles_that_have_left={external_vehicles_that_have_left}\n"
)
if external_vehicles_that_have_joined:
log += f"external_vehicles_that_have_joined={external_vehicles_that_have_joined}\n"
if vehicles_that_have_become_external:
log += f"vehicles_that_have_become_external={vehicles_that_have_become_external}\n"
if vehicles_that_have_become_internal:
log += f"vehicles_that_have_become_internal={vehicles_that_have_become_internal}\n"
if log:
self._log.debug(log)
for vehicle_id in external_vehicles_that_have_left:
self._log.debug("Non SUMO vehicle %s left simulation", vehicle_id)
self._non_sumo_vehicle_ids.remove(vehicle_id)
self._traci_conn.vehicle.remove(vehicle_id)
for vehicle_id in external_vehicles_that_have_joined:
dimensions = provider_vehicles[vehicle_id].dimensions
self._create_vehicle(vehicle_id, dimensions)
# update the state of all current managed vehicles
for vehicle_id in self._non_sumo_vehicle_ids:
provider_vehicle = provider_vehicles[vehicle_id]
pos, sumo_heading = provider_vehicle.pose.as_sumo(
provider_vehicle.dimensions.length, Heading(0)
)
# See https://sumo.dlr.de/docs/TraCI/Change_Vehicle_State.html#move_to_xy_0xb4
# for flag values
try:
self._move_vehicle(
provider_vehicle.vehicle_id,
pos,
sumo_heading,
provider_vehicle.speed,
)
except TraCIException as e:
# Likely as a result of https://github.com/eclipse/sumo/issues/3993
# the vehicle got removed because we skipped a moveToXY call between
# internal stepSimulations, so we add the vehicle back here.
self._log.warning(
"Attempted to (TraCI) SUMO.moveToXY(...) on missing "
f"vehicle(id={vehicle_id})"
)
self._create_vehicle(vehicle_id, provider_vehicle.dimensions)
self._move_vehicle(
provider_vehicle.vehicle_id,
pos,
sumo_heading,
provider_vehicle.speed,
)
for vehicle_id in vehicles_that_have_become_external:
self._traci_conn.vehicle.setColor(
vehicle_id, SumoTrafficSimulation._social_agent_vehicle_color()
)
self._non_sumo_vehicle_ids.add(vehicle_id)
for vehicle_id in vehicles_that_have_become_internal:
self._traci_conn.vehicle.setColor(
vehicle_id, SumoTrafficSimulation._social_vehicle_color()
)
self._non_sumo_vehicle_ids.remove(vehicle_id)
# Let sumo take over speed again
self._traci_conn.vehicle.setSpeed(vehicle_id, -1)
if self._endless_traffic:
self._reroute_vehicles(traffic_vehicle_states)
self._teleport_exited_vehicles()
@staticmethod
def _ego_agent_vehicle_color():
return np.array(SceneColors.Agent.value[:3]) * 255
@staticmethod
def _social_agent_vehicle_color():
return np.array(SceneColors.SocialAgent.value[:3]) * 255
@staticmethod
def _social_vehicle_color():
return np.array(SceneColors.SocialVehicle.value[:3]) * 255
def _move_vehicle(self, vehicle_id, position, heading, speed):
x, y, _ = position
self._traci_conn.vehicle.moveToXY(
vehID=vehicle_id,
edgeID="", # let sumo choose the edge
lane=-1, # let sumo choose the lane
x=x,
y=y,
angle=heading, # only used for visualizing in sumo-gui
keepRoute=0,
)
self._traci_conn.vehicle.setSpeed(vehicle_id, speed)
def _create_vehicle(self, vehicle_id, dimensions):
assert (
type(vehicle_id) == str
), f"SUMO expects string ids: {vehicle_id} is a {type(vehicle_id)}"
self._log.debug("Non SUMO vehicle %s joined simulation", vehicle_id)
self._non_sumo_vehicle_ids.add(vehicle_id)
self._traci_conn.vehicle.add(
vehID=vehicle_id,
routeID="", # we don't care which route this vehicle is on
)
# TODO: Vehicle Id should not be using prefixes this way
if vehicle_id.startswith("social-agent"):
# This is based on ID convention
vehicle_color = SumoTrafficSimulation._social_agent_vehicle_color()
else:
vehicle_color = SumoTrafficSimulation._ego_agent_vehicle_color()
self._traci_conn.vehicle.setColor(vehicle_id, vehicle_color)
# Directly below are two of the main factors that affect vehicle secure gap for
# purposes of determining the safety gaps that SUMO vehicles will abide by. The
# remaining large factor is vehicle speed.
# See:
# http://sumo-user-mailing-list.90755.n8.nabble.com/sumo-user-Questions-on-SUMO-Built-In-Functions-getSecureGap-amp-brakeGap-td3254.html
# Set the controlled vehicle's time headway in seconds
self._traci_conn.vehicle.setTau(vehicle_id, 4)
# Set the controlled vehicle's maximum natural deceleration in m/s^2
self._traci_conn.vehicle.setDecel(vehicle_id, 6)
# setup the vehicle size
self._traci_conn.vehicle.setLength(vehicle_id, dimensions.length)
self._traci_conn.vehicle.setWidth(vehicle_id, dimensions.width)
self._traci_conn.vehicle.setHeight(vehicle_id, dimensions.height)
def _compute_provider_state(self) -> ProviderState:
return ProviderState(
vehicles=self._compute_traffic_vehicles(),
traffic_light_systems=self._compute_traffic_lights(),
)
def _compute_traffic_vehicles(self) -> List[VehicleState]:
sub_results = self._traci_conn.simulation.getSubscriptionResults()
if sub_results is None or sub_results == {}:
return []  # matches the List[VehicleState] return annotation
# New social vehicles that have entered the map
newly_departed_sumo_traffic = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_DEPARTED_VEHICLES_IDS]
if vehicle_id not in self._non_sumo_vehicle_ids
]
reserved_areas = [position for position in self._reserved_areas.values()]
for vehicle_id in newly_departed_sumo_traffic:
other_vehicle_shape = self._shape_of_vehicle(vehicle_id)
violates_reserved_area = False
for reserved_area in reserved_areas:
if reserved_area.intersects(other_vehicle_shape):
violates_reserved_area = True
break
if violates_reserved_area:
self._traci_conn.vehicle.remove(vehicle_id)
continue
self._log.debug("SUMO vehicle %s entered simulation", vehicle_id)
self._traci_conn.vehicle.subscribe(
vehicle_id,
[
tc.VAR_POSITION,
tc.VAR_ANGLE,
tc.VAR_SPEED,
tc.VAR_VEHICLECLASS,
tc.VAR_ROUTE_INDEX,
tc.VAR_EDGES,
tc.VAR_TYPE,
],
)
# Non-sumo vehicles will show up the step after the sync where the non-sumo vehicle is
# added.
newly_departed_non_sumo_vehicles = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_DEPARTED_VEHICLES_IDS]
if vehicle_id not in newly_departed_sumo_traffic
]
for vehicle_id in newly_departed_non_sumo_vehicles:
if vehicle_id in self._reserved_areas:
del self._reserved_areas[vehicle_id]
sumo_vehicle_state = self._traci_conn.vehicle.getAllSubscriptionResults()
self._sumo_vehicle_ids = (
set(sumo_vehicle_state.keys()) - self._non_sumo_vehicle_ids
)
provider_vehicles = []
# batched conversion of positions to numpy arrays
front_bumper_positions = np.array(
[
sumo_vehicle[tc.VAR_POSITION]
for sumo_vehicle in sumo_vehicle_state.values()
]
).reshape(-1, 2)
for i, (sumo_id, sumo_vehicle) in enumerate(sumo_vehicle_state.items()):
# XXX: We can safely rely on iteration order over dictionaries being
# stable on py3.7.
# See: https://www.python.org/downloads/release/python-370/
# "The insertion-order preservation nature of dict objects is now an
# official part of the Python language spec."
front_bumper_pos = front_bumper_positions[i]
heading = Heading.from_sumo(sumo_vehicle[tc.VAR_ANGLE])
speed = sumo_vehicle[tc.VAR_SPEED]
vehicle_type = sumo_vehicle[tc.VAR_VEHICLECLASS]
dimensions = VEHICLE_CONFIGS[vehicle_type].dimensions
provider_vehicles.append(
VehicleState(
                    # XXX: In the case of the SUMO traffic provider, the vehicle ID,
                    # the SUMO ID, and the actor ID are all the same.
vehicle_id=sumo_id,
vehicle_type=vehicle_type,
pose=Pose.from_front_bumper(
front_bumper_pos, heading, dimensions.length
),
dimensions=dimensions,
speed=speed,
source="SUMO",
)
)
return provider_vehicles
def _teleport_exited_vehicles(self):
sub_results = self._traci_conn.simulation.getSubscriptionResults()
if not sub_results:
return
exited_sumo_traffic = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_ARRIVED_VEHICLES_IDS]
if vehicle_id not in self._non_sumo_vehicle_ids
]
for v_id in exited_sumo_traffic:
if v_id in self._to_be_teleported:
route = self._to_be_teleported[v_id]["route"]
type_id = self._to_be_teleported[v_id]["type_id"]
self._teleport_vehicle(v_id, route, 0, type_id)
def _teleport_vehicle(self, vehicle_id, route, lane_offset, type_id):
self._log.debug(
f"Teleporting {vehicle_id} to lane_offset={lane_offset} route={route}"
)
spawn_edge = self._scenario.road_network.graph.getEdge(route[0])
lane_index = random.randint(0, len(spawn_edge.getLanes()) - 1)
self._emit_vehicle_by_route(vehicle_id, route, lane_index, lane_offset, type_id)
def _reroute_vehicles(self, vehicle_states):
for vehicle_id, state in vehicle_states.items():
if vehicle_id not in self._sumo_vehicle_ids:
continue
route_index = state[tc.VAR_ROUTE_INDEX]
route_edges = state[tc.VAR_EDGES]
type_id = state[tc.VAR_TYPE]
if route_index != len(route_edges) - 1:
# The vehicle is not in the last route edge.
continue
            # Check if these edges form a loop.
from_edge = self._scenario.road_network.graph.getEdge(route_edges[-1])
to_edge = self._scenario.road_network.graph.getEdge(route_edges[0])
next_edges = from_edge.getOutgoing().keys()
if to_edge not in next_edges:
                # Reroute only if it forms a loop; otherwise, teleport the vehicle.
self._to_be_teleported[vehicle_id] = {
"route": route_edges,
"type_id": type_id,
}
continue
# The first edge in the list has to be the one that the vehicle
# is in at the moment, which is the last edge in current route_edges.
new_route_edges = route_edges[-1:] + route_edges
self._traci_conn.vehicle.setRoute(vehicle_id, new_route_edges)
def _compute_traffic_lights(self) -> List[ProviderTLS]:
"""TraCI will automatically generate TLS programs if none was specified
according to the net/program. To support this we opt to use TraCI instead
of the sumolib interface for TLS support.
"""
sub_results = self._traci_conn.trafficlight.getSubscriptionResults(None)
tlss = []
if not sub_results:
return tlss
for tls_id in sub_results:
light_states = sub_results[tls_id][tc.TL_RED_YELLOW_GREEN_STATE]
links = sub_results[tls_id][tc.TL_CONTROLLED_LINKS]
traffic_lights = []
for link, state in zip(links, light_states):
lane_start, lane_end, lane_via = [
self._scenario.road_network.lane_by_id(lane) for lane in link[0]
]
traffic_lights.append(
ProviderTrafficLight(
lane_in=lane_start,
lane_via=lane_via,
lane_out=lane_end,
state=state,
)
)
tlss.append(ProviderTLS(tls_id, traffic_lights))
return tlss
def _unique_id(self):
route_id = "hiway_id_%s" % self._num_dynamic_ids_used
self._num_dynamic_ids_used += 1
return route_id
def vehicle_route(self, vehicle_id) -> Sequence[str]:
return self._traci_conn.vehicle.getRoute(vehicle_id)
def reserve_traffic_location_for_vehicle(
self, vehicle_id: str, reserved_location: Polygon,
):
"""Reserve an area around a location where vehicles cannot spawn until a given vehicle
is added.
Args:
vehicle_id: The vehicle to wait for.
reserved_location: The space the vehicle takes up.
"""
self._reserved_areas[vehicle_id] = reserved_location
def remove_traffic_vehicle(self, vehicle_id: str):
self._traci_conn.vehicle.remove(vehicle_id)
self._sumo_vehicle_ids.remove(vehicle_id)
def _shape_of_vehicle(self, vehicle_id):
p = self._traci_conn.vehicle.getPosition(vehicle_id)
length = self._traci_conn.vehicle.getLength(vehicle_id)
width = self._traci_conn.vehicle.getWidth(vehicle_id)
heading = Heading.from_sumo(self._traci_conn.vehicle.getAngle(vehicle_id))
poly = shapely_box(p[0] - width * 0.5, p[1] - length, p[0] + width * 0.5, p[1],)
return shapely_rotate(poly, heading, use_radians=True)
def _emit_vehicle_by_route(
self, vehicle_id, route, lane_index, lane_offset, type_id="DEFAULT_VEHTYPE"
):
route_id = f"route-{gen_id()}"
self._traci_conn.route.add(route_id, route)
self._traci_conn.vehicle.add(
vehicle_id,
route_id,
typeID=type_id,
departPos=lane_offset,
departLane=lane_index,
)
return vehicle_id
def _emit_vehicle_near_position(self, position, vehicle_id=None) -> str:
wp = self._scenario.waypoints.closest_waypoint(position)
lane = self._scenario.road_network.lane_by_id(wp.lane_id)
offset_in_lane = self._scenario.road_network.offset_into_lane(
lane, tuple(wp.pos)
)
if not vehicle_id:
vehicle_id = self._unique_id()
# XXX: Do not give this a route or it will crash on `moveTo` calls
self._traci_conn.vehicle.add(
vehicle_id, "", departPos=offset_in_lane, departLane=wp.lane_index,
)
self._traci_conn.vehicle.moveToXY(
vehID=vehicle_id,
edgeID="", # let sumo choose the edge
lane=-1, # let sumo choose the lane
x=position[0],
y=position[1],
# angle=sumo_heading, # only used for visualizing in sumo-gui
keepRoute=0b000, # On lane
)
return vehicle_id
|
py
|
1a5d4216af5449885ab1d2a3ee2998d80082e602
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pickle
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
ak_Array = ak._v2.highlevel.Array
ak_Record = ak._v2.highlevel.Record
ak_to_buffers = ak._v2.operations.to_buffers
ak_from_buffers = ak._v2.operations.from_buffers
def test_numpyarray():
assert ak_from_buffers(*ak_to_buffers(ak_Array([1, 2, 3, 4, 5]))).tolist() == [
1,
2,
3,
4,
5,
]
assert pickle.loads(pickle.dumps(ak_Array([1, 2, 3, 4, 5]), -1)).tolist() == [
1,
2,
3,
4,
5,
]
def test_listoffsetarray():
assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], [4, 5]])).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
assert ak_from_buffers(
*ak_to_buffers(["one", "two", "three", "four", "five"])
).tolist() == ["one", "two", "three", "four", "five"]
assert ak_from_buffers(
*ak_to_buffers([["one", "two", "three"], [], ["four", "five"]])
).tolist() == [["one", "two", "three"], [], ["four", "five"]]
assert pickle.loads(
pickle.dumps(ak_Array([[1, 2, 3], [], [4, 5]]), -1)
).tolist() == [[1, 2, 3], [], [4, 5]]
def test_listarray():
listoffsetarray = ak_Array([[1, 2, 3], [], [4, 5]]).layout
listarray = ak._v2.contents.ListArray(
listoffsetarray.starts, listoffsetarray.stops, listoffsetarray.content
)
assert ak_from_buffers(*ak_to_buffers(listarray)).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
assert pickle.loads(pickle.dumps(ak_Array(listarray), -1)).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
def test_indexedoptionarray():
assert ak_from_buffers(*ak_to_buffers([1, 2, 3, None, None, 5])).tolist() == [
1,
2,
3,
None,
None,
5,
]
assert pickle.loads(
pickle.dumps(ak_Array([1, 2, 3, None, None, 5]), -1)
).tolist() == [1, 2, 3, None, None, 5]
def test_indexedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
index = ak._v2.index.Index64(np.array([3, 1, 1, 4, 2], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedArray(index, content)
assert ak_from_buffers(*ak_to_buffers(indexedarray)).tolist() == [
3.3,
1.1,
1.1,
4.4,
2.2,
]
assert pickle.loads(pickle.dumps(ak_Array(indexedarray), -1)).tolist() == [
3.3,
1.1,
1.1,
4.4,
2.2,
]
def test_emptyarray():
assert ak_from_buffers(*ak_to_buffers([])).tolist() == []
assert ak_from_buffers(*ak_to_buffers([[], [], []])).tolist() == [[], [], []]
assert pickle.loads(pickle.dumps(ak_Array([]), -1)).tolist() == []
assert pickle.loads(pickle.dumps(ak_Array([[], [], []]), -1)).tolist() == [
[],
[],
[],
]
def test_bytemaskedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
mask = ak._v2.index.Index8(
np.array([False, True, True, False, False], dtype=np.int8)
)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(mask, content, True)
assert ak_from_buffers(*ak_to_buffers(bytemaskedarray)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
assert pickle.loads(pickle.dumps(ak_Array(bytemaskedarray), -1)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
def test_bitmaskedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
mask = ak._v2.index.IndexU8(
np.packbits(np.array([False, True, True, False, False], dtype=np.int8))
)
bitmaskedarray = ak._v2.contents.BitMaskedArray(mask, content, True, 5, False)
assert ak_from_buffers(*ak_to_buffers(bitmaskedarray)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
assert pickle.loads(pickle.dumps(ak_Array(bitmaskedarray), -1)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
def test_recordarray():
assert ak_from_buffers(
*ak_to_buffers([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])])
).tolist() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
assert ak_from_buffers(
*ak_to_buffers(
[{"x": 1.1, "y": [1]}, {"x": 2.2, "y": [1, 2]}, {"x": 3.3, "y": [1, 2, 3]}]
)
).tolist() == [
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
assert pickle.loads(
pickle.dumps(ak_Array([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]), -1)
).tolist() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
assert pickle.loads(
pickle.dumps(
ak_Array(
[
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
),
-1,
)
).tolist() == [
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
def test_record():
assert pickle.loads(
pickle.dumps(ak_Record({"x": 2.2, "y": [1, 2]}), -1)
).tolist() == {"x": 2.2, "y": [1, 2]}
assert pickle.loads(
pickle.dumps(
ak_Array(
[
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
)[1],
-1,
)
).tolist() == {"x": 2.2, "y": [1, 2]}
def test_regulararray():
content = ak_Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).layout
regulararray = ak._v2.contents.RegularArray(content, 3, zeros_length=0)
assert ak_from_buffers(*ak_to_buffers(regulararray)).tolist() == [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
]
assert pickle.loads(pickle.dumps(ak_Array(regulararray), -1)).tolist() == [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
]
def test_unionarray():
assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], 4, 5])).tolist() == [
[1, 2, 3],
[],
4,
5,
]
assert pickle.loads(pickle.dumps(ak_Array([[1, 2, 3], [], 4, 5]), -1)).tolist() == [
[1, 2, 3],
[],
4,
5,
]
def test_unmaskedarray():
content = ak_Array([1, 2, 3, 4, 5]).layout
unmaskedarray = ak._v2.contents.UnmaskedArray(content)
assert ak_from_buffers(*ak_to_buffers(unmaskedarray)).tolist() == [1, 2, 3, 4, 5]
assert pickle.loads(pickle.dumps(ak_Array(unmaskedarray), -1)).tolist() == [
1,
2,
3,
4,
5,
]
|
py
|
1a5d425cd6715695f962699f070ae70d81041d3a
|
"""Given an string S, find all its permutations.
Example:
S = "abc"
Permutations found = ["abc", "cab", "bac", "acb", "bac", "cba"]
"""
def find_permutations(s):
if len(s) <= 1:
return [s]
permutations = []
def _find_permutations(partial, rest, permutations):
if len(rest) == 0:
permutations.append(partial)
for i in range(len(rest)):
_find_permutations(partial + rest[i], rest[:i] + rest[i+1:], permutations)
_find_permutations("", s, permutations)
return permutations
if __name__ == "__main__":
test_cases = [
("", [""]),
("a", ["a"]),
("ab", ["ab", "ba"]),
("abc", ["abc", "acb", "cab", "bac", "bca", "cba"]),
("abcd", [
"abcd", "abdc", "adbc", "dabc",
"acbd", "acdb", "adcb", "dacb",
"cabd", "cadb", "cdab", "dcab",
"bacd", "badc", "bdac", "dbac",
"bcad", "bcda", "bdca", "dbca",
"cbad", "cbda", "cdba", "dcba"
]),
]
for s, expected in test_cases:
found_permutations = find_permutations(s)
print(found_permutations)
assert set(found_permutations) == set(expected)
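    # Optional cross-check against the standard library: itertools.permutations
    # (part of CPython, no extra dependency) yields the same set of orderings,
    # so it can be used to verify the recursive implementation above.
    from itertools import permutations
    for s, _ in test_cases:
        assert set(find_permutations(s)) == {"".join(p) for p in permutations(s)}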
|
py
|
1a5d42fc43ca21958c622ecbbf65987afbee0aa4
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapsAggregatedList],
request: compute.AggregatedListUrlMapsRequest,
response: compute.UrlMapsAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapsAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapsAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.UrlMapsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.UrlMapsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapList],
request: compute.ListUrlMapsRequest,
response: compute.UrlMapList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.UrlMap]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
py
|
1a5d430da23ad792b32735001c11d28eaf1845b3
|
"""
Various functions for calculating optimal path trees.
"""
from operator import itemgetter
from overtime.components import TemporalDiGraph
def calculate_fastest_path_durations(graph, root, interval=None):
"""
Returns a dictionary where the keys are node labels and the values are the duration of the fastest path- i.e.
the path which minimizes elapsed time- to all other nodes from a given root. Unreachable nodes have fastest path
duration set to infinity.
Parameter(s):
-------------
graph : TemporalDiGraph
A directed temporal graph.
root: string
The node label for a node to use as root.
interval : tuple/List
A time interval.
        For example: (0, 3)
Returns:
--------
fastest_path_durations : dict
The durations of the fastest paths to all other nodes from the root node.
For example: {A: 0, B: 2, C: 4, D: inf...}
Notes:
------
Our implementation for calculating fastest temporal path durations is based on the algorithm as specified in
"Path Problems in Temporal Graphs" (Wu et al. 2014), found here: https://www.vldb.org/pvldb/vol7/p721-wu.pdf.
Their algorithm takes as input a temporal graph (rather than, say, a static expansion) and returns the duration
of the fastest time-respecting paths to all other nodes.
TODO
----
- Generalize to undirected graphs
- Perhaps have this function return the actual tree, and another function to calculate the duration of paths
in that tree
"""
if not isinstance(graph, TemporalDiGraph):
raise TypeError("Input is not an instance of TemporalDiGraph. This method only accepts directed temporal graphs as input.")
# If interval not specified, set interval to be entire lifetime of graph
if not interval:
interval = (0, graph.edges.end())
# Initialize lists for storing fastest path start and arrival times to each node
path_start_end_times = {label: [] for label in graph.nodes.labels()}
# Initialize dict for storing fastest path duration for each node
# Root initialized to 0, rest to infinity
fastest_path_durations = {label: float("inf") for label in graph.nodes.labels()}
fastest_path_durations[root] = 0
# Get edge stream representation
edge_stream = graph.edges.set
# Iterate over edge stream representation
for edge in edge_stream:
u = edge.node1.label
v = edge.node2.label
t = edge.start
if interval[0] <= t <= interval[1]:
# If source node of edge is root
if u == root:
                if [t, t] not in path_start_end_times[root]:
path_start_end_times[root].append([t, t])
# If path start and arrival times do not yet exist for u, continue
if not path_start_end_times[u]:
continue
# Get best start and new arrival times of path
# Apparently itemgetter is faster for this - is it worth having an extra dependency?
new_start_time = max(path_start_end_times[u], key=itemgetter(1))[0]
new_arr_time = edge.end
# If node v not already visited, insert new start and arrival time
if not path_start_end_times[v]:
path_start_end_times[v].append([new_start_time, new_arr_time])
# Update fastest path arrival time for node v if v already visited
else:
for element in path_start_end_times[v]:
if element[0] == new_start_time:
element[1] = new_arr_time
break
# If path faster than currently stored path, update stored duration
if new_arr_time - new_start_time < fastest_path_durations[v]:
fastest_path_durations[v] = new_arr_time - new_start_time
return fastest_path_durations
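# Worked example of the fastest-path logic above (numbers only, no API calls): with
# root A and an edge stream containing (A -> B, start t=1, arrive t=2) and
# (B -> C, start t=3, arrive t=4), the path A -> B -> C starts at t=1 and arrives at
# t=4, so C gets duration 4 - 1 = 3, B gets 2 - 1 = 1, and unreachable nodes stay at
# infinity.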
def calculate_shortest_path_lengths(graph, root, interval=None):
"""
Returns a dictionary where the keys are node labels and the values are the lengths of the shortest path- i.e.
the paths which minimize traversal time- to all other nodes from a given root. Unreachable nodes have shortest
path length set to infinity.
Parameter(s):
-------------
graph : TemporalDiGraph
A directed temporal graph.
root: string
The node label for a node to use as root.
interval : tuple/List
A time interval.
        For example: (0, 3)
Returns:
--------
shortest_path_lengths : dict
The overall traversal time of the shortest paths to all other nodes from the root node.
For example: {A: 0, B: 2, C: 4, D: inf...}
Notes:
------
Our implementation for calculating shortest temporal path durations is based on the algorithm as specified in
"Path Problems in Temporal Graphs" (Wu et al. 2014), found here: https://www.vldb.org/pvldb/vol7/p721-wu.pdf.
Their algorithm takes as input a temporal graph (rather than, say, a static expansion) and returns the total
traversal time of the shortest time-respecting paths to all other nodes.
TODO
----
- Generalize to undirected graphs
- Perhaps have this function return the actual tree, and another function to calculate the duration of paths
in that tree
"""
# If interval not specified, set interval to be entire lifetime of graph
if not interval:
interval = (0, graph.edges.end())
# Initialize lists for storing shortest path distance and arrival times to each node
path_distance_end_times = {label: [] for label in graph.nodes.labels()}
    # Initialize dict for storing shortest path length for each node
# Root initialized to 0, rest to infinity
shortest_path_lengths = {label: float("inf") for label in graph.nodes.labels()}
shortest_path_lengths[root] = 0
# Get edge stream representation
edge_stream = graph.edges.set
for edge in edge_stream:
u = edge.node1.label
v = edge.node2.label
t = edge.start
dur = edge.duration
if interval[0] <= t and t + dur <= interval[1]:
# If source node of edge is root
if u == root:
                if [0, t] not in path_distance_end_times[root]:
path_distance_end_times[root].append([0, t])
# If path start and arrival times do not yet exist for u, continue
if not path_distance_end_times[u]:
continue
            # Get best distance so far and new arrival time of path
# Apparently itemgetter is faster for this - is it worth having an extra dependency?
new_distance = max(path_distance_end_times[u], key=itemgetter(1))[0] + dur
new_arr_time = edge.end
            # If node v not already visited, insert new distance and arrival time
if not path_distance_end_times[v]:
path_distance_end_times[v].append([new_distance, new_arr_time])
            # Update shortest path arrival time for node v if v already visited
else:
for element in path_distance_end_times[v]:
if element[0] == new_distance:
element[1] = new_arr_time
break
# If path shorter than currently stored path, update stored duration
if new_distance < shortest_path_lengths[v]:
shortest_path_lengths[v] = new_distance
return shortest_path_lengths
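# For contrast with the fastest-path example above (same edge stream, root A): the
# shortest-path length sums edge durations rather than elapsed time, so with each
# edge taking 1 time unit, C gets length 1 + 1 = 2 even though the journey spans
# t=1 to t=4.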
|
py
|
1a5d43b1d1ea5a151354e8425870bbf8816d02a1
|
from cloudify import ctx
from cloudify import utils
from cloudify.exceptions import NonRecoverableError
from StringIO import StringIO
import base64
import os
import platform
import re
import subprocess
import sys
import time
import threading
import platform
import json
def convert_env_value_to_string(envDict):
    for key in list(envDict.keys()):
        envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute_from_top_host(ctx, 'user'):
return get_attribute_from_top_host(ctx, 'user')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('user', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('user', None)
return None
def get_attribute_key(ctx):
if get_attribute_from_top_host(ctx, 'key'):
return get_attribute_from_top_host(ctx, 'key')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('key', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('key', None)
return None
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name,json.dumps(entity.node.properties)))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
# ctx.logger.debug('Mapping exists for attribute {0} with value {1}'.format(attribute_name, json.dumps(mapped_value)))
return mapped_value
    # No mapping exists; try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
attribute_value = entity.instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the capability attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
return get_attribute(entity, attribute_name)
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
# Same as previous method but will first try to find the attribute on the capability.
def _all_instances_get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
result_map = {}
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
attribute_value = node_instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
prop_value = attribute_value
else:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
# ctx.logger.debug('Found the property {0} with value {1} on the node {2}'.format(property_name, json.dumps(property_value), entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, json.dumps(node.properties)))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def get_public_or_private_ip(entity):
public_ip = get_attribute(entity, 'public_ip_address')
if not public_ip:
return get_attribute(entity, 'ip_address')
return public_ip
def get_attribute_from_top_host(entity, attribute_name):
host = get_host(entity)
while host is not None:
entity = host
host = get_host(entity)
return get_attribute(entity, attribute_name)
ctx.instance.runtime_properties['tosca_id'] = ctx.instance.id
ctx.instance.runtime_properties['tosca_name'] = ctx.node.id
ctx.instance.runtime_properties['component_version'] = r'5'
|
py
|
1a5d4475b0e9623168d2659350c5a36d1810121e
|
"""Service calls related dependencies for LCN component."""
import pypck
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_BRIGHTNESS,
CONF_STATE,
CONF_UNIT_OF_MEASUREMENT,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CONNECTIONS,
CONF_KEYS,
CONF_LED,
CONF_OUTPUT,
CONF_PCK,
CONF_RELVARREF,
CONF_ROW,
CONF_SETPOINT,
CONF_TABLE,
CONF_TEXT,
CONF_TIME,
CONF_TIME_UNIT,
CONF_TRANSITION,
CONF_VALUE,
CONF_VARIABLE,
DATA_LCN,
LED_PORTS,
LED_STATUS,
OUTPUT_PORTS,
RELVARREF,
SENDKEYCOMMANDS,
SETPOINTS,
THRESHOLDS,
TIME_UNITS,
VAR_UNITS,
VARIABLES,
)
from .helpers import (
get_connection,
is_address,
is_key_lock_states_string,
is_relays_states_string,
)
class LcnServiceCall:
"""Parent class for all LCN service calls."""
schema = vol.Schema({vol.Required(CONF_ADDRESS): is_address})
def __init__(self, hass):
"""Initialize service call."""
self.connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
def get_address_connection(self, call):
"""Get address connection object."""
addr, connection_id = call.data[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*addr)
if connection_id is None:
connection = self.connections[0]
else:
connection = get_connection(self.connections, connection_id)
return connection.get_address_conn(addr)
class OutputAbs(LcnServiceCall):
"""Set absolute brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000
)
address_connection = self.get_address_connection(call)
address_connection.dim_output(output.value, brightness, transition)
class OutputRel(LcnServiceCall):
"""Set relative brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=-100, max=100)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
address_connection = self.get_address_connection(call)
address_connection.rel_output(output.value, brightness)
class OutputToggle(LcnServiceCall):
"""Toggle output port."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000
)
address_connection = self.get_address_connection(call)
address_connection.toggle_output(output.value, transition)
class Relays(LcnServiceCall):
"""Set the relays status."""
schema = LcnServiceCall.schema.extend(
{vol.Required(CONF_STATE): is_relays_states_string}
)
def __call__(self, call):
"""Execute service call."""
states = [
pypck.lcn_defs.RelayStateModifier[state] for state in call.data[CONF_STATE]
]
address_connection = self.get_address_connection(call)
address_connection.control_relays(states)
class Led(LcnServiceCall):
"""Set the led state."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_LED): vol.All(vol.Upper, vol.In(LED_PORTS)),
vol.Required(CONF_STATE): vol.All(vol.Upper, vol.In(LED_STATUS)),
}
)
def __call__(self, call):
"""Execute service call."""
led = pypck.lcn_defs.LedPort[call.data[CONF_LED]]
led_state = pypck.lcn_defs.LedStatus[call.data[CONF_STATE]]
address_connection = self.get_address_connection(call)
address_connection.control_led(led, led_state)
class VarAbs(LcnServiceCall):
"""Set absolute value of a variable or setpoint.
Variable has to be set as counter!
Regulator setpoints can also be set using R1VARSETPOINT, R2VARSETPOINT.
"""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS)
),
vol.Optional(CONF_VALUE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0)
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(call.data[CONF_UNIT_OF_MEASUREMENT])
address_connection = self.get_address_connection(call)
address_connection.var_abs(var, value, unit)
class VarReset(LcnServiceCall):
"""Reset value of variable or setpoint."""
schema = LcnServiceCall.schema.extend(
{vol.Required(CONF_VARIABLE): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS))}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
address_connection = self.get_address_connection(call)
address_connection.var_reset(var)
class VarRel(LcnServiceCall):
"""Shift value of a variable, setpoint or threshold."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS + THRESHOLDS)
),
vol.Optional(CONF_VALUE, default=0): int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
vol.Optional(CONF_RELVARREF, default="current"): vol.All(
vol.Upper, vol.In(RELVARREF)
),
}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(call.data[CONF_UNIT_OF_MEASUREMENT])
value_ref = pypck.lcn_defs.RelVarRef[call.data[CONF_RELVARREF]]
address_connection = self.get_address_connection(call)
address_connection.var_rel(var, value, unit, value_ref)
class LockRegulator(LcnServiceCall):
"""Locks a regulator setpoint."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(SETPOINTS)),
vol.Optional(CONF_STATE, default=False): bool,
}
)
def __call__(self, call):
"""Execute service call."""
setpoint = pypck.lcn_defs.Var[call.data[CONF_SETPOINT]]
state = call.data[CONF_STATE]
reg_id = pypck.lcn_defs.Var.to_set_point_id(setpoint)
address_connection = self.get_address_connection(call)
address_connection.lock_regulator(reg_id, state)
class SendKeys(LcnServiceCall):
"""Sends keys (which executes bound commands)."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_KEYS): vol.All(
vol.Upper, cv.matches_regex(r"^([A-D][1-8])+$")
),
vol.Optional(CONF_STATE, default="hit"): vol.All(
vol.Upper, vol.In(SENDKEYCOMMANDS)
),
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
keys = [[False] * 8 for i in range(4)]
key_strings = zip(call.data[CONF_KEYS][::2], call.data[CONF_KEYS][1::2])
for table, key in key_strings:
table_id = ord(table) - 65
key_id = int(key) - 1
keys[table_id][key_id] = True
delay_time = call.data[CONF_TIME]
if delay_time != 0:
hit = pypck.lcn_defs.SendKeyCommand.HIT
if pypck.lcn_defs.SendKeyCommand[call.data[CONF_STATE]] != hit:
raise ValueError(
"Only hit command is allowed when sending deferred keys."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(call.data[CONF_TIME_UNIT])
address_connection.send_keys_hit_deferred(keys, delay_time, delay_unit)
else:
state = pypck.lcn_defs.SendKeyCommand[call.data[CONF_STATE]]
address_connection.send_keys(keys, state)
class LockKeys(LcnServiceCall):
"""Lock keys."""
schema = LcnServiceCall.schema.extend(
{
vol.Optional(CONF_TABLE, default="a"): vol.All(
vol.Upper, cv.matches_regex(r"^[A-D]$")
),
vol.Required(CONF_STATE): is_key_lock_states_string,
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
states = [
pypck.lcn_defs.KeyLockStateModifier[state]
for state in call.data[CONF_STATE]
]
table_id = ord(call.data[CONF_TABLE]) - 65
delay_time = call.data[CONF_TIME]
if delay_time != 0:
if table_id != 0:
raise ValueError(
"Only table A is allowed when locking keys for a specific time."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(call.data[CONF_TIME_UNIT])
address_connection.lock_keys_tab_a_temporary(delay_time, delay_unit, states)
else:
address_connection.lock_keys(table_id, states)
address_connection.request_status_locked_keys_timeout()
class DynText(LcnServiceCall):
"""Send dynamic text to LCN-GTxD displays."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_ROW): vol.All(int, vol.Range(min=1, max=4)),
vol.Required(CONF_TEXT): vol.All(str, vol.Length(max=60)),
}
)
def __call__(self, call):
"""Execute service call."""
row_id = call.data[CONF_ROW] - 1
text = call.data[CONF_TEXT]
address_connection = self.get_address_connection(call)
address_connection.dyn_text(row_id, text)
class Pck(LcnServiceCall):
"""Send arbitrary PCK command."""
schema = LcnServiceCall.schema.extend({vol.Required(CONF_PCK): str})
def __call__(self, call):
"""Execute service call."""
pck = call.data[CONF_PCK]
address_connection = self.get_address_connection(call)
address_connection.pck(pck)
|
py
|
1a5d4495eec8153c050a3f21c00656c4369a74d8
|
#from six.moves import xrange
import common
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# basics
tf.app.flags.DEFINE_integer('batch_size', 24,
"""Number of images to process in a batch.""")
# naming
tf.app.flags.DEFINE_string('UPDATE_OPS_COLLECTION', 'update_ops',
""" collection of ops to be updated""")
tf.app.flags.DEFINE_string('LOSSES_COLLECTION', 'losses',
""" collection of ops to be updated""")
# training
tf.app.flags.DEFINE_integer('num_epochs_per_decay', 2,
"""number of epochs per decay""")
tf.app.flags.DEFINE_float('initial_learning_rate', 0.01,
"""initial learning rate""")
tf.app.flags.DEFINE_float('learning_rate_decay', 0.1,
"""decay factor of learning rate""")
tf.app.flags.DEFINE_float('momentum', 0.9,
"""momentum of optimization""")
# inference of resnet
def inference_resnet(images, num_output=1):
with tf.variable_scope('1'):
conv1 = common.conv(images, 64, ksize=7, stride=2)
conv1 = common.bn(conv1)
pool1 = common.max_pool(conv1)
with tf.variable_scope('2'):
stack2 = common.res_stack(pool1, [256, 256, 256], pool=False)
with tf.variable_scope('3'):
stack3 = common.res_stack(stack2, [512, 512, 512, 512])
with tf.variable_scope('4'):
stack4 = common.res_stack(stack3, [1024, 1024, 1024,
1024, 1024, 1024])
with tf.variable_scope('5'):
stack5 = common.res_stack(stack4, [2048, 2048, 2048])
pool5 = common.global_ave_pool(stack5)
with tf.variable_scope('fc'):
fc = common.fc(pool5, num_output)
return tf.sigmoid(fc)
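# Hedged usage sketch (TF1-style, matching the tf.app.flags API used above; the
# 224x224x3 input shape is an assumption, not dictated by this file):
#   images = tf.placeholder(tf.float32, [FLAGS.batch_size, 224, 224, 3])
#   prob = inference_resnet(images, num_output=1)  # sigmoid output in [0, 1]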
|
py
|
1a5d449cc3a787d002fa34495fae3eb6c2ef9de8
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NOTE: This module shall not be used by external projects. It will be moved
to neutron-lib in due course, and then it can be used from there.
"""
import contextlib
from neutron_lib.db import utils as db_utils
from oslo_log import log as logging
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _noop_context_manager():
yield
def safe_creation(context, create_fn, delete_fn, create_bindings,
transaction=True):
    '''This function wraps the logic of object creation in a safe, atomic way.
    In case of exception, the object is deleted.
    More information on when this method can be used can be found in the
    developer guide - Effective Neutron: Database interaction section.
    https://docs.openstack.org/neutron/latest/contributor/effective_neutron.html
:param context: context
:param create_fn: function without arguments that is called to create
object and returns this object.
:param delete_fn: function that is called to delete an object. It is
called with object's id field as an argument.
:param create_bindings: function that is called to create bindings for
an object. It is called with object's id field as an argument.
:param transaction: if true the whole operation will be wrapped in a
transaction. if false, no transaction will be used.
'''
cm = (context.session.begin(subtransactions=True)
if transaction else _noop_context_manager())
with cm:
obj = create_fn()
try:
value = create_bindings(obj['id'])
except Exception:
with excutils.save_and_reraise_exception():
try:
delete_fn(obj['id'])
except Exception as e:
LOG.error("Cannot clean up created object %(obj)s. "
"Exception: %(exc)s", {'obj': obj['id'],
'exc': e})
return obj, value
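# Hedged usage sketch (all names below are illustrative, not part of this module):
#   port, binding = safe_creation(
#       context,
#       create_fn=lambda: _create_port_db(context, port_data),
#       delete_fn=lambda port_id: _delete_port_db(context, port_id),
#       create_bindings=lambda port_id: _bind_port_to_host(context, port_id, host))
# If _bind_port_to_host raises, the freshly created port is deleted before the
# exception is re-raised.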
def model_query(context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if db_utils.model_query_scope_is_project(context, model):
query_filter = (model.tenant_id == context.tenant_id)
if query_filter is not None:
query = query.filter(query_filter)
return query
|
py
|
1a5d44c9d3c27953c77f98714300cdc01da9f4a6
|
from __future__ import annotations
from functools import lru_cache
from typing import FrozenSet
import numpy as np
import numpy.linalg as la
from rl_rpsr.core import Intent, Interaction
from rl_rpsr.pomdp import POMDP_Model
from .search import outcome, outcome_matrix
__all__ = ['RPSR_Model']
class RPSR_Model:
def __init__(self, pomdp_model: POMDP_Model, I: FrozenSet[Intent]):
self.pomdp_model = pomdp_model
self.V = self.outcome_matrix(I)
self.V_PI = la.pinv(self.V)
# (|A|, |O|, |I|, |I|) array, M_{ao} \in \mathbb{R}^{|I|\times|I|}
self.M_ao = np.einsum(
'ij,aojk,kl->aoil', self.V.T, pomdp_model.G, self.V_PI.T
)
# (|A|, |O|, |I|, |I|) array, M_{ao} \in \mathbb{R}^{|I|\times|I|}
self.M_aoI = np.array(
[
[
np.column_stack(
[self._m(i.prepend(Interaction(a, o))) for i in I]
)
for o in range(pomdp_model.observation_space.n)
]
for a in range(pomdp_model.action_space.n)
]
)
# (|A|, |O|, |I|) array, m_{ao} \in \mathbb{R}^{|I|}
self.m_ao = np.array(
[
[
self._m(Intent.actionless(Interaction(a, o).as_test()))
for o in range(pomdp_model.observation_space.n)
]
for a in range(pomdp_model.action_space.n)
]
)
# (|I|, |A|) array
self.R = self.V_PI @ pomdp_model.R
self.discount = pomdp_model.discount
self.actions = pomdp_model.actions
self.observations = pomdp_model.observations
self.start = self.rpsr(pomdp_model.start)
self.action_space = pomdp_model.action_space
self.observation_space = pomdp_model.observation_space
self.reward_set = set((self.V @ self.R).flatten())
self.reward_range = min(self.reward_set), max(self.reward_set)
self.rank = self.V.shape[1]
def outcome(self, intent: Intent):
return outcome(self.pomdp_model, intent)
def outcome_matrix(self, intents: FrozenSet[Intent]):
return outcome_matrix(self.pomdp_model, intents)
def rpsr(self, belief):
return belief @ self.V
@lru_cache(maxsize=None)
def _m(self, intent):
# TODO decompose outcome(intent) into individual matrices stuff
# return self.V_PI @ self.M(intent.test) @ self...
return self.V_PI @ self.outcome(intent)
def dynamics(self, state, action, observation):
M = self.M_aoI[action, observation]
m = self.m_ao[action, observation]
return (state @ M) / (state @ m)
def observation_probs(self, state, action):
return state @ self.m_ao[action, :, :].T
def expected_reward(self, state, action):
return state @ self.R[:, action]
def R_as_pomdp(self):
return self.V @ self.R
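# Hedged usage sketch (assumes a POMDP_Model instance `pomdp` and an intent set
# `intents` built elsewhere, e.g. by the search module imported above):
#   model = RPSR_Model(pomdp, intents)
#   state = model.start
#   state = model.dynamics(state, action=0, observation=1)  # predictive-state update
#   r = model.expected_reward(state, action=0)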
|
py
|
1a5d44e2f44b7eb30cddd0b8cada99040af66325
|
from PIL import Image,ImageTk
import random as rand
import turtle as trtl
# import required module
import os
from playsound import playsound
import tkinter.messagebox
wn = trtl.Screen()
#si = tk.Tk()
si = trtl.Turtle()
caller = trtl.Turtle()
st = trtl.Turtle()
rSt = trtl.Turtle()
user = trtl.Turtle()
point = trtl.Turtle()
#score = trtl.Turtle()
count = 0
caller_list = ['abrupt stop', 'speed bump','right','left','go']
caller_txt = []
#Message ="Abrupt stop = DOWN speed bump = SHIFT right = RIGHT left = LEFT go =UP"
tkinter.messagebox.showinfo('Directions', 'Abrupt stop = DOWN, speed bump = ENTER, right = RIGHT, left = LEFT, go = UP')
#wn = tk.Tk()
#wn.screensize("400x400")
# --- Window Creator ---
wn.title("Vroom Vroom: BTS Edition")
#wn.window_height(150)
wn.setup(height=500,width=500)
#caller_img ="huh_resize.gif"
#user_label = Label(wn,image=caller_img)
# ---IMages ---
as_img = "vAb.gif"
wn.addshape(as_img)
sb_img = "vSb_resize.gif"
wn.addshape(sb_img)
r_img = "right_resize.gif"
wn.addshape(r_img)
l_img = "vL.gif"
wn.addshape(l_img)
go_img = "go_resize.gif"
wn.addshape(go_img)
caller_img = "huh_resize.gif"
wn.addshape(caller_img)
# --- Functions ---
x = -191
y = 180
caller.pu()
caller.goto(x,y)
si.pu()
si.ht()
si.goto(-120,150)
start_pic = "st_resize.gif"
wn.addshape(start_pic)
st.shape(start_pic)
st.pu()
st.goto(0,180)
restart_pic = "restart_resized.gif"
wn.addshape(restart_pic)
rSt.shape(restart_pic)
rSt.pu()
rSt.goto(0,180)
user_pic = "plyr_resize.gif"
wn.addshape(user_pic)
user.shape(user_pic)
user.pu()
user.goto(0,-50)
def startPress(x, y):
caller.shape(caller_img)
st.ht()
rSt.st()
#print('playing sound using native player')
playsound('vvvcopy.wav')
wn.delay(10)
si.clear()
callerChoose()
# callerSoundOs()
def rStPress(x, y):
rSt.ht()
st.st()
si.clear()
# gameMain()
def callerChoose():
#st.ht()
global caller_txt
si.ht()
caller_txt = rand.choice(caller_list)
si.write(caller_txt,font=("Arial",15))
print(caller_txt)
callerSoundOs()
#wn.delay(10)
#si.ht()
def callerSound():
#caller_pic = "huh_resize.gif"
if caller_txt == caller_list[0]:
print("Ab")
playsound('vDa_AS.wav')
cAs()
elif caller_txt == caller_list[1]:
print("sb")
playsound('vS_sb.wav')
cSb()
elif caller_txt == caller_list[2]:
print("right")
playsound('vR.wav')
cR()
elif caller_txt == caller_list[3]:
print("left")
playsound('vL.wav')
cL()
#vroomVroom_wn.addshape(caller_pic)
#caller.shape(caller_pic)
elif caller_txt == caller_list[4]:
print('go')
playsound('vUp_go.wav')
cGo()
def callerSoundOs():
global caller_txt
print("cSOs")
#caller_pic = "huh_resize.gif"
if caller_txt == caller_list[0]:
print("ab")
playsound('vDa_AS.wav')
#cAs()
elif caller_txt == caller_list[1]:
print("sb")
playsound('vS_sb.wav')
#cSb()
elif caller_txt == caller_list[2]:
print("r")
playsound('vR.wav')
#cR()
elif caller_txt == caller_list[3]:
print("l")
playsound('vL.wav')
#cL()
#vroomVroom_wn.addshape(caller_pic)
#caller.shape(caller_pic)
elif caller_txt == caller_list[4]:
print("g")
playsound('vUp_go.wav')
#cGo()
def playSound(caller_txt):
    # Map each call to its sound file and caller image (mirrors callerSound above).
    if caller_txt == 'abrupt stop':
        playsound('vDa_AS.wav')
        caller.shape(as_img)
    elif caller_txt == 'speed bump':
        playsound('vS_sb.wav')
        caller.shape(sb_img)
    elif caller_txt == 'right':
        playsound('vR.wav')
        caller.shape(r_img)
    elif caller_txt == 'left':
        playsound('vL.wav')
        caller.shape(l_img)
    elif caller_txt == 'go':
        playsound('vUp_go.wav')
        caller.shape(go_img)
def abruptStop():
user.shape(as_img)
caller.shape(as_img)
def speedBump():
user.shape(sb_img)
caller.shape(sb_img)
def rightTurn():
user.shape(r_img)
def leftTurn():
user.shape(l_img)
def goFD():
user.shape(go_img)
def cAs():
caller.shape(as_img)
def cSb():
caller.shape(sb_img)
def cR():
caller.shape(r_img)
def cL():
caller.shape(l_img)
def cGo():
caller.shape(go_img)
def gameMain():
#caller.shapesize(10)
callerChoose()
#callerSoundOs()
#callerSound()
gameMain()
st.onclick(startPress)
rSt.onclick(rStPress)
wn.onkeypress(abruptStop,'Down')
wn.onkeypress(speedBump,'Return')
wn.onkeypress(rightTurn,'Right')
wn.onkeypress(leftTurn,'Left')
wn.onkeypress(goFD,'Up')
# createCaller()
# createRestart_btn()
# createUser()
# createStart_btn()
wn.listen()
wn.mainloop()
|
py
|
1a5d45f533167c1207455f5b038aef36ce0ed5c3
|
'''
Created on 29 Oct 2013
@author: michael
'''
from django import template
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from tunobase.core import models, nodes
register = template.Library()
@register.tag
def smart_query_string(parser, token):
'''
Outputs current GET query string with additions appended.
Additions are provided in token pairs.
'''
args = token.split_contents()
additions = args[1:]
addition_pairs = []
while additions:
addition_pairs.append(additions[0:2])
additions = additions[2:]
return nodes.SmartQueryStringNode(addition_pairs)
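# Illustrative template usage (argument values are made up; additions are consumed
# as name/value pairs by the pairing loop above):
#   {% smart_query_string 'page' 2 'sort' 'name' %}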
@register.assignment_tag
def gallery_surrounding_image_pks(gallery, gallery_image_pk):
gallery_images = list(gallery.images.all())
previous_image_pk = None
next_image_pk = None
for i, gallery_image in enumerate(gallery_images):
if gallery_image.pk == gallery_image_pk:
try:
previous_image_pk = gallery_images[i+1].pk
except IndexError:
pass
try:
next_image_pk = gallery_images[i-1].pk
except IndexError:
pass
break
return {
'next_image_pk': next_image_pk,
'previous_image_pk': previous_image_pk
}
@register.filter
def letterify(value):
return str(unichr(65 + value))
@register.filter
def class_name(obj):
return obj.__class__.__name__
|
py
|
1a5d4684b2e254c14ddfd2c1af7edb6b3170bdb4
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitbank(Exchange):
def describe(self):
return self.deep_extend(super(bitbank, self).describe(), {
'id': 'bitbank',
'name': 'bitbank',
'countries': ['JP'],
'version': 'v1',
'has': {
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchDepositAddress': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTrades': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'4h': '4hour',
'8h': '8hour',
'12h': '12hour',
'1d': '1day',
'1w': '1week',
},
'hostname': 'bitbank.cc',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/37808081-b87f2d9c-2e59-11e8-894d-c1900b7584fe.jpg',
'api': {
'public': 'https://public.{hostname}',
'private': 'https://api.{hostname}',
'markets': 'https://api.{hostname}',
},
'www': 'https://bitbank.cc/',
'doc': 'https://docs.bitbank.cc/',
'fees': 'https://bitbank.cc/docs/fees/',
},
'api': {
'public': {
'get': [
'{pair}/ticker',
'{pair}/depth',
'{pair}/transactions',
'{pair}/transactions/{yyyymmdd}',
'{pair}/candlestick/{candletype}/{yyyymmdd}',
],
},
'private': {
'get': [
'user/assets',
'user/spot/order',
'user/spot/active_orders',
'user/spot/trade_history',
'user/withdrawal_account',
],
'post': [
'user/spot/order',
'user/spot/cancel_order',
'user/spot/cancel_orders',
'user/spot/orders_info',
'user/request_withdrawal',
],
},
'markets': {
'get': [
'spot/pairs',
],
},
},
'exceptions': {
'20001': AuthenticationError,
'20002': AuthenticationError,
'20003': AuthenticationError,
'20005': AuthenticationError,
'20004': InvalidNonce,
'40020': InvalidOrder,
'40021': InvalidOrder,
'40025': ExchangeError,
'40013': OrderNotFound,
'40014': OrderNotFound,
'50008': PermissionDenied,
'50009': OrderNotFound,
'50010': OrderNotFound,
'60001': InsufficientFunds,
'60005': InvalidOrder,
},
})
def fetch_markets(self, params={}):
response = self.marketsGetSpotPairs(params)
#
# {
# "success": 1,
# "data": {
# "pairs": [
# {
# "name": "btc_jpy",
# "base_asset": "btc",
# "quote_asset": "jpy",
# "maker_fee_rate_base": "0",
# "taker_fee_rate_base": "0",
# "maker_fee_rate_quote": "-0.0002",
# "taker_fee_rate_quote": "0.0012",
# "unit_amount": "0.0001",
# "limit_max_amount": "1000",
# "market_max_amount": "10",
# "market_allowance_rate": "0.2",
# "price_digits": 0,
# "amount_digits": 4,
# "is_enabled": True,
# "stop_order": False,
# "stop_order_and_cancel": False
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data')
pairs = self.safe_value(data, 'pairs', [])
result = []
for i in range(0, len(pairs)):
entry = pairs[i]
id = self.safe_string(entry, 'name')
baseId = self.safe_string(entry, 'base_asset')
quoteId = self.safe_string(entry, 'quote_asset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
maker = self.safe_number(entry, 'maker_fee_rate_quote')
taker = self.safe_number(entry, 'taker_fee_rate_quote')
pricePrecisionString = self.safe_string(entry, 'price_digits')
priceLimit = self.parse_precision(pricePrecisionString)
precision = {
'price': int(pricePrecisionString),
'amount': self.safe_integer(entry, 'amount_digits'),
}
active = self.safe_value(entry, 'is_enabled')
minAmountString = self.safe_string(entry, 'unit_amount')
minCost = Precise.string_mul(minAmountString, priceLimit)
limits = {
'amount': {
'min': self.safe_number(entry, 'unit_amount'),
'max': self.safe_number(entry, 'limit_max_amount'),
},
'price': {
'min': self.parse_number(priceLimit),
'max': None,
},
'cost': {
'min': self.parse_number(minCost),
'max': None,
},
}
result.append({
'info': entry,
'id': id,
'symbol': symbol,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'precision': precision,
'limits': limits,
'active': active,
'maker': maker,
'taker': taker,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetPairTicker(self.extend(request, params))
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
response = self.publicGetPairDepth(self.extend(request, params))
orderbook = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(orderbook, 'timestamp')
return self.parse_order_book(orderbook, symbol, timestamp)
def parse_trade(self, trade, market=None):
timestamp = self.safe_integer(trade, 'executed_at')
symbol = None
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
id = self.safe_string_2(trade, 'transaction_id', 'trade_id')
takerOrMaker = self.safe_string(trade, 'maker_taker')
fee = None
feeCost = self.safe_number(trade, 'fee_amount_quote')
if feeCost is not None:
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
orderId = self.safe_string(trade, 'order_id')
type = self.safe_string(trade, 'type')
side = self.safe_string(trade, 'side')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetPairTransactions(self.extend(request, params))
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'transactions', [])
return self.parse_trades(trades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# "0.02501786",
# "0.02501786",
# "0.02501786",
# "0.02501786",
# "0.0000",
# 1591488000000
# ]
#
return [
self.safe_integer(ohlcv, 5),
self.safe_number(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
]
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
date = self.milliseconds()
date = self.ymd(date)
date = date.split('-')
request = {
'pair': market['id'],
'candletype': self.timeframes[timeframe],
'yyyymmdd': ''.join(date),
}
response = self.publicGetPairCandlestickCandletypeYyyymmdd(self.extend(request, params))
#
# {
# "success":1,
# "data":{
# "candlestick":[
# {
# "type":"5min",
# "ohlcv":[
# ["0.02501786","0.02501786","0.02501786","0.02501786","0.0000",1591488000000],
# ["0.02501747","0.02501953","0.02501747","0.02501953","0.3017",1591488300000],
# ["0.02501762","0.02501762","0.02500392","0.02500392","0.1500",1591488600000],
# ]
# }
# ],
# "timestamp":1591508668190
# }
# }
#
data = self.safe_value(response, 'data', {})
candlestick = self.safe_value(data, 'candlestick', [])
first = self.safe_value(candlestick, 0, {})
ohlcv = self.safe_value(first, 'ohlcv', [])
return self.parse_ohlcvs(ohlcv, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetUserAssets(params)
#
# {
# "success": "1",
# "data": {
# "assets": [
# {
# "asset": "jpy",
# "amount_precision": "4",
# "onhand_amount": "0.0000",
# "locked_amount": "0.0000",
# "free_amount": "0.0000",
# "stop_deposit": False,
# "stop_withdrawal": False,
# "withdrawal_fee": {
# "threshold": "30000.0000",
# "under": "550.0000",
# "over": "770.0000"
# }
# },
# {
# "asset": "btc",
# "amount_precision": "8",
# "onhand_amount": "0.00000000",
# "locked_amount": "0.00000000",
# "free_amount": "0.00000000",
# "stop_deposit": False,
# "stop_withdrawal": False,
# "withdrawal_fee": "0.00060000"
# },
# ]
# }
# }
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
data = self.safe_value(response, 'data', {})
assets = self.safe_value(data, 'assets', [])
for i in range(0, len(assets)):
balance = assets[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'free_amount')
account['used'] = self.safe_string(balance, 'locked_amount')
account['total'] = self.safe_string(balance, 'onhand_amount')
result[code] = account
return self.parse_balance(result)
def parse_order_status(self, status):
statuses = {
'UNFILLED': 'open',
'PARTIALLY_FILLED': 'open',
'FULLY_FILLED': 'closed',
'CANCELED_UNFILLED': 'canceled',
'CANCELED_PARTIALLY_FILLED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'order_id')
marketId = self.safe_string(order, 'pair')
symbol = None
if marketId and not market and (marketId in self.markets_by_id):
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'ordered_at')
price = self.safe_number(order, 'price')
amount = self.safe_number(order, 'start_amount')
filled = self.safe_number(order, 'executed_amount')
remaining = self.safe_number(order, 'remaining_amount')
average = self.safe_number(order, 'average_price')
status = self.parse_order_status(self.safe_string(order, 'status'))
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
return self.safe_order({
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': None,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
})
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'amount': self.amount_to_precision(symbol, amount),
'side': side,
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostUserSpotOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'pair': market['id'],
}
response = self.privatePostUserSpotCancelOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return data
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'pair': market['id'],
}
response = self.privateGetUserSpotOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['count'] = limit
if since is not None:
request['since'] = int(since / 1000)
response = self.privateGetUserSpotActiveOrders(self.extend(request, params))
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {}
if market is not None:
request['pair'] = market['id']
if limit is not None:
request['count'] = limit
if since is not None:
request['since'] = int(since / 1000)
response = self.privateGetUserSpotTradeHistory(self.extend(request, params))
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'trades', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privateGetUserWithdrawalAccount(self.extend(request, params))
data = self.safe_value(response, 'data', {})
        # Not sure about this if there could be more than one account...
accounts = self.safe_value(data, 'accounts', [])
firstAccount = self.safe_value(accounts, 0, {})
address = self.safe_string(firstAccount, 'address')
return {
'currency': currency,
'address': address,
'tag': None,
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
if not ('uuid' in params):
raise ExchangeError(self.id + ' uuid is required for withdrawal')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
data = self.safe_value(response, 'data', {})
txid = self.safe_string(data, 'txid')
return {
'info': response,
'id': txid,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.implode_hostname(self.urls['api'][api]) + '/'
if (api == 'public') or (api == 'markets'):
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce
url += self.version + '/' + self.implode_params(path, params)
if method == 'POST':
body = self.json(query)
auth += body
else:
auth += '/' + self.version + '/' + path
if query:
query = self.urlencode(query)
url += '?' + query
auth += '?' + query
headers = {
'Content-Type': 'application/json',
'ACCESS-KEY': self.apiKey,
'ACCESS-NONCE': nonce,
'ACCESS-SIGNATURE': self.hmac(self.encode(auth), self.encode(self.secret)),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
success = self.safe_integer(response, 'success')
data = self.safe_value(response, 'data')
if not success or not data:
errorMessages = {
'10000': 'URL does not exist',
'10001': 'A system error occurred. Please contact support',
'10002': 'Invalid JSON format. Please check the contents of transmission',
'10003': 'A system error occurred. Please contact support',
'10005': 'A timeout error occurred. Please wait for a while and try again',
'20001': 'API authentication failed',
'20002': 'Illegal API key',
'20003': 'API key does not exist',
'20004': 'API Nonce does not exist',
'20005': 'API signature does not exist',
'20011': 'Two-step verification failed',
'20014': 'SMS authentication failed',
'30001': 'Please specify the order quantity',
'30006': 'Please specify the order ID',
'30007': 'Please specify the order ID array',
'30009': 'Please specify the stock',
'30012': 'Please specify the order price',
'30013': 'Trade Please specify either',
'30015': 'Please specify the order type',
'30016': 'Please specify asset name',
'30019': 'Please specify uuid',
'30039': 'Please specify the amount to be withdrawn',
'40001': 'The order quantity is invalid',
'40006': 'Count value is invalid',
'40007': 'End time is invalid',
'40008': 'end_id Value is invalid',
'40009': 'The from_id value is invalid',
'40013': 'The order ID is invalid',
'40014': 'The order ID array is invalid',
'40015': 'Too many specified orders',
'40017': 'Incorrect issue name',
'40020': 'The order price is invalid',
'40021': 'The trading classification is invalid',
'40022': 'Start date is invalid',
'40024': 'The order type is invalid',
'40025': 'Incorrect asset name',
'40028': 'uuid is invalid',
'40048': 'The amount of withdrawal is illegal',
'50003': 'Currently, self account is in a state where you can not perform the operation you specified. Please contact support',
'50004': 'Currently, self account is temporarily registered. Please try again after registering your account',
'50005': 'Currently, self account is locked. Please contact support',
'50006': 'Currently, self account is locked. Please contact support',
'50008': 'User identification has not been completed',
'50009': 'Your order does not exist',
'50010': 'Can not cancel specified order',
'50011': 'API not found',
'60001': 'The number of possessions is insufficient',
'60002': 'It exceeds the quantity upper limit of the tender buying order',
'60003': 'The specified quantity exceeds the limit',
'60004': 'The specified quantity is below the threshold',
'60005': 'The specified price is above the limit',
'60006': 'The specified price is below the lower limit',
'70001': 'A system error occurred. Please contact support',
'70002': 'A system error occurred. Please contact support',
'70003': 'A system error occurred. Please contact support',
'70004': 'We are unable to accept orders as the transaction is currently suspended',
'70005': 'Order can not be accepted because purchase order is currently suspended',
'70006': 'We can not accept orders because we are currently unsubscribed ',
'70009': 'We are currently temporarily restricting orders to be carried out. Please use the limit order.',
'70010': 'We are temporarily raising the minimum order quantity as the system load is now rising.',
}
errorClasses = self.exceptions
code = self.safe_string(data, 'code')
message = self.safe_string(errorMessages, code, 'Error')
ErrorClass = self.safe_value(errorClasses, code)
if ErrorClass is not None:
raise ErrorClass(message)
else:
raise ExchangeError(self.id + ' ' + self.json(response))
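# Hedged usage sketch (not part of the generated file): standard ccxt client calls,
# using only methods implemented above; the API credentials and the availability of
# the 'BTC/JPY' market are assumptions.
#
#   import ccxt
#   exchange = ccxt.bitbank({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#   ticker = exchange.fetch_ticker('BTC/JPY')    # public endpoint
#   book = exchange.fetch_order_book('BTC/JPY')  # public endpoint
#   balance = exchange.fetch_balance()           # private endpoint, needs credentials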
|
py
|
1a5d46df806503f809f746450ff5da4810a60d46
|
from os import path
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name='apt-ios-repo',
version='0.1',
packages=['apt_ios_repo'],
url='https://github.com/arturgoms/python-apt-ios-repo',
license='MIT',
author='Artur Gomes',
author_email='[email protected]',
description='Python library to manage and query APT repositories from iOS Jailbreak community',
long_description_content_type='text/markdown',
long_description=long_description,
python_requires='>=3.9',
install_requires=['requests'],
)
|
py
|
1a5d47402811ddcc138071396ea6981e29051185
|
import os
import sys
from setuptools import find_packages
from setuptools import setup
version = '1.28.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
'setuptools>=41.6.0',
]
if not os.environ.get('SNAP_BUILD'):
install_requires.extend([
# We specify the minimum acme and certbot version as the current plugin
# version for simplicity. See
# https://github.com/certbot/certbot/issues/8761 for more info.
f'acme>={version}',
f'certbot>={version}',
])
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Unset SNAP_BUILD when building wheels '
'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
install_requires.append('packaging')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-linode',
version=version,
description="Linode DNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
python_requires='>=3.7',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-linode = certbot_dns_linode._internal.dns_linode:Authenticator',
],
},
)
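# Hedged usage note (not part of setup.py): once installed, certbot selects the plugin
# through the 'dns-linode' entry point declared above, e.g.
#
#   certbot certonly --authenticator dns-linode -d example.com
#
# Credential and propagation options follow certbot's usual --dns-linode-* flag
# pattern; the exact flag names are assumptions here and are documented by the plugin.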
|
py
|
1a5d48128b6e896ef5e0c5e182cc18ee90875a05
|
# Generated by Django 2.1.7 on 2019-07-15 11:25
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Builder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=100)),
],
),
migrations.CreateModel(
name='BuildHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('build_id', models.IntegerField()),
('status', models.CharField(max_length=50)),
('port_name', models.CharField(max_length=100)),
('time_start', models.DateTimeField()),
('time_elapsed', models.TimeField(null=True)),
('watcher_id', models.IntegerField()),
('builder_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ports.Builder')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('name', models.TextField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Commit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash', models.CharField(max_length=50)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Dependency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Maintainer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
('domain', models.CharField(default='', max_length=50)),
('github', models.CharField(default='', max_length=50)),
],
),
migrations.CreateModel(
name='OSDistribution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('osx_version', models.CharField(db_index=True, max_length=20)),
('month', models.IntegerField(db_index=True)),
('year', models.IntegerField(db_index=True)),
],
),
migrations.CreateModel(
name='Port',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('portdir', models.CharField(max_length=100)),
('description', models.TextField(default='')),
('homepage', models.URLField(default='')),
('epoch', models.BigIntegerField(default=0)),
('platforms', models.TextField(null=True)),
('long_description', models.TextField(default='')),
('version', models.CharField(default='', max_length=100)),
('revision', models.IntegerField(default=0)),
('closedmaintainer', models.BooleanField(default=False)),
('name', models.CharField(db_index=True, max_length=100)),
('license', models.CharField(default='', max_length=100)),
('replaced_by', models.CharField(max_length=100, null=True)),
('categories', models.ManyToManyField(db_index=True, related_name='category', to='ports.Category')),
],
),
migrations.CreateModel(
name='PortInstallation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('port', models.CharField(max_length=100)),
('version', models.CharField(max_length=100)),
('requested', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('os_version', models.CharField(max_length=10)),
('xcode_version', models.CharField(max_length=10)),
('os_arch', models.CharField(max_length=20)),
('macports_version', models.CharField(max_length=10)),
('timestamp', models.DateTimeField()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.CharField(db_index=True, max_length=36)),
('osx_version', models.CharField(max_length=10)),
('macports_version', models.CharField(max_length=10)),
('xcode_version', models.CharField(max_length=10)),
('os_arch', models.CharField(max_length=10)),
('full_json', django.contrib.postgres.fields.jsonb.JSONField()),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='UUID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.CharField(db_index=True, max_length=36)),
],
),
migrations.CreateModel(
name='Variant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variant', models.CharField(default='', max_length=100)),
('port', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ports', to='ports.Port')),
],
),
migrations.AddField(
model_name='submission',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ports.UUID'),
),
migrations.AddField(
model_name='portinstallation',
name='submission',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ports.Submission'),
),
migrations.AddField(
model_name='osdistribution',
name='users',
field=models.ManyToManyField(related_name='users', to='ports.User'),
),
migrations.AddField(
model_name='maintainer',
name='ports',
field=models.ManyToManyField(related_name='maintainers', to='ports.Port'),
),
migrations.AddField(
model_name='dependency',
name='dependencies',
field=models.ManyToManyField(to='ports.Port'),
),
migrations.AddField(
model_name='dependency',
name='port_name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dependent_port', to='ports.Port'),
),
migrations.AddIndex(
model_name='submission',
index=models.Index(fields=['timestamp'], name='ports_submi_timesta_ef1081_idx'),
),
migrations.AddIndex(
model_name='submission',
index=models.Index(fields=['user'], name='ports_submi_user_id_aa7fba_idx'),
),
migrations.AddIndex(
model_name='submission',
index=models.Index(fields=['user', '-timestamp'], name='ports_submi_user_id_e8af69_idx'),
),
migrations.AddIndex(
model_name='submission',
index=models.Index(fields=['user', 'timestamp'], name='ports_submi_user_id_ade7b9_idx'),
),
migrations.AddIndex(
model_name='submission',
index=models.Index(fields=['os_version'], name='ports_submi_os_vers_0b58c5_idx'),
),
migrations.AddIndex(
model_name='portinstallation',
index=models.Index(fields=['submission'], name='ports_porti_submiss_9f52b0_idx'),
),
migrations.AddIndex(
model_name='portinstallation',
index=models.Index(fields=['port'], name='ports_porti_port_ff6a5e_idx'),
),
migrations.AddIndex(
model_name='maintainer',
index=models.Index(fields=['github'], name='ports_maint_github_4a7ac9_idx'),
),
migrations.AddIndex(
model_name='maintainer',
index=models.Index(fields=['name', 'domain'], name='ports_maint_name_edcd1d_idx'),
),
migrations.AlterUniqueTogether(
name='maintainer',
unique_together={('name', 'domain', 'github')},
),
migrations.AddIndex(
model_name='dependency',
index=models.Index(fields=['port_name'], name='ports_depen_port_na_647c68_idx'),
),
migrations.AlterUniqueTogether(
name='dependency',
unique_together={('port_name', 'type')},
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['port_name', 'builder_name', '-build_id'], name='ports_build_port_na_f1d25f_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['port_name', 'status', 'builder_name'], name='ports_build_port_na_94f9e4_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['port_name', 'builder_name'], name='ports_build_port_na_3bfa4c_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['-time_start'], name='ports_build_time_st_67b2ce_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['port_name'], name='ports_build_port_na_aea401_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['status'], name='ports_build_status_76abdf_idx'),
),
migrations.AddIndex(
model_name='buildhistory',
index=models.Index(fields=['builder_name'], name='ports_build_builder_289e95_idx'),
),
]
|
py
|
1a5d48c2ad47646e0d23354d4b0c03cb99a251cc
|
# -*- coding: utf-8 -*-
'''
Starting or restarting of services and daemons.
===============================================
Services are defined as system daemons typically started with system init or
rc scripts, services can be defined as running or dead.
.. code-block:: yaml
httpd:
service:
- running
The service can also be set to be started at runtime via the enable option:
.. code-block:: yaml
openvpn:
service:
- running
- enable: True
By default if a service is triggered to refresh due to a watch statement the
service is by default restarted. If the desired behaviour is to reload the
service, then set the reload value to True:
.. code-block:: yaml
redis:
service:
- running
- enable: True
- reload: True
- watch:
- pkg: redis
'''
def __virtual__():
'''
Only make these states available if a service provider has been detected or
assigned for this minion
'''
return 'service' if 'service.start' in __salt__ else False
def _enabled_used_error(ret):
ret['result'] = False
ret['comment'] = (
'Service {0} uses non-existent option "enabled". ' +
'Perhaps "enable" option was intended?'
).format(ret['name'])
return ret
def _enable(name, started, result=True, **kwargs):
'''
Enable the service
'''
ret = {'name': name,
'changes': {},
'result': result,
'comment': ''}
# is service available?
if not _available(name, ret):
return ret
# Check to see if this minion supports enable
    if 'service.enable' not in __salt__ or 'service.enabled' not in __salt__:
if started is True:
ret['comment'] = ('Enable is not available on this minion,'
' service {0} started').format(name)
return ret
elif started is None:
ret['comment'] = ('Enable is not available on this minion,'
' service {0} is in the desired state'
).format(name)
return ret
else:
ret['comment'] = ('Enable is not available on this minion,'
' service {0} is dead').format(name)
return ret
# Service can be enabled
if __salt__['service.enabled'](name):
# Service is enabled
if started is True:
ret['comment'] = ('Service {0} is already enabled,'
' and is running').format(name)
return ret
elif started is None:
ret['comment'] = ('Service {0} is already enabled,'
' and is in the desired state').format(name)
return ret
else:
ret['comment'] = ('Service {0} is already enabled,'
' and is dead').format(name)
return ret
# Service needs to be enabled
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} set to be enabled'.format(name)
return ret
if __salt__['service.enable'](name, **kwargs):
# Service has been enabled
if started is True:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been enabled,'
' and is running').format(name)
return ret
elif started is None:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been enabled,'
' and is in the desired state').format(name)
return ret
else:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been enabled,'
' and is dead').format(name)
return ret
# Service failed to be enabled
if started is True:
ret['changes'][name] = True
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to start at boot,'
' but the service is running').format(name)
return ret
elif started is None:
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to start at boot,'
' but the service was already running').format(name)
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to start at boot,'
' and the service is dead').format(name)
return ret
def _disable(name, started, result=True, **kwargs):
'''
Disable the service
'''
ret = {'name': name,
'changes': {},
'result': result,
'comment': ''}
# is service available?
if not _available(name, ret):
ret['result'] = True
return ret
# is enable/disable available?
    if 'service.disable' not in __salt__ or 'service.disabled' not in __salt__:
if started is True:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} started').format(name)
return ret
elif started is None:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is in the desired state'
).format(name)
return ret
else:
ret['comment'] = ('Disable is not available on this minion,'
' service {0} is dead').format(name)
return ret
# Service can be disabled
if __salt__['service.disabled'](name):
# Service is disabled
if started is True:
ret['changes'][name] = True
ret['comment'] = ('Service {0} is already disabled,'
' and is running').format(name)
return ret
elif started is None:
ret['comment'] = ('Service {0} is already disabled,'
' and is in the desired state').format(name)
return ret
else:
ret['comment'] = ('Service {0} is already disabled,'
' and is dead').format(name)
return ret
# Service needs to be disabled
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} set to be disabled'.format(name)
return ret
if __salt__['service.disable'](name, **kwargs):
# Service has been disabled
if started is True:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been disabled,'
' and is running').format(name)
return ret
elif started is None:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been disabled,'
' and is in the desired state').format(name)
return ret
else:
ret['changes'][name] = True
ret['comment'] = ('Service {0} has been disabled,'
' and is dead').format(name)
return ret
# Service failed to be disabled
if started is True:
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and is running').format(name)
return ret
elif started is None:
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, but the service was already running'
).format(name)
return ret
else:
ret['changes'][name] = True
ret['result'] = False
ret['comment'] = ('Failed when setting service {0} to not start'
' at boot, and the service is dead').format(name)
return ret
def _available(name, ret):
# Check if the service is available
avail = False
if 'service.available' in __salt__:
avail = __salt__['service.available'](name)
elif 'service.get_all' in __salt__:
avail = name in __salt__['service.get_all']()
if not avail:
ret['result'] = False
ret['comment'] = 'The named service {0} is not available'.format(name)
return avail
def running(name, enable=None, sig=None, **kwargs):
'''
Verify that the service is running
name
The name of the init or rc script used to manage the service
enable
Set the service to be enabled at boot time, True sets the service to
be enabled, False sets the named service to be disabled. The default
is None, which does not enable or disable anything.
sig
The string to search for when looking for the service process with ps
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Check for common error: using enabled option instead of enable
if 'enabled' in kwargs:
return _enabled_used_error(ret)
# Check if the service is available
if not _available(name, ret):
return ret
# See if the service is already running
if __salt__['service.status'](name, sig):
ret['comment'] = 'The service {0} is already running'.format(name)
if enable is True:
return _enable(name, None, **kwargs)
elif enable is False:
return _disable(name, None, **kwargs)
else:
return ret
# Run the tests
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} is set to start'.format(name)
return ret
changes = {name: __salt__['service.start'](name)}
if not changes[name]:
if enable is True:
return _enable(name, False, result=False, **kwargs)
elif enable is False:
return _disable(name, False, result=False, **kwargs)
else:
ret['result'] = False
ret['comment'] = 'Service {0} failed to start'.format(name)
return ret
if enable is True:
return _enable(name, True, **kwargs)
elif enable is False:
return _disable(name, True, **kwargs)
else:
ret['changes'] = changes
ret['comment'] = 'Started Service {0}'.format(name)
return ret
def dead(name, enable=None, sig=None, **kwargs):
'''
Ensure that the named service is dead by stopping the service if it is running
name
The name of the init or rc script used to manage the service
enable
Set the service to be enabled at boot time, ``True`` sets the service
to be enabled, ``False`` sets the named service to be disabled. The
default is ``None``, which does not enable or disable anything.
sig
The string to search for when looking for the service process with ps
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Check for common error: using enabled option instead of enable
if 'enabled' in kwargs:
return _enabled_used_error(ret)
# Check if the service is available
if not _available(name, ret):
ret['result'] = True
return ret
if not __salt__['service.status'](name, sig):
ret['comment'] = 'The service {0} is already dead'.format(name)
if enable is True:
return _enable(name, None, **kwargs)
elif enable is False:
return _disable(name, None, **kwargs)
else:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} is set to be killed'.format(name)
return ret
ret['changes'] = {name: __salt__['service.stop'](name)}
if not ret['changes'][name]:
ret['result'] = False
ret['comment'] = 'Service {0} failed to die'.format(name)
if enable is True:
return _enable(name, True, result=False)
elif enable is False:
return _disable(name, True, result=False)
else:
ret['result'] = False
ret['comment'] = 'Service {0} failed to die'.format(name)
return ret
else:
if enable is True:
return _enable(name, False)
elif enable is False:
return _disable(name, False)
else:
ret['comment'] = 'Service {0} was killed'.format(name)
return ret
def enabled(name, **kwargs):
'''
    Verify that the service is enabled on boot. Only use this state if you
    don't want to manage the running process; if you want to enable a service
    that is also being managed, use the ``enable: True`` option of the
    ``running`` or ``dead`` function instead.
name
The name of the init or rc script used to manage the service
'''
return _enable(name, None, **kwargs)
def disabled(name, **kwargs):
'''
    Verify that the service is disabled on boot. Only use this state if you
    don't want to manage the running process; if you want to disable a service
    that is also being managed, use the ``enable: False`` option of the
    ``running`` or ``dead`` function instead.
name
The name of the init or rc script used to manage the service
'''
return _disable(name, None, **kwargs)
def mod_watch(name, sig=None, reload=False, full_restart=False):
'''
The service watcher, called to invoke the watch command.
name
The name of the init or rc script used to manage the service
sig
The string to search for when looking for the service process with ps
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
action = ''
if __salt__['service.status'](name, sig):
if 'service.reload' in __salt__ and reload:
restart_func = __salt__['service.reload']
action = 'reload'
elif 'service.full_restart' in __salt__ and full_restart:
restart_func = __salt__['service.full_restart']
action = 'fully restart'
else:
restart_func = __salt__['service.restart']
action = 'restart'
else:
restart_func = __salt__['service.start']
action = 'start'
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service is set to be {0}ed'.format(action)
return ret
result = restart_func(name)
ret['changes'] = {name: result}
ret['result'] = result
ret['comment'] = 'Service {0}ed'.format(action) if result else \
'Failed to {0} the service'.format(action)
return ret
|
py
|
1a5d48c80242985919e8d12199478ff2910c771f
|
def solve_eig_numpy(A, B):
"""
A, B .... scipy sparse matrices
Uses numpy to solve the A*x = lambda*B*x eigenproblem.
"""
from numpy import array, dot
from numpy.linalg import inv, eig, eigh
A = A.todense()
B = B.todense()
print "inverting"
M = dot(inv(B), A)
print "solving"
w, v = eig(M)
print "sorting the eigenvalues"
r = []
for i in range(len(w)):
vec = v[:, i]
r.append((w[i], vec))
r.sort(key=lambda x: x[0])
print "eigenvalues:"
eigs = []
for w, vec in r:
if w > 0:
break
print w
eigs.append(vec)
return r
def convert_mat(mtx):
"""
Converts a scipy matrix "mtx" to a pysparse matrix.
"""
from pysparse import spmatrix
mtx = mtx.tocsr()
A = spmatrix.ll_mat(*mtx.shape)
for i in xrange( mtx.indptr.shape[0] - 1 ):
ii = slice( mtx.indptr[i], mtx.indptr[i+1] )
n_in_row = ii.stop - ii.start
A.update_add_at( mtx.data[ii], [i] * n_in_row, mtx.indices[ii] )
return A
def solve_eig_pysparse(A, B, n_eigs=4, verbose=False):
"""
Solves the generalized eigenvalue problem.
A, B ..... scipy matrices
n_eigs ... number of eigenvalues to solve for
returns a list of (lmbd, vec), where lmbd is the eigenvalue and vec is the
eigenvector
"""
from pysparse import jdsym, precon, itsolvers
if verbose:
print "converting to pysparse"
n = A.shape[0]
A = convert_mat(A)
B = convert_mat(B)
if verbose:
print "solving (%d x %d)" % (n, n)
Atau = A.copy()
tau = -1
Atau.shift(-tau, B)
K = precon.jacobi(Atau)
A = A.to_sss()
B = B.to_sss()
kconv, lmbd, Q, it, it_in = jdsym.jdsym(A, B, K, n_eigs, tau, 1e-6, 150,
itsolvers.qmrs)
if verbose:
print "number of converged eigenvalues:", kconv
r = []
for i in range(len(lmbd)):
vec = Q[:, i]
r.append((lmbd[i], vec))
r.sort(key=lambda x: x[0])
print "eigenvalues:"
eigs = []
for w, vec in r:
if w > 0:
break
print w
eigs.append(vec)
return r
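# Hedged usage sketch (comments only; this module uses Python 2 print syntax): A and B
# are scipy sparse matrices of the generalized problem A*x = lambda*B*x, for example
# assembled by a FEM code, and pysparse must be installed for solve_eig_pysparse.
#
#   eigs = solve_eig_pysparse(A, B, n_eigs=4, verbose=True)
#   for lmbd, vec in eigs:
#       pass  # lmbd is the eigenvalue, vec the corresponding eigenvector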
|
py
|
1a5d4a0e825ca32e0dedcc4d34bde78fe1a41b04
|
import os
import traceback
import sys
from tensorflow.core.framework import summary_pb2
from tensorflow.python.summary.writer import writer_cache
import tensorflow as tf
import numpy as np
class InMemoryFinetune(tf.estimator.SessionRunHook):
def __init__(self, config_to_eval, model, eval_dir, X, Y, X_test, Y_test, name=None, every_n_iter=100):
if every_n_iter is None or every_n_iter <= 0:
raise ValueError('invalid every_n_iter=%s.' % every_n_iter)
self._current_finetune = model
self._config_to_finetune = config_to_eval
self._name = name
self._every_n_iter = every_n_iter
self._timer = tf.estimator.SecondOrStepTimer(every_steps=every_n_iter)
self._eval_dir = eval_dir
self.train_data = (X, Y)
self.test_data = (X_test, Y_test)
self._iter_count = 0
def begin(self):
self._timer.reset()
self._iter_count = 0
def after_create_session(self, session, coord):
"""Does first run which shows the metrics before training."""
self._evaluate(session)
def _evaluate(self, session):
try:
with tf.Graph().as_default():
from finetune import Classifier
model = Classifier(**self._config_to_finetune)
if self._current_finetune.saver.variables:
model.saver.variables = {
k: v.copy() for k, v in self._current_finetune.saver.variables.items()
if "global_step" not in k and "Adam" not in k
}
model.saver.fallback_ = {
k: v for k, v in self._current_finetune.saver.fallback.items() if "global_step" not in k
}
train_x, train_y = self.train_data
model.fit(train_x, train_y)
test_x, test_y = self.test_data
test_accuracy = np.mean(model.predict(test_x) == test_y)
train_accuracy = np.mean(model.predict(train_x) == train_y)
except IOError as e:
traceback.print_exc(file=sys.stdout)
test_accuracy = -1.0
train_accuracy = -1.0
global_step = session.run(tf.compat.v1.train.get_or_create_global_step())
directory = os.path.join(self._eval_dir, "..", "finetuning")
if not os.path.exists(directory):
os.makedirs(directory)
summary_writer = writer_cache.FileWriterCache.get(directory)
summary_proto = summary_pb2.Summary()
summary_proto.value.add(tag="finetuning/{}_train_accurary".format(self._name), simple_value=float(train_accuracy))
summary_proto.value.add(tag="finetuning/{}_test_accurary".format(self._name), simple_value=float(test_accuracy))
summary_writer.add_summary(summary_proto, global_step)
summary_writer.flush()
self._timer.update_last_triggered_step(self._iter_count)
def after_run(self, run_context, run_values):
self._iter_count += 1
if self._timer.should_trigger_for_step(self._iter_count):
self._evaluate(run_context.session)
def end(self, session):
self._evaluate(session)
def make_in_memory_finetune_hooks(model, estimator):
hooks = []
for f in model.config.in_memory_finetune:
hooks.append(InMemoryFinetune(
config_to_eval=f["config"],
model=model,
eval_dir=estimator.eval_dir(),
X=f["X"],
Y=f["Y"],
X_test=f["X_test"],
Y_test=f["Y_test"],
name=f["name"],
every_n_iter=f["every_n_iter"]
))
return hooks
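# Hedged sketch of the config entries consumed by make_in_memory_finetune_hooks
# (keys inferred from the loop above; the surrounding finetune config system and the
# variable names are assumptions):
#
#   model.config.in_memory_finetune = [{
#       "config": {...},                      # kwargs for the throwaway Classifier
#       "X": X, "Y": Y,                       # data refit on during training
#       "X_test": X_test, "Y_test": Y_test,   # held-out data for the side accuracy
#       "name": "baseline",
#       "every_n_iter": 100,
#   }]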
|
py
|
1a5d4a17643fa83ef2c240d9b50d54ace1c3971e
|
#!/usr/bin/env python
# coding: utf-8
# # Ans 1.
# In[16]:
def getrange(fibfun):
def fibseries():
print("Enter the range of Fibonacci Series")
end = int(input("Enter ending no: "))
fibfun(end)
return fibseries
# In[17]:
@getrange
def fibonacciSeries(ending):
starting = 0
num = 0
first = 1
for i in range(ending):
print("Fibonacci Series :", num)
starting = first
first = num
num = starting + first
# In[18]:
fibonacciSeries()
# # Ans 2)
# In[60]:
file = open("Day8-AA","w")
file.write("Hey there!, How are you")
file.close()
# In[61]:
file = open("Day8-AA","r")
fileData = file.read()
print(fileData)
file.close()
# In[62]:
file = open("Day8-AA","a")
file.write("?")
file.close()
# In[63]:
file = open("Day8-AA","r")
fileData = file.read()
print(fileData)
file.close()
# In[64]:
file = open("Day8-AA", "r")
try :
file.write("I am fine thankyou")
except :
print("File is opened in read only mode, not writable!!!")
fileData = file.read()
print(fileData)
file.close()
# In[ ]:
|
py
|
1a5d4c804568e992d5cbd2ac2518afa0b8d3bf1b
|
# EQUILIBRIUM PROFILES
import numpy as np
import csv
from matplotlib import pyplot as plt
def eqProfs(state):
#Compiling CSV data into independent variables
dataArr = []
with open('output_runModel/equilibrium.csv', 'r') as equilibriumFile:
csvRead = csv.reader(equilibriumFile)
for row in csvRead:
dataArr.append(row)
dataArr = [ele for ele in dataArr if ele != []]
lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf = dataArr[0],dataArr[1],dataArr[2], dataArr[3], dataArr[4]
timeTaken = ''.join(dataArr[5])
airPressure_vCoord = np.array(state['air_pressure_on_interface_levels']).flatten()
airPressure_vCoord = [round((float(ele)/1000),0) for ele in airPressure_vCoord]
# Plotting Schwarzchild deltas.
fig = plt.figure()
lwFluxNet = [float(i) for i in lwFluxNet]
plt.plot(lwFluxNet,airPressure_vCoord)
plt.gca().invert_yaxis()
plt.xlabel("Net longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.savefig("graphs/equilibrium_netFlux_vertical.png")
# Plotting upwelling longwave flux (p)
fig = plt.figure()
lwFluxUp = [float(i) for i in lwFluxUp]
plt.gca().invert_yaxis()
plt.plot(lwFluxUp, airPressure_vCoord)
plt.xlabel("Upwelling longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.savefig("graphs/equilibrium_upFlux_vertical.png")
# Plotting downwelling longwave flux (p)
fig = plt.figure()
lwFluxDown = [float(i) for i in lwFluxDown]
plt.xlabel("Downwelling longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(lwFluxDown,airPressure_vCoord)
plt.savefig("graphs/equilibrium_downFlux_vertical.png")
# Plotting heating rate (p)
fig = plt.figure()
heatRate = [float(i) for i in heatRate]
plt.xlabel("Longwave Heating Rate")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(heatRate,airPressure_vCoord)
plt.savefig("graphs/equilibrium_heatRate_vertical.png")
fig = plt.figure()
airTemperatureProf = [float(i) for i in airTemperatureProf]
plt.xlabel("Air Temperature (K)")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(airTemperatureProf,airPressure_vCoord[:28])
plt.savefig("graphs//equilibrium_airT_vertical.png")
return 0.
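# Hedged note (inferred from the parsing above): output_runModel/equilibrium.csv is
# expected to contain, in order, rows for net LW flux, upwelling LW flux, downwelling
# LW flux, heating rate, the air temperature profile, and the elapsed time, while
# `state` is assumed to be a sympl/climt-style state dict exposing
# 'air_pressure_on_interface_levels' in Pa.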
|
py
|
1a5d4c9e8b5d539ff6203434bb164e44fc02005e
|
"""
COSE_Encrypt0: Encrypted Messages with Implicit Key
COSE_Encrypt0 = [
Headers,
ciphertext: bstr / nil,
]
"""
from typing import Optional, TYPE_CHECKING
from cose import utils
from cose.messages import enccommon, cosemessage
if TYPE_CHECKING:
from cose.keys.symmetric import SK
CBOR = bytes
@cosemessage.CoseMessage.record_cbor_tag(16)
class Enc0Message(enccommon.EncCommon):
context = "Encrypt0"
cbor_tag = 16
@classmethod
def from_cose_obj(cls, cose_obj: list, *args, **kwargs) -> 'Enc0Message':
return super().from_cose_obj(cose_obj)
def __init__(self,
phdr: Optional[dict] = None,
uhdr: Optional[dict] = None,
payload: bytes = b'',
external_aad: bytes = b'',
key: Optional['SK'] = None):
"""
Create a COSE_encrypt0 message.
:param phdr: Protected header bucket.
:param uhdr: Unprotected header bucket.
:param payload: The payload (will be encrypted and authenticated).
:param external_aad: External data (is authenticated but not transported in the message).
:param key: The Symmetric COSE key for encryption/decryption of the message
:returns: Returns a COSE Encrypt0 message object.
"""
if phdr is None:
phdr = {}
if uhdr is None:
uhdr = {}
super().__init__(phdr, uhdr, payload, external_aad, key)
def encode(self, tag: bool = True, encrypt: bool = True, *args, **kwargs) -> CBOR:
"""
Encode and protect the COSE_Encrypt0 message.
:param tag: Boolean value which indicates if the COSE message will have a CBOR tag.
:param encrypt: Boolean which activates or deactivates the payload protection.
:return: Returns a CBOR-encoded COSE Encrypt0 message.
"""
if encrypt:
message = [self.phdr_encoded, self.uhdr_encoded, self.encrypt()]
else:
message = [self.phdr_encoded, self.uhdr_encoded, self.payload]
res = super(Enc0Message, self).encode(message, tag)
return res
def __repr__(self) -> str:
phdr, uhdr = self._hdr_repr()
return f'<COSE_Encrypt0: [{phdr}, {uhdr}, {utils.truncate(self._payload)}]>'
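# Hedged usage sketch (not part of the original module): `key` is assumed to be a
# Symmetric COSE key (the 'SK' type hinted above) and the headers are assumed to carry
# a suitable AEAD algorithm; decode()/decrypt() come from the CoseMessage/EncCommon
# base classes and are assumptions here.
#
#   msg = Enc0Message(phdr={...}, uhdr={...}, payload=b'secret', key=key)
#   encoded = msg.encode()                    # tagged, CBOR-encoded, encrypted
#   decoded = Enc0Message.decode(encoded)
#   decoded.key = key
#   plaintext = decoded.decrypt()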
|
py
|
1a5d4d0a77990d9eea21c4211db9ea5110db8440
|
from keras import backend as K
from keras.objectives import categorical_crossentropy
import tensorflow as tf
lambda_rpn_regr = 1.0
lambda_rpn_class = 1.0
lambda_cls_regr = 1.0
lambda_cls_class = 1.0
epsilon = 1e-4
def rpn_loss_regr(num_anchors):
def rpn_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, :, 4 * num_anchors:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
return lambda_rpn_regr * K.sum(
y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
return rpn_loss_regr_fixed_num
def rpn_loss_cls(num_anchors):
def rpn_loss_cls_fixed_num(y_true, y_pred):
return lambda_rpn_class * K.sum(y_true[:, :, :, :num_anchors] * K.binary_crossentropy(y_pred[:, :, :, :], y_true[:, :, :, num_anchors:])) / K.sum(epsilon + y_true[:, :, :, :num_anchors])
return rpn_loss_cls_fixed_num
def class_loss_regr(num_classes):
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4*num_classes:] - y_pred
x_abs = K.abs(x)
# TODO: check!?
# x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
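# Hedged usage sketch (comments only): in a Faster R-CNN style setup these closures are
# passed to Keras' compile for the two-output RPN and detector models; num_anchors,
# num_classes and the model objects are assumptions, not defined in this module.
#
#   model_rpn.compile(optimizer='adam',
#                     loss=[rpn_loss_cls(num_anchors), rpn_loss_regr(num_anchors)])
#   model_classifier.compile(optimizer='adam',
#                            loss=[class_loss_cls, class_loss_regr(num_classes - 1)])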
|
py
|
1a5d4e1643ef8d5d750036bd858efcc46d3bf9b4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from src.librecatastro.domain.address import Address
from src.librecatastro.domain.cadaster_entry.cadaster_entry import CadasterEntry
from src.librecatastro.domain.construction import Construction
from src.librecatastro.domain.location import Location
from src.utils.catastro_logger import CadastroLogger
logger = CadastroLogger(__name__).logger
class CadasterEntryHTML(CadasterEntry):
"""Cadaster class, obtained from parsing HTML, that inheritates from Cadaster, and
stores all the information about a surface and its properties"""
def __init__(self, description_data):
self.address = Address(description_data[u'Localización'])
self.cadaster = description_data[u'Referencia catastral']
self.type = description_data[u'Clase'] if u'Clase' in description_data else None
self.use = description_data[u'Uso principal'] if u'Uso principal' in description_data else None
self.surface = description_data[u'Superficie construida'] if u'Superficie construida' in description_data else None
self.year = description_data[u'Año construcción'] if u'Año construcción' in description_data else None
self.location = Location(description_data[u'Longitud'], description_data[u'Latitud'])
self.gsurface = description_data[u'Superficie gráfica'] if u'Superficie gráfica' in description_data else None
self.constructions = [Construction(x) for x in description_data[u'Construcciones']]
self.picture = description_data[u'GráficoParcela'] if u'GráficoParcela' in description_data else None
self.timestamp = str(datetime.now())
super().__init__(self)
|
py
|
1a5d4ed847a1c3789b0b10bbf7b5e6041070299e
|
def gen_fn():
result = yield 1
print('result of yield: {}'.format(result))
result2 = yield 2
print('result of 2nd yield: {}'.format(result2))
return 'done'
def caller_fn():
gen = gen_fn()
rv = yield from gen
print('return value of yield-from: {}'
.format(rv))
# Make a generator from the
# generator function.
caller = caller_fn()
caller.send(None)
caller.send('hello')
caller.send('goodbye')
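# Expected output when this script runs (each send() resumes the paused yield with the
# value sent in; the final send raises StopIteration once caller_fn returns):
#   result of yield: hello
#   result of 2nd yield: goodbye
#   return value of yield-from: done
#   Traceback (most recent call last):
#     ...
#   StopIteration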
|
py
|
1a5d4f56eecff6d42dd4ca38622519868b309409
|
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
# Defines common utilities for responsibleai tests
from dice_ml.utils import helpers
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_breast_cancer, \
make_classification, load_boston
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
def create_sklearn_random_forest_classifier(X, y):
rfc = RandomForestClassifier(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def create_lightgbm_classifier(X, y):
lgbm = LGBMClassifier(boosting_type='gbdt', learning_rate=0.1,
max_depth=5, n_estimators=200, n_jobs=1,
random_state=777)
model = lgbm.fit(X, y)
return model
def create_xgboost_classifier(X, y):
xgb = XGBClassifier(learning_rate=0.1, max_depth=3, n_estimators=100,
n_jobs=1, random_state=777)
model = xgb.fit(X, y)
return model
def create_sklearn_svm_classifier(X, y, probability=True):
clf = svm.SVC(gamma=0.001, C=100., probability=probability,
random_state=777)
model = clf.fit(X, y)
return model
def create_sklearn_logistic_regressor(X, y, pipeline=False):
lin = LogisticRegression(solver='liblinear')
if pipeline:
lin = Pipeline([('lin', lin)])
model = lin.fit(X, y)
return model
def create_sklearn_random_forest_regressor(X, y):
rfc = RandomForestRegressor(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def create_iris_data():
# Import Iris dataset
iris = load_iris()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=0)
feature_names = [name.replace(' (cm)', '') for name in iris.feature_names]
classes = iris.target_names
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, X_test, y_train, y_test, feature_names, classes
def create_cancer_data():
breast_cancer_data = load_breast_cancer()
classes = breast_cancer_data.target_names.tolist()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(
breast_cancer_data.data, breast_cancer_data.target,
test_size=0.2, random_state=0)
feature_names = breast_cancer_data.feature_names
classes = breast_cancer_data.target_names.tolist()
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, X_test, y_train, y_test, feature_names, classes
def create_binary_classification_dataset():
X, y = make_classification()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=0)
classes = np.unique(y_train).tolist()
feature_names = ["col" + str(i) for i in list(range(X_train.shape[1]))]
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, y_train, X_test, y_test, classes
def create_boston_data():
# Import Boston housing dataset
boston = load_boston()
# Split data into train and test
X_train, X_test, y_train, y_validation = train_test_split(
boston.data, boston.target,
test_size=0.2, random_state=7)
return X_train, X_test, y_train, y_validation, boston.feature_names
def create_adult_income_dataset():
dataset = helpers.load_adult_income_dataset()
continuous_features = ['age', 'hours_per_week']
target_name = 'income'
target = dataset[target_name]
classes = list(np.unique(target))
categorical_features = list(set(dataset.columns) -
set(continuous_features) -
set([target_name]))
# Split data into train and test
data_train, data_test, y_train, y_test = train_test_split(
dataset, target,
test_size=0.2, random_state=7, stratify=target)
return data_train, data_test, y_train, y_test, categorical_features, \
continuous_features, target_name, classes
def create_complex_classification_pipeline(
X_train, y_train, continuous_features, categorical_features):
# We create the preprocessing pipelines for both
# numeric and categorical data.
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
transformations = ColumnTransformer(
transformers=[
('num', numeric_transformer, continuous_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
pipeline = Pipeline(steps=[('preprocessor', transformations),
('classifier', RandomForestClassifier())])
return pipeline.fit(X_train, y_train)
def create_models_classification(X_train, y_train):
svm_model = create_sklearn_svm_classifier(X_train, y_train)
log_reg_model = create_sklearn_logistic_regressor(X_train, y_train)
xgboost_model = create_xgboost_classifier(X_train, y_train)
lgbm_model = create_lightgbm_classifier(X_train, y_train)
rf_model = create_sklearn_random_forest_classifier(X_train, y_train)
return [svm_model, log_reg_model, xgboost_model, lgbm_model, rf_model]
def create_models_regression(X_train, y_train):
rf_model = create_sklearn_random_forest_regressor(X_train, y_train)
return [rf_model]
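# A minimal usage sketch (added here for illustration, not part of the original
# test suite); it assumes scikit-learn, lightgbm and xgboost are installed and
# simply exercises the helpers above on the iris data.
if __name__ == '__main__':
    X_train, X_test, y_train, y_test, feature_names, classes = create_iris_data()
    models = create_models_classification(X_train, y_train)
    print("trained models:", [type(m).__name__ for m in models])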
|
py
|
1a5d4f5c9efcb2d75a5efe6d38da2c43de9e7d13
|
'''
Description:
version:
Author: LiQiang
Date: 2021-01-21 11:45:22
LastEditTime: 2021-01-21 13:05:07
'''
import argparse
import cv2
import torch
import os
import os.path as osp
import glob
from mmdet.apis import inference_detector, init_detector
from mmcv.utils import mkdir_or_exist
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
        '--video_in_dir', type=str, default='', help='input video directory')
parser.add_argument(
        '--video_out_dir', type=str, default='', help='output video directory')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
    parser.add_argument(
        '--show', action='store_true', help='display each frame while processing')
args = parser.parse_args()
return args
def list_files(path, ends):
files = []
list_dir = os.walk(path)
for maindir, subdir, all_file in list_dir:
for filename in all_file:
apath = os.path.join(maindir, filename)
if apath.endswith(ends):
files.append(apath)
return files
def detectvideo(model, video_in, video_out, args):
cap = cv2.VideoCapture(video_in)
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # set the codec for the output video
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    # read the input video's frame rate
    fps_video = cap.get(cv2.CAP_PROP_FPS)
    # important: the writer must match the input frame size and frame rate
    videoWriter = cv2.VideoWriter(video_out, fourcc, fps_video, (frame_width, frame_height))
count=0
print('Press "Esc", "q" or "Q" to exit.')
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pbar = tqdm(range(length))
for i in pbar:
torch.cuda.empty_cache()
ret_val, img = cap.read()
if ret_val:
if count<0:
count+=1
                print('Skipping frame {}'.format(count))
continue
#############################
result = inference_detector(model, img)
# ch = cv2.waitKey(1)
# if ch == 27 or ch == ord('q') or ch == ord('Q'):
# break
frame=model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=False,thickness=1)
if args.show:
cv2.imshow('frame',frame)
cv2.waitKey(1)
            if frame is not None and len(frame) >= 1:
                # write the rendered frame to the output video
                videoWriter.write(frame)
                count += 1
if count == 1000:
break
#############################
"""
        # if count%24==0:  # process every 24th frame to see results faster
# result = inference_detector(model, img)
# ch = cv2.waitKey(1)
# if ch == 27 or ch == ord('q') or ch == ord('Q'):
# break
# frame=model.show_result(
# img, result, score_thr=args.score_thr, wait_time=1, show=False,thickness=1,font_scale=1)
# cv2.imshow('frame',frame)
# if len(frame)>=1 or frame:
        #         # write the frame to the output video
# videoWriter.write(frame)
# count+=1
# print('Write {} in result Successfully!'.format(count))
# else:
# count+=1
"""
else:
            print('Failed to read a frame; stopping.')
break
pbar.set_description("Processing video %s, frame : %d" % (video_in.replace(args.video_in_dir, ''), i))
cap.release()
videoWriter.release()
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
input_videos = list_files(args.video_in_dir, '.mp4')
print(input_videos)
for video in input_videos:
video_out = video.replace(args.video_in_dir, args.video_out_dir)
dir_name = osp.abspath(osp.dirname(video_out))
mkdir_or_exist(dir_name)
if 'RGB' in video:
continue
detectvideo(model, video, video_out, args)
if __name__ == '__main__':
main()
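# Example invocation (file and directory names here are hypothetical):
#   python video_demo.py configs/faster_rcnn.py checkpoints/model.pth \
#       --video_in_dir ./videos --video_out_dir ./results --score-thr 0.3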
|
py
|
1a5d4fc15a22223a5c90aca1533e340aa25a221d
|
import numpy as np
from collections import OrderedDict
from concurrent import futures as futures
from os import path as osp
from pathlib import Path
from skimage import io
from pdb import set_trace
def get_image_index_str(img_idx, use_prefix_id=False):
if use_prefix_id:
return '{:07d}'.format(img_idx)
else:
return '{:06d}'.format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
img_idx_str = get_image_index_str(idx, use_prefix_id)
img_idx_str += file_tail
prefix = Path(prefix)
if training:
file_path = Path('training') / info_type / img_idx_str
else:
file_path = Path('testing') / info_type / img_idx_str
if exist_check and not (prefix / file_path).exists():
raise ValueError('file not exist: {}'.format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
info_type='image_2',
use_prefix_id=False):
exist_check=False
return get_kitti_info_path(idx, prefix, info_type, '.png', training,
relative_path, exist_check, use_prefix_id)
def get_label_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
info_type='label_2',
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, info_type, '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_velodyne_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path, exist_check, use_prefix_id)
def get_calib_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_pose_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'pose', '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array([[float(info) for info in x[4:8]]
for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array([[float(info) for info in x[8:11]]
for x in content
]).reshape(-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array([[float(info) for info in x[11:14]]
for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array([float(x[14])
for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0], ))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
return annotations
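# For reference, a KITTI label line as parsed above has 15 whitespace-separated
# fields (16 when a detection score is appended); values below are illustrative:
#   Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59
# i.e. name, truncated, occluded, alpha, bbox (4), dimensions h w l (3),
# location x y z (3), rotation_y[, score].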
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
"""
KITTI annotation format version 2:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 4
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam: ...
P2: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
"""
root_path = Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
info = {}
pc_info = {'num_features': 4}
calib_info = {}
image_info = {'image_idx': idx}
annotations = None
if velodyne:
pc_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['image_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['image_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['image_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
info['image'] = image_info
info['point_cloud'] = pc_info
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]
]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]
]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]
]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
calib_info['P0'] = P0
calib_info['P1'] = P1
calib_info['P2'] = P2
calib_info['P3'] = P3
calib_info['R0_rect'] = rect_4x4
calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo
info['calib'] = calib_info
if annotations is not None:
info['annos'] = annotations
add_difficulty_to_annos(info)
return info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def get_waymo_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
pose=False,
image_ids=7481,
extend_matrix=True,
num_worker=1,
relative_path=True,
with_imageshape=True,
max_sweeps=5):
"""
Waymo annotation format version like KITTI:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 6
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam0: ...
P0: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
"""
root_path = Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
print(f"Sample idx:{idx}")
info = {}
pc_info = {'num_features': 6}
calib_info = {}
image_info = {'image_idx': idx}
annotations = None
if velodyne:
pc_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path, use_prefix_id=True)
points = np.fromfile(
Path(path) / pc_info['velodyne_path'], dtype=np.float32)
points = np.copy(points).reshape(-1, pc_info['num_features'])
info['timestamp'] = np.int64(points[0, -1])
# values of the last dim are all the timestamp
image_info['image_path'] = get_image_path(
idx,
path,
training,
relative_path,
info_type='image_0',
use_prefix_id=True)
if with_imageshape:
image_info['image_shape'] = np.array([1,1], dtype=np.int32)
#img_path = image_info['image_path']
#if relative_path:
# img_path = str(root_path / img_path)
#image_info['image_shape'] = np.array(
# io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(
idx,
path,
training,
relative_path,
info_type='label_all',
use_prefix_id=True)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
info['image'] = image_info
info['point_cloud'] = pc_info
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False, use_prefix_id=True)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]
]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]
]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]
]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]
]).reshape([3, 4])
P4 = np.array([float(info) for info in lines[4].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
P4 = _extend_matrix(P4)
R0_rect = np.array([
float(info) for info in lines[5].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
Tr_velo_to_cam = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
calib_info['P0'] = P0
calib_info['P1'] = P1
calib_info['P2'] = P2
calib_info['P3'] = P3
calib_info['P4'] = P4
calib_info['R0_rect'] = rect_4x4
calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
info['calib'] = calib_info
if pose:
pose_path = get_pose_path(
idx, path, training, relative_path=False, use_prefix_id=True)
info['pose'] = np.loadtxt(pose_path)
if annotations is not None:
info['annos'] = annotations
info['annos']['camera_id'] = info['annos'].pop('score')
add_difficulty_to_annos(info)
sweeps = []
prev_idx = idx
while len(sweeps) < max_sweeps:
prev_info = {}
prev_idx -= 1
prev_info['velodyne_path'] = get_velodyne_path(
prev_idx,
path,
training,
relative_path,
exist_check=False,
use_prefix_id=True)
if_prev_exists = osp.exists(
Path(path) / prev_info['velodyne_path'])
if if_prev_exists:
prev_points = np.fromfile(
Path(path) / prev_info['velodyne_path'], dtype=np.float32)
prev_points = np.copy(prev_points).reshape(
-1, pc_info['num_features'])
prev_info['timestamp'] = np.int64(prev_points[0, -1])
prev_pose_path = get_pose_path(
prev_idx,
path,
training,
relative_path=False,
use_prefix_id=True)
prev_info['pose'] = np.loadtxt(prev_pose_path)
sweeps.append(prev_info)
else:
break
info['sweeps'] = sweeps
return info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def kitti_anno_to_label_file(annos, folder):
folder = Path(folder)
for anno in annos:
image_idx = anno['metadata']['image_idx']
label_lines = []
for j in range(anno['bbox'].shape[0]):
label_dict = {
'name': anno['name'][j],
'alpha': anno['alpha'][j],
'bbox': anno['bbox'][j],
'location': anno['location'][j],
'dimensions': anno['dimensions'][j],
'rotation_y': anno['rotation_y'][j],
'score': anno['score'][j],
}
label_line = kitti_result_line(label_dict)
label_lines.append(label_line)
label_file = folder / f'{get_image_index_str(image_idx)}.txt'
label_str = '\n'.join(label_lines)
with open(label_file, 'w') as f:
f.write(label_str)
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for evaluation
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for evaluation
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos['difficulty'] = np.array(diff, np.int32)
return diff
def kitti_result_line(result_dict, precision=4):
prec_float = '{' + ':.{}f'.format(precision) + '}'
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', 0.0),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError('you must specify a value for {}'.format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError('unknown key. supported key:{}'.format(
res_dict.keys()))
return ' '.join(res_line)
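# Hedged usage sketch for kitti_result_line (values are hypothetical; the order
# of the entries in 'dimensions'/'location' follows whatever the caller stores):
#   line = kitti_result_line({
#       'name': 'Car', 'alpha': -1.58,
#       'bbox': [587.0, 173.3, 614.1, 200.1],
#       'dimensions': [3.64, 1.65, 1.67],
#       'location': [-0.65, 1.71, 46.7],
#       'rotation_y': -1.59, 'score': 0.9,
#   })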
|
py
|
1a5d504f1641375b7cb243d7e38677dc3161e251
|
import os
import queue
import sys
from time import time
import sounddevice as sd
import soundfile as sf
from PySide2.QtCore import QObject, Signal
from ..models.ActualProjectModel import ActualProjectModel as actual_project
from . import file as fileutils
class MicWorker(QObject):
update_volume = Signal(object)
finished = Signal(bool)
@staticmethod
def log(msg):
print(f'[MicWorker] {msg}')
def config_mic(self, input_device, buffer=0):
"""[summary]
Args:
input_device ([type]): index of the actual input mic.
fs (int, optional): Sampling frequency 44.1 kHz.
buffer (int, optional): Defaults to 0 => automatic blocksize
"""
self.io = (input_device, sd.default.device[1])
self.fs = int(sd.query_devices()[input_device]['default_samplerate'])
self.buffer = buffer
self.q = queue.Queue()
self._running = False
self._rec = False
def rec(self):
self.log("Start recording!")
self.start_time = time()
self._rec = True
def stop_rec(self):
self.log("Stopping rec...")
self._rec = False
self._running = False
def stop(self):
self.error = True
self._running = False # to raise the Exception
def run(self):
self.error = False
self.log("Running!")
self._running = True
self.stream = sd.Stream(device=self.io,
channels=2,
samplerate=self.fs,
blocksize=self.buffer,
callback=self.callback)
path = os.path.join(actual_project.project_location, 'Audio Files')
fileutils.mkdir(path)
self.file_stream = sf.SoundFile(os.path.join(path, 'audio.wav'),
mode='w',
samplerate=self.fs,
channels=2)
try:
with self.file_stream as file:
with self.stream:
while self._running:
if self._rec:
file.write(self.q.get())
else:
self.q.get()
if not self._running:
raise KeyboardInterrupt("Recording stopped!")
except KeyboardInterrupt as e:
self.log(e)
except Exception as e:
self.log("Unexpected Exception:", e)
if not self.error:
elapsed = time()-self.start_time
print(f' -> Time spent recording: {round(elapsed,2)}s')
print(f' -> fs = {self.fs}')
print(
f' -> Theoretical num of samples => {round(elapsed*self.fs)}')
self.finished.emit(self.error)
def callback(self, indata, outdata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
# outdata[:] = indata
# self.update_volume.emit(indata.copy())
# print(indata)
self.q.put(indata.copy())
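# Hedged wiring sketch (not part of the original module): MicWorker is a plain
# QObject, so a typical setup moves it onto a QThread; names below are assumptions.
#   from PySide2.QtCore import QThread
#   thread = QThread()
#   worker = MicWorker()
#   worker.config_mic(input_device=1)   # device index is hypothetical
#   worker.moveToThread(thread)
#   thread.started.connect(worker.run)
#   worker.finished.connect(thread.quit)
#   thread.start()
#   worker.rec()       # begin writing incoming blocks to audio.wav
#   worker.stop_rec()  # stop recording and let run() finish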
|
py
|
1a5d510299d7d6eb600bbc91ae74e8adf36eee17
|
import cv2
# Chapter 1 - Reading in Images, Videos, and WebCam
if __name__ == '__main__':
"""
print("Package imported")
# reading in an image file
img = cv2.imread('static/mugshot.png')
# showing an image
cv2.imshow("Output", img)
# delaying the amount of time the image stays
cv2.waitKey(7000)
"""
"""
# reading in a video file
cap = cv2.VideoCapture('static/forest.mov')
# display the video
while True:
        # read in one image frame from the video at a time
        success, img = cap.read()
cv2.imshow("Video", img)
# event-driven loop early exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
"""
# use the webcam for video
cam = cv2.VideoCapture(0)
# set the dimensions of the video to take
cam.set(3, 640) # 3 is for the width
cam.set(4, 480) # 4 is for the height
cam.set(10, 100) # 10 is for brightness settings
# display the video from the camera - same as before
while True:
        # read in one image frame from the video at a time
        success, img = cam.read()
cv2.imshow("Video", img)
# event-driven loop early exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
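    # release the capture device and close the preview window once the loop
    # exits (cleanup added here; not in the original sketch)
    cam.release()
    cv2.destroyAllWindows()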
|
py
|
1a5d51765b88572823f87c47e72b7aab6a6b6326
|
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...data_asset.util import parse_result_format
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import (
ColumnMapExpectation,
Expectation,
InvalidExpectationConfigurationError,
TableExpectation,
_format_map_output,
)
from ..registry import extract_metrics
class ExpectTableRowCountToEqual(TableExpectation):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.validator.validator.Validator.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
metric_dependencies = ("table.row_count",)
success_keys = ("value",)
default_kwarg_values = {
"value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
value = configuration.kwargs.get("value")
try:
assert value is not None, "An expected row count must be provided"
if not isinstance(value, (int, dict)):
raise ValueError("Provided row count must be an integer")
if isinstance(value, dict):
assert (
"$PARAMETER" in value
), 'Evaluation Parameter dict for value kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["value", "row_condition", "condition_parser"],
)
template_str = "Must have exactly $value rows."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = (
conditional_template_str
+ ", then "
+ template_str[0].lower()
+ template_str[1:]
)
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
expected_table_row_count = self.get_success_kwargs().get("value")
actual_table_row_count = metrics.get("table.row_count")
return {
"success": actual_table_row_count == expected_table_row_count,
"result": {"observed_value": actual_table_row_count},
}
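# Hedged usage sketch (not part of this module): once registered, the
# expectation is typically invoked through a Validator, e.g.
#   validator.expect_table_row_count_to_equal(value=100)
# where `validator` is an already-configured great_expectations Validator.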
|
py
|
1a5d527c3c9ca41b34ca6ca11656cec828b085ea
|
"""novel_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from novels import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='home'),
path('novel_', include('novels.urls')),
# path('base/', views.base),
]
|
py
|
1a5d52c9681fc6f0823d97253a27e58edd6d7fa0
|
"""
Miscellaneous Helpers for NetworkX.
These are not imported into the base networkx namespace but
can be accessed, for example, as
>>> import networkx
>>> networkx.utils.make_list_of_ints({1, 2, 3})
[1, 2, 3]
>>> networkx.utils.arbitrary_element({5, 1, 7}) # doctest: +SKIP
1
"""
from collections import defaultdict, deque
from collections.abc import Iterable, Iterator, Sized
import warnings
import sys
import uuid
from itertools import tee, chain
import networkx as nx
np = nx.lazy_import("numpy")
__all__ = [
"is_string_like",
"iterable",
"empty_generator",
"flatten",
"make_list_of_ints",
"is_list_of_ints",
"make_str",
"generate_unique_node",
"default_opener",
"dict_to_numpy_array",
"dict_to_numpy_array1",
"dict_to_numpy_array2",
"is_iterator",
"arbitrary_element",
"consume",
"pairwise",
"groups",
"to_tuple",
"create_random_state",
"create_py_random_state",
"PythonRandomInterface",
"nodes_equal",
"edges_equal",
"graphs_equal",
]
# some cookbook stuff
# used in deciding whether something is a bunch of nodes, edges, etc.
# see G.add_nodes and others in Graph Class in networkx/base.py
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"is_string_like is deprecated and will be removed in 3.0."
"Use isinstance(obj, str) instead."
)
warnings.warn(msg, DeprecationWarning)
return isinstance(obj, str)
def iterable(obj):
"""Return True if obj is iterable with a well-defined len().
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"iterable is deprecated and will be removed in 3.0."
"Use isinstance(obj, (collections.abc.Iterable, collections.abc.Sized)) instead."
)
warnings.warn(msg, DeprecationWarning)
if hasattr(obj, "__iter__"):
return True
try:
len(obj)
except:
return False
return True
def empty_generator():
"""Return a generator with no members.
.. deprecated:: 2.6
"""
warnings.warn(
"empty_generator is deprecated and will be removed in v3.0.", DeprecationWarning
)
return (i for i in ())
def flatten(obj, result=None):
"""Return flattened version of (possibly nested) iterable object."""
if not isinstance(obj, (Iterable, Sized)) or isinstance(obj, str):
return obj
if result is None:
result = []
for item in obj:
if not isinstance(item, (Iterable, Sized)) or isinstance(item, str):
result.append(item)
else:
flatten(item, result)
return tuple(result)
def make_list_of_ints(sequence):
"""Return list of ints from sequence of integral numbers.
All elements of the sequence must satisfy int(element) == element
or a ValueError is raised. Sequence is iterated through once.
If sequence is a list, the non-int values are replaced with ints.
So, no new list is created
"""
if not isinstance(sequence, list):
result = []
for i in sequence:
errmsg = f"sequence is not all integers: {i}"
try:
ii = int(i)
except ValueError:
raise nx.NetworkXError(errmsg) from None
if ii != i:
raise nx.NetworkXError(errmsg)
result.append(ii)
return result
# original sequence is a list... in-place conversion to ints
for indx, i in enumerate(sequence):
errmsg = f"sequence is not all integers: {i}"
if isinstance(i, int):
continue
try:
ii = int(i)
except ValueError:
raise nx.NetworkXError(errmsg) from None
if ii != i:
raise nx.NetworkXError(errmsg)
sequence[indx] = ii
return sequence
def is_list_of_ints(intlist):
"""Return True if list is a list of ints.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"is_list_of_ints is deprecated and will be removed in 3.0."
"See also: ``networkx.utils.make_list_of_ints.``"
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if not isinstance(intlist, list):
return False
for i in intlist:
if not isinstance(i, int):
return False
return True
def make_str(x):
"""Returns the string representation of t.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = "make_str is deprecated and will be removed in 3.0. Use str instead."
warnings.warn(msg, DeprecationWarning)
return str(x)
def generate_unique_node():
"""Generate a unique node label.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = "generate_unique_node is deprecated and will be removed in 3.0. Use uuid.uuid4 instead."
warnings.warn(msg, DeprecationWarning)
return str(uuid.uuid4())
def default_opener(filename):
"""Opens `filename` using system's default program.
.. deprecated:: 2.6
default_opener is deprecated and will be removed in version 3.0.
Consider an image processing library to open images, such as Pillow::
from PIL import Image
Image.open(filename).show()
Parameters
----------
filename : str
The path of the file to be opened.
"""
warnings.warn(
"default_opener is deprecated and will be removed in version 3.0. ",
DeprecationWarning,
)
from subprocess import call
cmds = {
"darwin": ["open"],
"linux": ["xdg-open"],
"linux2": ["xdg-open"],
"win32": ["cmd.exe", "/C", "start", ""],
}
cmd = cmds[sys.platform] + [filename]
call(cmd)
def dict_to_numpy_array(d, mapping=None):
"""Convert a dictionary of dictionaries to a numpy array
with optional mapping."""
try:
return dict_to_numpy_array2(d, mapping)
except (AttributeError, TypeError):
# AttributeError is when no mapping was provided and v.keys() fails.
# TypeError is when a mapping was provided and d[k1][k2] fails.
return dict_to_numpy_array1(d, mapping)
def dict_to_numpy_array2(d, mapping=None):
"""Convert a dictionary of dictionaries to a 2d numpy array
with optional mapping.
"""
if mapping is None:
s = set(d.keys())
for k, v in d.items():
s.update(v.keys())
mapping = dict(zip(s, range(len(s))))
n = len(mapping)
a = np.zeros((n, n))
for k1, i in mapping.items():
for k2, j in mapping.items():
try:
a[i, j] = d[k1][k2]
except KeyError:
pass
return a
def dict_to_numpy_array1(d, mapping=None):
"""Convert a dictionary of numbers to a 1d numpy array
with optional mapping.
"""
if mapping is None:
s = set(d.keys())
mapping = dict(zip(s, range(len(s))))
n = len(mapping)
a = np.zeros(n)
for k1, i in mapping.items():
i = mapping[k1]
a[i] = d[k1]
return a
def is_iterator(obj):
"""Returns True if and only if the given object is an iterator object.
.. deprecated:: 2.6.0
Deprecated in favor of ``isinstance(obj, collections.abc.Iterator)``
"""
msg = (
"is_iterator is deprecated and will be removed in version 3.0. "
"Use ``isinstance(obj, collections.abc.Iterator)`` instead."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
has_next_attr = hasattr(obj, "__next__") or hasattr(obj, "next")
return iter(obj) is obj and has_next_attr
def arbitrary_element(iterable):
"""Returns an arbitrary element of `iterable` without removing it.
This is most useful for "peeking" at an arbitrary element of a set,
but can be used for any list, dictionary, etc., as well.
Parameters
----------
iterable : `abc.collections.Iterable` instance
Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
etc.
Returns
-------
The object that results from ``next(iter(iterable))``
Raises
------
ValueError
If `iterable` is an iterator (because the current implementation of
this function would consume an element from the iterator).
Examples
--------
Arbitrary elements from common Iterable objects:
>>> nx.utils.arbitrary_element([1, 2, 3]) # list
1
>>> nx.utils.arbitrary_element((1, 2, 3)) # tuple
1
>>> nx.utils.arbitrary_element({1, 2, 3}) # set
1
>>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])}
>>> nx.utils.arbitrary_element(d) # dict_keys
1
>>> nx.utils.arbitrary_element(d.values()) # dict values
3
`str` is also an Iterable:
>>> nx.utils.arbitrary_element("hello")
'h'
:exc:`ValueError` is raised if `iterable` is an iterator:
>>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable
>>> nx.utils.arbitrary_element(iterator)
Traceback (most recent call last):
...
ValueError: cannot return an arbitrary item from an iterator
Notes
-----
This function does not return a *random* element. If `iterable` is
ordered, sequential calls will return the same value::
>>> l = [1, 2, 3]
>>> nx.utils.arbitrary_element(l)
1
>>> nx.utils.arbitrary_element(l)
1
"""
if isinstance(iterable, Iterator):
raise ValueError("cannot return an arbitrary item from an iterator")
# Another possible implementation is ``for x in iterable: return x``.
return next(iter(iterable))
# Recipe from the itertools documentation.
def consume(iterator):
"""Consume the iterator entirely.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
# Feed the entire iterator into a zero-length deque.
msg = (
"consume is deprecated and will be removed in version 3.0. "
"Use ``collections.deque(iterator, maxlen=0)`` instead."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
deque(iterator, maxlen=0)
# Recipe from the itertools documentation.
def pairwise(iterable, cyclic=False):
"s -> (s0, s1), (s1, s2), (s2, s3), ..."
a, b = tee(iterable)
first = next(b, None)
if cyclic is True:
return zip(a, chain(b, (first,)))
return zip(a, b)
def groups(many_to_one):
"""Converts a many-to-one mapping into a one-to-many mapping.
`many_to_one` must be a dictionary whose keys and values are all
:term:`hashable`.
The return value is a dictionary mapping values from `many_to_one`
to sets of keys from `many_to_one` that have that value.
Examples
--------
>>> from networkx.utils import groups
>>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3}
>>> groups(many_to_one) # doctest: +SKIP
{1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}}
"""
one_to_many = defaultdict(set)
for v, k in many_to_one.items():
one_to_many[k].add(v)
return dict(one_to_many)
def to_tuple(x):
"""Converts lists to tuples.
Examples
--------
>>> from networkx.utils import to_tuple
>>> a_list = [1, 2, [1, 4]]
>>> to_tuple(a_list)
(1, 2, (1, 4))
"""
if not isinstance(x, (tuple, list)):
return x
return tuple(map(to_tuple, x))
def create_random_state(random_state=None):
"""Returns a numpy.random.RandomState or numpy.random.Generator instance
depending on input.
Parameters
----------
random_state : int or NumPy RandomState or Generator instance, optional (default=None)
If int, return a numpy.random.RandomState instance set with seed=int.
if `numpy.random.RandomState` instance, return it.
if `numpy.random.Generator` instance, return it.
if None or numpy.random, return the global random number generator used
by numpy.random.
"""
if random_state is None or random_state is np.random:
return np.random.mtrand._rand
if isinstance(random_state, np.random.RandomState):
return random_state
if isinstance(random_state, int):
return np.random.RandomState(random_state)
if isinstance(random_state, np.random.Generator):
return random_state
msg = (
f"{random_state} cannot be used to create a numpy.random.RandomState or\n"
"numpy.random.Generator instance"
)
raise ValueError(msg)
class PythonRandomInterface:
def __init__(self, rng=None):
try:
import numpy as np
except ImportError:
msg = "numpy not found, only random.random available."
warnings.warn(msg, ImportWarning)
if rng is None:
self._rng = np.random.mtrand._rand
else:
self._rng = rng
def random(self):
return self._rng.random()
def uniform(self, a, b):
return a + (b - a) * self._rng.random()
def randrange(self, a, b=None):
if isinstance(self._rng, np.random.Generator):
return self._rng.integers(a, b)
return self._rng.randint(a, b)
# NOTE: the numpy implementations of `choice` don't support strings, so
# this cannot be replaced with self._rng.choice
def choice(self, seq):
if isinstance(self._rng, np.random.Generator):
idx = self._rng.integers(0, len(seq))
else:
idx = self._rng.randint(0, len(seq))
return seq[idx]
def gauss(self, mu, sigma):
return self._rng.normal(mu, sigma)
def shuffle(self, seq):
return self._rng.shuffle(seq)
# Some methods don't match API for numpy RandomState.
# Commented out versions are not used by NetworkX
def sample(self, seq, k):
return self._rng.choice(list(seq), size=(k,), replace=False)
def randint(self, a, b):
if isinstance(self._rng, np.random.Generator):
return self._rng.integers(a, b + 1)
return self._rng.randint(a, b + 1)
# exponential as expovariate with 1/argument,
def expovariate(self, scale):
return self._rng.exponential(1 / scale)
# pareto as paretovariate with 1/argument,
def paretovariate(self, shape):
return self._rng.pareto(shape)
# weibull as weibullvariate multiplied by beta,
# def weibullvariate(self, alpha, beta):
# return self._rng.weibull(alpha) * beta
#
# def triangular(self, low, high, mode):
# return self._rng.triangular(low, mode, high)
#
# def choices(self, seq, weights=None, cum_weights=None, k=1):
# return self._rng.choice(seq
def create_py_random_state(random_state=None):
"""Returns a random.Random instance depending on input.
Parameters
----------
random_state : int or random number generator or None (default=None)
If int, return a random.Random instance set with seed=int.
if random.Random instance, return it.
if None or the `random` package, return the global random number
generator used by `random`.
if np.random package, return the global numpy random number
generator wrapped in a PythonRandomInterface class.
if np.random.RandomState instance, return it wrapped in
PythonRandomInterface
if a PythonRandomInterface instance, return it
"""
import random
try:
import numpy as np
if random_state is np.random:
return PythonRandomInterface(np.random.mtrand._rand)
if isinstance(random_state, np.random.RandomState):
return PythonRandomInterface(random_state)
if isinstance(random_state, PythonRandomInterface):
return random_state
except ImportError:
pass
if random_state is None or random_state is random:
return random._inst
if isinstance(random_state, random.Random):
return random_state
if isinstance(random_state, int):
return random.Random(random_state)
msg = f"{random_state} cannot be used to generate a random.Random instance"
raise ValueError(msg)
def nodes_equal(nodes1, nodes2):
"""Check if nodes are equal.
Equality here means equal as Python objects.
Node data must match if included.
The order of nodes is not relevant.
Parameters
----------
nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples
Returns
-------
bool
True if nodes are equal, False otherwise.
"""
nlist1 = list(nodes1)
nlist2 = list(nodes2)
try:
d1 = dict(nlist1)
d2 = dict(nlist2)
except (ValueError, TypeError):
d1 = dict.fromkeys(nlist1)
d2 = dict.fromkeys(nlist2)
return d1 == d2
def edges_equal(edges1, edges2):
"""Check if edges are equal.
Equality here means equal as Python objects.
Edge data must match if included.
The order of the edges is not relevant.
Parameters
----------
edges1, edges2 : iterables of with u, v nodes as
edge tuples (u, v), or
edge tuples with data dicts (u, v, d), or
edge tuples with keys and data dicts (u, v, k, d)
Returns
-------
bool
True if edges are equal, False otherwise.
"""
from collections import defaultdict
d1 = defaultdict(dict)
d2 = defaultdict(dict)
c1 = 0
for c1, e in enumerate(edges1):
u, v = e[0], e[1]
data = [e[2:]]
if v in d1[u]:
data = d1[u][v] + data
d1[u][v] = data
d1[v][u] = data
c2 = 0
for c2, e in enumerate(edges2):
u, v = e[0], e[1]
data = [e[2:]]
if v in d2[u]:
data = d2[u][v] + data
d2[u][v] = data
d2[v][u] = data
if c1 != c2:
return False
# can check one direction because lengths are the same.
for n, nbrdict in d1.items():
for nbr, datalist in nbrdict.items():
if n not in d2:
return False
if nbr not in d2[n]:
return False
d2datalist = d2[n][nbr]
for data in datalist:
if datalist.count(data) != d2datalist.count(data):
return False
return True
def graphs_equal(graph1, graph2):
"""Check if graphs are equal.
Equality here means equal as Python objects (not isomorphism).
Node, edge and graph data must match.
Parameters
----------
graph1, graph2 : graph
Returns
-------
bool
True if graphs are equal, False otherwise.
"""
return (
graph1.adj == graph2.adj
and graph1.nodes == graph2.nodes
and graph1.graph == graph2.graph
)
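# Illustrative doctest-style examples for the comparison helpers above
# (values chosen for illustration only):
#   >>> nodes_equal([1, 2, 3], [3, 2, 1])
#   True
#   >>> edges_equal([(1, 2), (2, 3)], [(2, 3), (1, 2)])
#   True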
|
py
|
1a5d531e4414d71d39a2d2535091a98e613c8b29
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""Userbot module for managing events. One of the main components of the userbot."""
import inspect
import re
import sys
from asyncio import create_subprocess_shell as asyncsubshell
from asyncio import subprocess as asyncsub
from pathlib import Path
from time import gmtime, strftime
from traceback import format_exc
from telethon import events
from AyiinXd import CMD_HANDLER, CMD_LIST, DEFAULT, DEVS, AYIIN2, AYIIN3, AYIIN4, AYIIN5, AYIIN6, AYIIN7, AYIIN8, AYIIN9, AYIIN10, bot
def ayiin_cmd(pattern=None, command=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith(r"\#"):
args["pattern"] = re.compile(pattern)
elif pattern.startswith(r"^"):
args["pattern"] = re.compile(pattern)
cmd = pattern.replace("$", "").replace("^", "").replace("\\", "")
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
else:
if len(CMD_HANDLER) == 2:
catreg = "^" + CMD_HANDLER
reg = CMD_HANDLER[1]
elif len(CMD_HANDLER) == 1:
catreg = "^\\" + CMD_HANDLER
reg = CMD_HANDLER
args["pattern"] = re.compile(catreg + pattern)
if command is not None:
cmd = reg + command
else:
cmd = (
(reg +
pattern).replace(
"$",
"").replace(
"\\",
"").replace(
"^",
""))
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
if "allow_edited_updates" in args and args["allow_edited_updates"]:
del args["allow_edited_updates"]
return events.NewMessage(**args)
def command(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern")
    allow_edited_updates = args.pop("allow_edited_updates", False)
    groups_only = args.pop("groups_only", False)
    trigger_on_fwd = args.pop("trigger_on_fwd", False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith("(?i)"):
args["pattern"] = "(?i)" + pattern
except BaseException:
pass
reg = re.compile("(.*)")
if pattern is not None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace(
"$",
"").replace(
"\\",
"").replace(
"^",
"")
except BaseException:
pass
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
except BaseException:
pass
    def decorator(func):
        async def wrapper(check):
            if check.edit_date and check.is_channel and not check.is_group:
                return
            if not trigger_on_fwd and check.fwd_from:
                return
            if groups_only and not check.is_group:
                await check.respond("`I don't think this is a group.`")
                return
            await func(check)
        if allow_edited_updates:
            bot.add_event_handler(wrapper, events.MessageEdited(**args))
        bot.add_event_handler(wrapper, events.NewMessage(**args))
        return wrapper
    return decorator
def register(**args):
"""Register a new event."""
pattern = args.get("pattern")
disable_edited = args.get("disable_edited", False)
ignore_unsafe = args.get("ignore_unsafe", False)
unsafe_pattern = r"^[^/!#@\$A-Za-z]"
groups_only = args.get("groups_only", False)
trigger_on_fwd = args.get("trigger_on_fwd", False)
disable_errors = args.get("disable_errors", False)
insecure = args.get("insecure", False)
args.get("sudo", False)
args.get("own", False)
if pattern is not None and not pattern.startswith("(?i)"):
args["pattern"] = "(?i)" + pattern
if "disable_edited" in args:
del args["disable_edited"]
if "sudo" in args:
del args["sudo"]
args["incoming"] = True
args["from_users"] = DEVS
if "ignore_unsafe" in args:
del args["ignore_unsafe"]
if "groups_only" in args:
del args["groups_only"]
if "disable_errors" in args:
del args["disable_errors"]
if "trigger_on_fwd" in args:
del args["trigger_on_fwd"]
if "own" in args:
del args["own"]
args["incoming"] = True
args["from_users"] = DEFAULT
if "insecure" in args:
del args["insecure"]
if pattern and not ignore_unsafe:
args["pattern"] = pattern.replace("^.", unsafe_pattern, 1)
def decorator(func):
async def wrapper(check):
if check.edit_date and check.is_channel and not check.is_group:
# Messages sent in channels can be edited by other users.
# Ignore edits that take place in channels.
return
if not trigger_on_fwd and check.fwd_from:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
if check.via_bot_id and not insecure and check.out:
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
# Check if we have to disable it.
# If not silence the log spam on the console,
# with a dumb except.
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**✘ AYIIN-USERBOT ERROR REPORT ✘**\n\n"
link = "[Group Support](https://t.me/AyiinXdSupport)"
text += "Jika mau, Anda bisa melaporkan error ini, "
text += f"Cukup forward saja pesan ini ke {link}.\n\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nFile ini HANYA diupload di sini,"
ftext += "\nkami hanya mencatat fakta error dan tanggal,"
ftext += "\nkami menghormati privasi Anda."
ftext += "\nJika mau, Anda bisa melaporkan error ini,"
ftext += "\ncukup forward saja pesan ini ke @AyiinXdSupport"
ftext += "\n================================\n\n"
ftext += "--------BEGIN USERBOT TRACEBACK LOG--------\n"
ftext += "\nTanggal : " + date
ftext += "\nChat ID : " + str(check.chat_id)
ftext += "\nUser ID : " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END USERBOT TRACEBACK LOG--------"
command = 'git log --pretty=format:"%an: %s" -10'
ftext += "\n\n\n10 commits Terakhir:\n"
process = await asyncsubshell(
command, stdout=asyncsub.PIPE, stderr=asyncsub.PIPE
)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) + \
str(stderr.decode().strip())
ftext += result
with open("error.log", "w+") as file:
file.write(ftext)
if bot:
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN2:
if not disable_edited:
AYIIN2.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN2.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN3:
if not disable_edited:
AYIIN3.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN3.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN4:
if not disable_edited:
AYIIN4.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN4.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN5:
if not disable_edited:
AYIIN5.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN5.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN6:
if not disable_edited:
AYIIN6.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN6.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN7:
if not disable_edited:
AYIIN7.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN7.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN8:
if not disable_edited:
AYIIN8.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN8.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN9:
if not disable_edited:
AYIIN9.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN9.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN10:
if not disable_edited:
AYIIN10.add_event_handler(
wrapper, events.MessageEdited(**args))
AYIIN10.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
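# Hedged usage sketch (handler name and reply text are assumptions, following
# the handler registration above):
#   @register(outgoing=True, pattern=r"^.ping$")
#   async def ping(event):
#       await event.edit("pong")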
|
py
|
1a5d53ed215bd69a052e3ecb7000b478bf180404
|
metric_dimension = {
"all_post": {
"fields": [
"id",
"link",
"message",
"created_time"
]
},
}
|
py
|
1a5d540c65c3fbe5e738bf90c223f3c45b6b0623
|
""" 1) Stock_TSA - Approach: TSA models a yearly growth rate, combined with probabilistic model."""
# Disclaimer: There have been several attempts to predict financial markets and stock prices using time series analysis. Many of them were not successful!
# Neither trading nor investment decisions should be influenced by this repository and the code, which is built only to introduce and demonstrate a methodology for time series modeling.
# No responsibility is taken for correctness or completeness of historic, current or future data, models and / or predictions!
#-----------------------------------------------------------------------------------------------------------------------------------
__author__ = "Christian Simonis"
__copyright__ = "Copyright 2021"
__version__ = "1.1"
__maintainer__ = "Christian Simonis"
__email__ = "[email protected]"
__status__ = "work in progress"
# Approach: TSA models a yearly return rate, combined with a probabilistic model.
# While the general growth rate of the stock or index is described by a domain model,
# market inefficiencies in particular are modeled probabilistically,
# covering the parts that the domain model cannot describe.
# Assumptions rely on the course Financial Markets by Robert Shiller.
# Information links (no promotion), see sources:
# https://www.coursera.org/learn/financial-markets-global (no promotion)
# and https://en.wikipedia.org/wiki/Brownian_model_of_financial_markets
#-----------------------------------------------------------------------------------------------------------------------------------
# Name Version License
# FinQuant 0.2.2 MIT License, Copyright (C) 2019 Frank Milthaler:https://github.com/fmilthaler/FinQuant/blob/master/LICENSE.txt
# numpy 1.19.5 BSD, Copyright (c) 2005-2020, NumPy Developers: https://numpy.org/doc/stable/license.html#:~:text=Copyright%20(c)%202005%2D2020%2C%20NumPy%20Developers.&text=THIS%20SOFTWARE%20IS%20PROVIDED%20BY,A%20PARTICULAR%20PURPOSE%20ARE%20DISCLAIMED.
# yfinance 0.1.59 Apache License, Version 2.0, Copyright (c) January 2004, Ran Aroussi: https://github.com/ranaroussi/yfinance
# matplotlib 3.4.2 Python Software Foundation License, Copyright (c) 2002 - 2012 John Hunter, Darren Dale, Eric Firing, Michael Droettboom and the Matplotlib development team; 2012 - 2021 The Matplotlib development team: https://matplotlib.org/stable/users/license.html
# scikit-learn 0.23.1 BSD 3-Clause License, Copyright (c) 2007-2021 The scikit-learn developers: https://github.com/scikit-learn/scikit-learn/blob/main/COPYING
# pandas 1.2.4 BSD 3-Clause License Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team: https://github.com/pandas-dev/pandas/blob/master/LICENSE
# seaborn 0.11.1 BSD 3-Clause "New" or "Revised" License, Copyright (c) 2012-2021, Michael L. Waskom: https://github.com/mwaskom/seaborn/blob/master/LICENSE
# scipy 1.5.2 BSD 3-Clause "New" or "Revised" License, Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers: https://github.com/scipy/scipy/blob/master/LICENSE.txt
# neuralprophet 0.2.7 MIT License, Copyright (c) 2020 Oskar Triebe: https://github.com/ourownstory/neural_prophet/blob/master/LICENSE
#-----------------------------------------------------------------------------------------------------------------------------------
import numpy as np
from finquant.portfolio import build_portfolio, EfficientFrontier
import yfinance as yfin
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, WhiteKernel, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
import pandas as pd
import seaborn as sns
from scipy.optimize import curve_fit
import warnings # https://docs.python.org/3/library/warnings.html
import random # https://docs.python.org/3/library/random.html
import datetime as dt # https://docs.python.org/3/library/datetime.html
#-----------------------------------------------------------------------------------------------------------------------------------
# Hint: No responsibility is taken for correctness or completeness of historic, current or future data, models and / or predictions
#-----------------------------------------------------------------------------------------------------------------------------------
# Class definition TSA
#-----------------------------------------------------------------------------------------------------------------------------------
class Stock_Analysis:
"""The purpose of the class Stock_Analysis is:
- to model and predict the time series behavior
- to visualize the model results
"""
#Initialization
def __init__(self):
"""initial call"""
#Get data via YFIN API
def obtain_timeseries(self,Stock_Name, start):
#--------------------------------------------------------
""" obtain timeseries for stocks
e.g. --> obtain_timeseries("AAPL","2018-07-20")
Input:
Stock_Name: Name of stock, e.g. "AAPL"
start: Start data, from which stock data should be downloaded, e.g. "2018-07-20"
Output:
Time: Time data as numpy.ndarray
Stock: Stock data as numpy.ndarray
Time_idx: Time for user visualization: Raw time data as pandas.core.indexes.datetimes.DatetimeIndex
Class:
DF: Dataframe, consisting of time and closing price information
"""
#download with Yahoo Finance API
if hasattr(self,"end") == False:
stocks = yfin.download(Stock_Name, start) # till most recent value
else:
stocks = yfin.download(Stock_Name, start, end = self.end) # till definition
stocks.columns = stocks.columns.to_flat_index()
#Export time series of stock sequence
Stock = stocks.loc[:, "Close"].to_numpy()
Time = stocks.loc[:, "Close"].index.to_numpy().astype("float")
Time_idx = stocks.loc[:, "Close"].index
self.DF = pd.DataFrame({ 'ds': Time_idx,
'y': stocks.loc[:, "Close"]})
return Time, Stock, Time_idx
#Conduct train / test split based on target by user
def conduct_train_test_split(self,option,split_factor, Time, Stock):
#--------------------------------------------------------
""" predictics, using forecast model, consisting of a domain model and a data-driven model
e.g. --> conduct_train_test_split(1,0.3, np.array([1, 2, 3]))
Input:
option: User choice: #1 = real prediction , 2= backtest
split_factor: Train test split
Time: Time data
Stock: Stock data
Output:
Time_training: Time allocated to Training data
y_training: Labels allocated to Training data
Time_test: Time allocated to Test data
y_test: Labels allocated to Test data
"""
if option == 1: #Option 1) Real forecast
delta_T = Time[1]-Time[0]
Label_span = Time.max()-Time.min()
# Chosing Training data proportional split factor
Time_training = Time.copy()
y_training = Stock.copy()
#take most recent data as test data
Time_test = np.arange(Time.max(),Time.max() + (1-split_factor)*Label_span, delta_T)
y_test = []
else: #Option 2) Simulate real forecast (done in past)
length = len(Time)
till = int(np.round(split_factor*length))
# Chosing Training data proportional split factor
Time_training = Time[0:till]
y_training = Stock[0:till]
#take most recent data as test data
Time_test = Time[till+1:length]
y_test = Stock[till+1:length]
return Time_training, y_training, Time_test, y_test
#domain model for times series description in stock market
def func(self, x, a, c):
#--------------------------------------------------------
""" Domain model to describe exponential behavior
        e.g. --> func(np.array([1, 2, 3]), 7, 9)
        Input:
        x:       Input
        a:       Scaling factor, multiplied to exp-function
        b:       interest parameter (not passed as an argument; taken from self.exp_interest if set, otherwise 1.07) --> incorporates domain knowledge
        c:       Constant offset parameter
        Output:
        y:       Output according to domain model
"""
#User choice, representing market knowledge
if hasattr(self,"exp_interest") == False:
b = 1.07 #interest in the long run, e.g. 1.07 = 7% interest
else:
b = self.exp_interest #otherwise, take class attribute
#Calculation of domain model
y = a * np.exp(b * x) + c
return y
#Forecasting model
def fit_ForecastMdl(self, X,Label,L,N):
#--------------------------------------------------------
""" fits forecast model to data, using a domain model, combined with a data-driven model
e.g. --> fit_ForecastMdl(np.array([[1,4]]).T,np.array([1,5]).T,2,2)
Input:
X: Feature as Input,
Label: Ground Truth as label to be learned (output)
L: Length scale: Hyperparameter of Gaussian process, kernel definition
N: restarts of optimizer: Hyperparameter of Gaussian process fitting
Output:
forecast: Forecast model regression value
sigma: Uncertainty, represented by standard deviation
y: Domain regression value
Class:
reg: Domain regression model (as part of class)
gpr: Gaussian process model (as part of class)
"""
# Domain model, e.g. via exponential approach (alternative linear model in bracket comments)
#Exp function fitting
reg, pcov = curve_fit(self.func, X[:,0], Label) #fit of domain model, Alternative: #reg = LinearRegression().fit(Time_scaled, Label)
#Exp function evaluation
y = self.func(X[:,0], *reg) #evaluation of domain model, Alternative: #y = reg.predict(Time_scaled) #linear function
#Calculation of Residuum
res = Label.copy() - y.copy() #exp function
sigma_est = np.std(res)*15 #safety margin
#Definition of Machine Learning model to learn residuum in supervised manner
kernel = 1.0 * RBF(length_scale=L, length_scale_bounds=(L*1e-1, L*1e1)) + 1e3*WhiteKernel(noise_level=1e2*sigma_est, noise_level_bounds=(1e1*sigma_est, 1e2*sigma_est)) # Alternative: #kernel = 1.0 * RationalQuadratic(length_scale=L) + WhiteKernel(noise_level=0, noise_level_bounds=(1e-8, sigma_est))
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=N,alpha = 0.5)
gpr.fit(X, res)
#Fit of Machine Learning model
GP_pred, sigma = gpr.predict(X, return_std=True)
#Combination of results
forecast = GP_pred + y
self.gpr = gpr # data-driven (probabilistic) model
self.reg = reg # domain model
return forecast, sigma, y
#Prediction function
def pred_ForecastMdl(self, X):
#--------------------------------------------------------
""" predictics, using forecast model, consisting of a domain model and a data-driven model
e.g. --> forecast, sigma, y = fit_ForecastMdl(np.array([[1,4]]).T,np.array([1,5]).T,2,2); pred_ForecastMdl(np.array([[1,1.2]]).T)
Input:
X: Feature as Input,
reg: Domain regression model
gpr: Gaussian process model
Output:
forecast_pred: Predicted forecast model regression value
sigma_pred: Predicted uncertainty, represented by standard deviation
y_pred: Predicted domain regression value
"""
#predict with domain model
y_pred = self.func(X[:,0], *self.reg) #exp function, Alternative: #y_pred = reg.predict(Time_scaled) # linear function
#predict with data-driven model
GP_pred, sigma_pred = self.gpr.predict(X, return_std=True)
#Combine predictions
forecast_pred = GP_pred + y_pred
return forecast_pred, sigma_pred, y_pred
#Visualization
def vis_results(self,Time_training,forecast,sigma,Time_test,forecast_future,sigma_future, Time,Stock, Stock_Name):
#--------------------------------------------------------
""" visualizes results of forecast model, consisting of a domain model and a data-driven model
e.g. --> runfile('RUN_Stock-Forecast.py')
Input:
Time_training: Time allocated to Training data
forecast: Forecast model regression value
sigma: Uncertainty, represented by standard deviation
Time_test: Time allocated to Test data
forecast_future: Predicted forecast model regression value
sigma_future: Predicted uncertainty, represented by standard deviation
Time: Time data as numpy.ndarray
Stock: Stock data as numpy.ndarray
        Stock_Name: Name of Stock or Index
"""
#Fit & Prediction visualization of TSA (Time series analysis) approach
plt.style.use("seaborn")
plt.plot(Time_training,forecast,'b-',linewidth=3, label = 'Model Fit')
plt.fill(np.concatenate([Time_training, Time_training[::-1]]),np.concatenate([forecast - 3 * sigma,(forecast + 3 * sigma)[::-1]]),
alpha=.3, fc='y', ec='None', label='99% confidence interval for training')
plt.plot(Time_test,forecast_future,'k-.',linewidth=2, label = 'Forecast with Prediction Model')
plt.fill(np.concatenate([Time_test, Time_test[::-1]]),np.concatenate([forecast_future - 3 * sigma_future,(forecast_future + 3 * sigma_future)[::-1]]),
alpha=.2, fc='g', ec='None', label='99% confidence interval for prediction')
plt.fill(np.concatenate([Time_test, Time_test[::-1]]),np.concatenate([forecast_future - 1 * sigma_future,(forecast_future + 1 * sigma_future)[::-1]]),
alpha=.5, fc='g', ec='None', label='68% confidence interval for prediction')
plt.scatter(Time,Stock, label = Stock_Name, c="coral")
plt.xlabel('Time', fontsize=16)
plt.ylabel('Closing Price', fontsize=16)
plt.legend(loc='upper left', shadow=False, ncol=1)
return 0
#Optimize portfolio using finquant library
def optimize_pf(self, df_data, nr_mc, risk_free_rate):
#--------------------------------------------------------
""" optimizes portfolio (either historic or predictive) based on defined criteria
e.g. --> runfile('RUN_Pred-optimization.py')
Input:
df_data: Portfolio dataframe to be optimized
nr_mc: Number of samples for Monte Carlo simulation
risk_free_rate: risk free rate
Output:
opt_w: Optimized weights for asset allocation
"""
plt.style.use("seaborn-darkgrid")
# set line width
plt.rcParams["lines.linewidth"] = 2
# set font size for titles
plt.rcParams["axes.titlesize"] = 14
# set font size for labels on axes
plt.rcParams["axes.labelsize"] = 12
# set size of numbers on x-axis
plt.rcParams["xtick.labelsize"] = 10
# set size of numbers on y-axis
plt.rcParams["ytick.labelsize"] = 10
# set figure size
plt.rcParams["figure.figsize"] = (10, 6)
# building a portfolio by providing stock data
pf = build_portfolio(data=df_data)
pf.risk_free_rate = risk_free_rate # risk free rate
print(pf)
pf.properties()
# if needed, change risk free rate and frequency/time window of the portfolio
print("pf.risk_free_rate = {}".format(pf.risk_free_rate))
print("pf.freq = {}".format(pf.freq))
"""
pf.ef_minimum_volatility(verbose=True)
# optimisation for maximum Sharpe ratio
pf.ef_maximum_sharpe_ratio(verbose=True)
# minimum volatility for a given target return of 0.26
pf.ef_efficient_return(0.26, verbose=True)
"""
# optimisation for maximum Sharpe ratio
pf.ef_maximum_sharpe_ratio(verbose=True)
# Monte Carlo portfolios and Efficient Frontier solutions
opt_w, opt_res = pf.mc_optimisation(num_trials=nr_mc)
pf.mc_properties()
pf.mc_plot_results()
# visualization
pf.ef_plot_efrontier()
pf.ef.plot_optimal_portfolios()
pf.plot_stocks()
plt.show()
#provide result
self.optimized_weights = opt_w
self.optimized_weights.head()
return opt_w
#-----------------------------------------------------------------------------------------------------------------------------------
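# Hedged usage sketch (for illustration only): the workflow below shows how the class above could be
# combined end to end. The ticker, start date, split factor and hyperparameters (L, N) are assumptions
# chosen for demonstration, not recommendations, and the run requires an internet connection for yfinance.
if __name__ == "__main__":
    tsa = Stock_Analysis()
    Time, Stock, Time_idx = tsa.obtain_timeseries("AAPL", "2018-07-20")
    # scale raw timestamps to a small range so the exponential domain model stays numerically stable
    scaler = MinMaxScaler()
    Time_scaled = scaler.fit_transform(Time.reshape(-1, 1))
    # option 2 = backtest: hold out the most recent 20% of the series as test data
    Time_train, y_train, Time_test, y_test = tsa.conduct_train_test_split(2, 0.8, Time_scaled[:, 0], Stock)
    # fit domain model + Gaussian process on the training window (L and N are assumed hyperparameters)
    forecast, sigma, y_dom = tsa.fit_ForecastMdl(Time_train.reshape(-1, 1), y_train, L=0.1, N=2)
    # predict on the held-out window and visualize fit, forecast and confidence bands
    forecast_future, sigma_future, _ = tsa.pred_ForecastMdl(Time_test.reshape(-1, 1))
    tsa.vis_results(Time_train, forecast, sigma, Time_test, forecast_future, sigma_future,
                    Time_scaled[:, 0], Stock, "AAPL")
    plt.show()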
|
py
|
1a5d548920923fa71f7af60757dcef5097f7448b
|
import sys
import argparse
import csv
import sqlite3
import bz2
import gzip
from datetime import datetime
from six import string_types, text_type
if sys.version_info[0] > 2:
read_mode = 'rt'
else:
read_mode = 'rU'
def convert_header(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
print(headers)
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
print(types)
fo.seek(0)
return
def createCSVSchema(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
fo.seek(0)
# now load data
_columns = ','.join(
['"%s" %s' % (header, _type) for (header,_type) in zip(headers, types)]
)
return headers, types, _columns
def convert(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
fo.seek(0)
# now load data
_columns = ','.join(
['"%s" %s' % (header, _type) for (header,_type) in zip(headers, types)]
)
reader = csv.reader(fo, dialect)
if not header_given: # Skip the header
next(reader)
conn = sqlite3.connect(dbpath)
# shz: fix error with non-ASCII input
conn.text_factory = str
c = conn.cursor()
try:
create_query = 'CREATE TABLE %s (%s)' % (table, _columns)
c.execute(create_query)
except:
pass
_insert_tmpl = 'INSERT INTO %s VALUES (%s)' % (table,
','.join(['?']*len(headers)))
line = 0
for row in reader:
line += 1
if len(row) == 0:
continue
# we need to take out commas from int and floats for sqlite to
# recognize them properly ...
try:
row = [
None if x == ''
else float(x.replace(',', '')) if y == 'real'
else int(x) if y == 'integer'
else x for (x,y) in zip(row, types) ]
c.execute(_insert_tmpl, row)
except ValueError as e:
print("Unable to convert value '%s' to type '%s' on line %d" % (x, y, line), file=sys.stderr)
except Exception as e:
print("Error on line %d: %s" % (line, e), file=sys.stderr)
conn.commit()
c.close()
def _guess_types(reader, number_of_columns, max_sample_size=100):
'''Guess column types (as for SQLite) of CSV.
:param fileobj: read-only file object for a CSV file.
'''
# we default to text for each field
types = ['text'] * number_of_columns
# order matters
# (order in form of type you want used in case of tie to be last)
options = [
('text', text_type),
('real', float),
('integer', int),
('date', lambda value: datetime.strptime(value, "%Y-%m-%d").date()),
('datetime', lambda value: datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f%z").date())
# 'date',
]
# for each column a set of bins for each type counting successful casts
perresult = {
'integer': 0,
'real': 0,
'text': 0,
'date':0,
'datetime':0
}
results = [ dict(perresult) for x in range(number_of_columns) ]
sample_counts = [ 0 for x in range(number_of_columns) ]
for row_index,row in enumerate(reader):
for column,cell in enumerate(row):
cell = cell.strip()
if len(cell) == 0:
continue
# replace ',' with '' to improve cast accuracy for ints and floats
if(cell.count(',') > 0):
cell = cell.replace(',', '')
if(cell.count('E') == 0):
cell = cell + "E0"
for data_type,cast in options:
try:
cast(cell)
if data_type=='integer' and len(cell)>20:
raise ValueError('too long integer to handle')
results[column][data_type] += 1
sample_counts[column] += 1
except ValueError:
pass
have_max_samples = True
for column,cell in enumerate(row):
if sample_counts[column] < max_sample_size:
have_max_samples = False
if have_max_samples:
break
for column,colresult in enumerate(results):
for _type, _ in options:
if colresult[_type] > 0 and colresult[_type] >= colresult[types[column]]:
types[column] = _type
return types
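# Hedged illustration: a tiny, self-contained demonstration of the type-guessing helper above on
# in-memory CSV rows. It is wrapped in a function so importing this module has no side effects;
# the sample data and function name are hypothetical and only show how _guess_types is driven.
def _demo_guess_types():
    import io
    sample = io.StringIO("id,price,label\n1,10.5,foo\n2,11.0,bar\n")
    reader = csv.reader(sample)
    next(reader)  # skip the header row, mirroring what convert() does
    types = _guess_types(reader, 3)
    print(types)  # prints the guessed SQLite affinity for each of the three columns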
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
Convert a CSV file to a table in a SQLite database.
The database is created if it does not yet exist.
''')
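    # Example invocation (hypothetical script and file names, shown for illustration):
    #   python csv_to_sqlite.py data.csv out.sqlite mytable --gzip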
parser.add_argument('csv_file', type=str, help='Input CSV file path')
parser.add_argument('sqlite_db_file', type=str, help='Output SQLite file')
parser.add_argument('table_name', type=str, nargs='?', help='Name of table to write to in SQLite file', default='data')
parser.add_argument('--headers', type=str, nargs='?', help='Headers are read from this file, if provided.', default=None)
    parser.add_argument('--types', type=str, nargs='?', help='Types are read from this file, if provided.', default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('--bz2', help='Input csv file is compressed using bzip2.', action='store_true')
group.add_argument('--gzip', help='Input csv file is compressed using gzip.', action='store_true')
args = parser.parse_args()
compression = None
if args.bz2:
compression = 'bz2'
elif args.gzip:
compression = 'gzip'
#convert(args.csv_file, args.sqlite_db_file, args.table_name, args.headers, compression, args.types)
convert(args.csv_file, args.sqlite_db_file, args.table_name, args.headers, compression, args.types)
|
py
|
1a5d54efcd1380115ece412bb21f6093ed20678a
|
def print_list_to_console(some_list):
print("Printing List Items: ")
for item in some_list:
print(item.__str__())
|
py
|
1a5d551920427d53e3ebd08181b95b7379f9a171
|
# qubit number=5
# total number=52
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
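    # worked example: for n = 5, sqrt(2**5)*pi/4 = 5.657*3.1416/4 = 4.44, so floor gives repeat = 4 Grover iterations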
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=28
prog.cx(input_qubit[3],input_qubit[0]) # number=49
prog.z(input_qubit[3]) # number=50
prog.cx(input_qubit[3],input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=29
prog.h(input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=43
prog.cz(input_qubit[1],input_qubit[0]) # number=44
prog.h(input_qubit[0]) # number=45
prog.cx(input_qubit[1],input_qubit[0]) # number=35
prog.cx(input_qubit[1],input_qubit[0]) # number=38
prog.x(input_qubit[0]) # number=39
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=46
prog.cz(input_qubit[1],input_qubit[0]) # number=47
prog.h(input_qubit[0]) # number=48
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.y(input_qubit[2]) # number=41
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.rx(1.0398671683382215,input_qubit[2]) # number=31
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1079.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py
|
1a5d5638b86f25c74abb652129bcd2e7bbcd8d10
|
# C20 and C28 contained extra character "
# I decided to remove this character on both lines
# I assume that it was a typo
# Tab-separated values should not contain these extra characters
# please compare data.txt and data2.txt
''' I decided to work with data2.txt because if we had data coming
with an extra character, we should probably ensure that this data is fixed
'''
'''
# please uncomment this section for comparison
with open('data.txt') as tab_data:
print(tab_data.read())
with open('data2.txt') as tab_data2:
print(tab_data2.read())
# end comparison
'''
def main():
create_simple_html()
def create_simple_html():
f = open('data-to-html.html','w')
text_data = open_read_data()
# print(text_data)
# multi-line 'f strings' are used to insert into <pre> element
message = f"""<html>
<head></head>
<body><p>customer data</p>
<pre>{text_data}</pre></body>
</html>"""
    # note: the f-string above already interpolated text_data, so no further formatting is needed
f.write(message)
f.close()
def open_read_data():
with open('data2.txt') as customers:
c =customers.read()
# print(c)
# print(type(c))
return c
main()
|
py
|
1a5d56fca3c1ee72df79ecdacc18f252253691d3
|
# MongoDB 4.0
import os
from mongo_basic import BasicMongo
class Mongo(BasicMongo):
def __init__(self):
pass
def version(self):
return '4.0'
|
py
|
1a5d5858f34515e6f327c0dd7f223f1ac3ba7dd3
|
"""
Validate serialized SPDXv3 files against schema
"""
import jadn
import json
import os
from urllib.parse import urlparse
SCHEMA = 'Schemas/spdx-v3.jidl'
DATA_DIR = 'Data3'
OUT_DIR = 'Out'
DEFAULT_PROPERTIES = ('specVersion', 'created', 'profile', 'dataLicense')
IRI_LOCATIONS = ('id', 'created/by', '*/elements/*', 'relationship/from', 'relationship/to/*',
'*/originator', 'elementRefs/id', 'annotation/subject')
def expand_iri(context: dict, element_id: str) -> str:
"""
Convert an Element ID in namespace:local form to an IRI
"""
if context:
u = urlparse(element_id)
if u.scheme:
if prefix := context.get('prefixes', {}).get(u.scheme, ''):
return prefix + u.path
return element_id
if element_id not in context.get('local_ids', []):
print(f' Undefined Element: {element_id}')
return context.get('baseIRI', '') + element_id
return element_id
def compress_iri(context: dict, iri: str) -> str:
"""
Convert an Element IRI to namespace:local form
"""
if context:
if base := context.get('baseIRI', ''):
if iri.startswith(base):
return iri.replace(base, '')
for k, v in context.get('prefixes', {}).items():
if iri.startswith(v):
return iri.replace(v, k + ':')
return iri
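def _example_iri_roundtrip() -> None:
    """
    Hedged illustration of the expand/compress helpers above on a hypothetical context.
    The baseIRI, prefix and element ids are assumptions made up for this example, not SPDX data.
    """
    ctx = {
        'baseIRI': 'https://example.org/spdx/',
        'prefixes': {'acme': 'https://acme.example/ns/'},
        'local_ids': ['pkg-1'],
    }
    print(expand_iri(ctx, 'pkg-1'))                                 # -> https://example.org/spdx/pkg-1
    print(expand_iri(ctx, 'acme:lib-2'))                            # -> https://acme.example/ns/lib-2
    print(compress_iri(ctx, 'https://example.org/spdx/pkg-1'))      # -> pkg-1
    print(compress_iri(ctx, 'https://acme.example/ns/lib-2'))       # -> acme:lib-2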
def expand_ids(context: dict, element: dict, paths: list) -> None:
"""
Convert all IRIs in element from namespace:local form to absolute IRI
Hardcode IRI locations for now; replace with path-driven dynamic update
"""
etype = element['type']
element.update({'id': expand_iri(context, element['id'])})
if 'created' in element:
element['created']['by'] = [expand_iri(context, k) for k in element['created']['by']]
for t in etype:
if 'elements' in etype[t]:
etype[t]['elements'] = [expand_iri(context, k) for k in etype[t]['elements']]
elif 'originator' in etype[t]:
etype[t]['originator'] = [expand_iri(context, k) for k in etype[t]['originator']]
if 'annotation' in etype:
etype['annotation']['subject'] = expand_iri(context, etype['annotation']['subject'])
if 'relationship' in etype:
etype['relationship']['from'] = expand_iri(context, etype['relationship']['from'])
etype['relationship']['to'] = [expand_iri(context, k) for k in etype['relationship']['to']]
def compress_ids(context: dict, element: dict) -> None:
etype = element['type']
element.update({'id': compress_iri(context, element['id'])})
if 'created' in element:
element['created']['by'] = [compress_iri(context, k) for k in element['created']['by']]
for t in etype:
if 'elements' in etype[t]:
etype[t]['elements'] = [compress_iri(context, k) for k in etype[t]['elements']]
elif 'originator' in etype[t]:
etype[t]['originator'] = [compress_iri(context, k) for k in etype[t]['originator']]
if 'annotation' in etype:
etype['annotation']['subject'] = compress_iri(context, etype['annotation']['subject'])
if 'relationship' in etype:
etype['relationship']['from'] = compress_iri(context, etype['relationship']['from'])
etype['relationship']['to'] = [compress_iri(context, k) for k in etype['relationship']['to']]
def expand_element(context: dict, element: dict) -> dict:
"""
Fill in Element properties from Context
"""
element_x = {'id': ''} # put id first
element_x.update({k: context[k] for k in DEFAULT_PROPERTIES if k in context})
element_x.update(element)
# print(f" {element_x}")
expand_ids(context, element_x, IRI_LOCATIONS)
print(f" {element_x}")
return element_x
def split_element_set(context: dict, element: dict) -> list:
"""
Split an Element + Context into a set of individual Elements
"""
context.update({k: element[k] for k in DEFAULT_PROPERTIES if k in element})
elist = [expand_element(context, element)]
for e in context.get('elementValues', []):
elist.append(expand_element(context, e))
return elist
def join_element_set(context: dict, element_id: str, elements: list) -> dict:
"""
Combine a set of individual Elements into a designated Element, update Context
"""
return
def load_any(path: str) -> (dict, None):
fn, ext = os.path.splitext(path)
try:
loader = {
'.jadn': jadn.load,
'.jidl': jadn.convert.jidl_load,
'.html': jadn.convert.html_load
}[ext]
except KeyError:
if os.path.isfile(path):
raise ValueError(f'Unsupported schema format: {path}')
return
return loader(path)
def make_dot(context: dict, elist: list, fp: str) -> None:
ex = {e['id']: k for k, e in enumerate(elist, start=1)}
with open(os.path.splitext(fp)[0] + '.dot', 'w') as fx:
fx.write('digraph G {\nnode [fontname=Arial, fontsize=8, shape=box, style=filled, fillcolor=lightskyblue1]\n')
for e in elist:
id = compress_iri(context, e['id'])
# print(f" n{ex[e['id']]}: {id}: {e.get('name', id)}")
fx.write(f"n{ex[e['id']]} [label=\"{id}\\n{e.get('name', '')}\"]\n")
for t in e['type']:
for n in e['type'][t].get('elements', []):
dest = f'n{ex[n]}' if n in ex else f'"{compress_iri(context, n)}"'
fx.write(f" n{ex[e['id']]} -> {dest}\n")
fx.write('}\n')
if __name__ == '__main__':
print(f'Installed JADN version: {jadn.__version__}\n')
os.makedirs(OUT_DIR, exist_ok=True)
s = load_any(SCHEMA)
sc = jadn.codec.Codec(s, verbose_rec=True, verbose_str=True)
for f in os.scandir(DATA_DIR):
print(f.name)
if not f.is_file():
continue
data = json.load(open(f.path))
el = sc.decode('Element', data)
cx = el.pop('context', {})
cx['local_ids'] = [compress_iri(cx, el['id'])] + [compress_iri(cx, ev['id']) for ev in cx.get('elementValues', {})]
elements = split_element_set(cx, el)
make_dot(cx, elements, os.path.join(OUT_DIR, f.name))
|
py
|
1a5d5bcd9595258e9b294e5cf80969f7e86cedc1
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import Dict, Optional, Tuple, Type, Union
from merlin.models.tf.blocks.core.aggregation import SequenceAggregation, SequenceAggregator
from merlin.models.tf.blocks.core.base import Block, BlockType
from merlin.models.tf.blocks.core.combinators import ParallelBlock, TabularAggregationType
from merlin.models.tf.blocks.core.masking import MaskingBlock, masking_registry
from merlin.models.tf.blocks.core.transformations import AsDenseFeatures
from merlin.models.tf.features.continuous import ContinuousFeatures
from merlin.models.tf.features.embedding import (
ContinuousEmbedding,
EmbeddingFeatures,
EmbeddingOptions,
SequenceEmbeddingFeatures,
)
from merlin.schema import Schema, Tags, TagsType
LOG = logging.getLogger("merlin-models")
def InputBlock(
schema: Schema,
branches: Optional[Dict[str, Block]] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
seq: bool = False,
max_seq_length: Optional[int] = None,
add_continuous_branch: bool = True,
continuous_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.CONTINUOUS,),
continuous_projection: Optional[Block] = None,
add_embedding_branch: bool = True,
embedding_options: EmbeddingOptions = EmbeddingOptions(),
categorical_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.CATEGORICAL,),
sequential_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.SEQUENCE,),
split_sparse: bool = False,
masking: Optional[Union[str, MaskingBlock]] = None,
seq_aggregator: Block = SequenceAggregator(SequenceAggregation.MEAN),
**kwargs,
) -> Block:
"""The entry block of the model to process input features from a schema.
This function creates continuous and embedding layers, and connects them via `ParallelBlock`.
If aggregation argument is not set, it returns a dictionary of multiple tensors
each corresponds to an input feature.
Otherwise, it merges the tensors into one using the aggregation method.
Example usage::
mlp = ml.InputBlock(schema).connect(ml.MLPBlock([64, 32]))
Parameters:
----------
schema: Schema
Schema of the input data. This Schema object will be automatically generated using
[NVTabular](https://nvidia-merlin.github.io/NVTabular/main/Introduction.html).
Next to this, it's also possible to construct it manually.
branches: Dict[str, Block], optional
Dictionary of branches to use inside the InputBlock.
post: Optional[BlockType]
Transformations to apply on the inputs after the module is
called (so **after** `forward`).
Defaults to None
aggregation: Optional[TabularAggregationType]
Aggregation to apply after processing the `forward`-method to output a single Tensor.
Defaults to None
seq: bool
Whether to process inputs for sequential model (returns 3-D tensor)
or not (returns 2-D tensor). Use `seq=True` to treat the sparse (list) features
as sequences (e.g. for sequential recommendation) and `seq=False` to treat sparse
features as multi-hot categorical representations.
Defaults to False
add_continuous_branch: bool
If set, add the branch to process continuous features
Defaults to True
continuous_tags: Optional[Union[TagsType, Tuple[Tags]]]
Tags to filter the continuous features
Defaults to (Tags.CONTINUOUS,)
continuous_projection: Optional[Block]
        If set, concatenate all numerical features and project them using the
specified Block.
Defaults to None
add_embedding_branch: bool
If set, add the branch to process categorical features
Defaults to True
categorical_tags: Optional[Union[TagsType, Tuple[Tags]]]
        Tags to filter the categorical features
Defaults to (Tags.CATEGORICAL,)
sequential_tags: Optional[Union[TagsType, Tuple[Tags]]]
Tags to filter the sparse features
Defaults to (Tags.SEQUENCE,)
split_sparse: Optional[bool]
When True, separate the processing of context (2-D) and sparse features (3-D).
Defaults to False
    masking: Optional[Union[str, MaskingBlock]], optional
        If set, apply masking to the input embeddings and compute masked labels.
Defaults to None
seq_aggregator: Block
If non-sequential model (seq=False):
aggregate the sparse features tensor along the sequence axis.
Defaults to SequenceAggregator('mean')
"""
branches = branches or {}
if split_sparse:
sparse_schema = schema.select_by_tag(sequential_tags)
context_schema = schema.remove_by_tag(sequential_tags)
if not sparse_schema:
raise ValueError(
"Please make sure that schema has features tagged as 'sequence' when"
"`split_context` is set to True"
)
if not aggregation:
LOG.info(
"aggregation is not provided, "
"default `concat` will be used to merge sequential features"
)
aggregation = "concat"
agg = aggregation
sparse_interactions = InputBlock(
sparse_schema,
branches,
post,
aggregation=agg,
seq=True,
max_seq_length=max_seq_length,
add_continuous_branch=add_continuous_branch,
continuous_tags=continuous_tags,
continuous_projection=continuous_projection,
add_embedding_branch=add_embedding_branch,
embedding_options=embedding_options,
categorical_tags=categorical_tags,
split_sparse=False,
)
if masking:
if isinstance(masking, str):
masking = masking_registry.parse(masking)()
sparse_interactions = sparse_interactions.connect(masking)
if not seq:
sparse_interactions = sparse_interactions.connect(seq_aggregator)
if not context_schema:
return sparse_interactions
branches["sparse"] = sparse_interactions
return InputBlock(
context_schema,
branches,
post,
aggregation=agg,
seq=False,
add_continuous_branch=add_continuous_branch,
continuous_tags=continuous_tags,
continuous_projection=continuous_projection,
add_embedding_branch=add_embedding_branch,
embedding_options=embedding_options,
categorical_tags=categorical_tags,
split_sparse=False,
)
if add_continuous_branch and schema.select_by_tag(continuous_tags).column_schemas:
pre = None
if max_seq_length and seq:
pre = AsDenseFeatures(max_seq_length)
branches["continuous"] = ContinuousFeatures.from_schema( # type: ignore
schema,
tags=continuous_tags,
pre=pre,
)
if add_embedding_branch and schema.select_by_tag(categorical_tags).column_schemas:
emb_cls: Type[EmbeddingFeatures] = SequenceEmbeddingFeatures if seq else EmbeddingFeatures
emb_kwargs = {}
if max_seq_length and seq:
emb_kwargs["max_seq_length"] = max_seq_length
branches["categorical"] = emb_cls.from_schema( # type: ignore
schema, tags=categorical_tags, options=embedding_options, **emb_kwargs
)
if continuous_projection:
return ContinuousEmbedding(
ParallelBlock(branches),
continuous_projection,
aggregation=aggregation,
post=post,
name="continuous_projection",
)
return ParallelBlock(branches, aggregation=aggregation, post=post, is_input=True, **kwargs)
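def _example_split_sparse_input(train_ds):
    """
    Hedged usage sketch of the `split_sparse` path described in the docstring above, where
    sequential (list) features are processed separately from 2-D context features.
    `train_ds` is assumed to be a merlin.io.Dataset and the parameter values are examples only.
    """
    schema = train_ds.schema
    return InputBlock(
        schema,
        split_sparse=True,    # separate 3-D sequence features from 2-D context features
        seq=False,            # aggregate sequences with the default mean aggregator
        max_seq_length=20,    # assumed padding length for the list features
        aggregation="concat",
    )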
|
py
|
1a5d5bd6ab78ed16c911e6a130a91699c8c3f60c
|
"""
Add runway data to station info
Sourced from http://ourairports.com/data/
"""
import csv
import json
stations = json.load(open('stations.json'))
# Add runway data subset to station data
with open('runways.csv') as fin:
runways = csv.reader(fin)
header = True
for runway in runways:
# Skip header row
if header:
header = False
continue
data = {
'length': int(runway[3]) if runway[3] else 0,
'width': int(runway[4]) if runway[4] else 0,
'ident1': runway[8],
'ident2': runway[14],
}
station = runway[2]
if station in stations:
if 'runways' in stations[station]:
stations[station]['runways'].append(data)
else:
stations[station]['runways'] = [data]
# Sort runways by longest length and add missing nulls
for station in stations:
if 'runways' in stations[station]:
stations[station]['runways'].sort(key=lambda x: x['length'], reverse=True)
else:
stations[station]['runways'] = None
json.dump(stations, open('stations.1.json', 'w'))
|
py
|
1a5d5bf396d5a56e1248345012696dab6435f80b
|
# ECE457A Assignment 2 Maze path finding
# Michael Lin
from maze import maze
# Each node in the maze is represented by an object
import pygame
import math
from queue import PriorityQueue
WIDTH = 700
WIN = pygame.display.set_mode((WIDTH, WIDTH + 100))
pygame.display.set_caption("A* Path Finding Algorithm")
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PURPLE = (128, 0, 128)
ORANGE = (255, 165, 0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
BFS = 0
DFS = 1
ASTAR = 2
class Node:
def __init__(self, row, col, width, total_rows):
self.row = row
self.col = col
self.x = row * width
self.y = col * width
self.color = WHITE
self.neighbors = []
self.width = width
self.total_rows = total_rows
def get_pos(self):
return self.row, self.col
def is_closed(self):
return self.color == RED
def is_open(self):
return self.color == GREEN
def is_barrier(self):
return self.color == BLACK
def is_start(self):
return self.color == ORANGE
def is_end(self):
return self.color == TURQUOISE
def reset(self):
self.color = WHITE
def make_start(self):
self.color = ORANGE
def make_closed(self):
self.color = RED
def make_open(self):
self.color = GREEN
def make_barrier(self):
self.color = BLACK
def make_end(self):
self.color = TURQUOISE
def make_path(self):
self.color = PURPLE
def draw(self, win):
pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))
def update_neighbors(self, grid):
self.neighbors = []
# DOWN
if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier():
self.neighbors.append(grid[self.row + 1][self.col])
if self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP
self.neighbors.append(grid[self.row - 1][self.col])
# RIGHT
if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier():
self.neighbors.append(grid[self.row][self.col + 1])
if self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT
self.neighbors.append(grid[self.row][self.col - 1])
def __lt__(self, other):
return False
# pygame's x,y starts at top left
# Maze's co-ordinates start at bottom left
def h(p1, p2):
x1, y1 = p1
x2, y2 = p2
return abs(x1 - x2) + abs(y1 - y2)
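# e.g. h((0, 0), (3, 4)) = 3 + 4 = 7: Manhattan distance between grid cells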
def reconstruct_path(came_from, current, draw):
minimumPathCost=0
while current in came_from:
minimumPathCost += 1
current = came_from[current]
current.make_path()
draw()
    return minimumPathCost + 1  # include end node
#Three algorithms used:
#BFS
#DFS
#A* search
#output: complete path, its cost and the number of nodes explored
#bfs uses a queue to store its open set
def algorithm_bfs(draw, grid, start, end):
return False
#dfs uses a stack
def algorithm_dfs(draw, grid, start, end):
count = 0
open_set = []
open_set.append(start)
return False
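# Hedged sketch: the two functions above are placeholders. The helper below outlines how a BFS
# variant could look, reusing the Node API (neighbors, make_open, make_closed) and the same return
# convention as algorithm_astar. It is an illustration, not the assignment's reference implementation.
def _algorithm_bfs_sketch(draw, grid, start, end):
    from collections import deque
    queue = deque([start])
    came_from = {}
    visited = {start}
    explored = 0
    while queue:
        current = queue.popleft()
        explored += 1
        if current == end:
            cost = reconstruct_path(came_from, end, draw)
            end.make_end()
            return cost, explored
        for neighbor in current.neighbors:
            if neighbor not in visited:
                visited.add(neighbor)
                came_from[neighbor] = current
                queue.append(neighbor)
                neighbor.make_open()
        draw()
        if current != start:
            current.make_closed()
    return -1, -1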
#A* uses a priority queue, and a cost function f = g + h
#h is the heuristic function, using Manhattan distance
def algorithm_astar(draw, grid, start, end):
count = 0
minimumCost_=-1
nodesExplored=-1
open_set = PriorityQueue()#Fringe nodes to be visited
open_set.put((0, count, start))
came_from = {}
g_score = {node: float("inf") for row in grid for node in row}
g_score[start] = 0
f_score = {node: float("inf") for row in grid for node in row}
f_score[start] = h(start.get_pos(), end.get_pos())
open_set_hash = {start}#Visited nodes
while not open_set.empty():
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
current = open_set.get()[2]
open_set_hash.remove(current)
if current == end:
minimumCost_ = reconstruct_path(came_from, end, draw)
nodesExplored=count
end.make_end()
return minimumCost_, nodesExplored
for neighbor in current.neighbors:
temp_g_score = g_score[current] + 1
if temp_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = temp_g_score
f_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
neighbor.make_open()
draw()
if current != start:
current.make_closed()
return -1, -1
def make_grid(rows, width):
grid = []
gap = width // rows
for i in range(rows):
grid.append([])
for j in range(rows):
node = Node(i, j, gap, rows)
grid[i].append(node)
return grid
def draw_grid(win, rows, width):
gap = width // rows
for i in range(rows):
pygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))
for j in range(rows):
pygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))
def draw(win, grid, rows, width, pathCost=-1, nodesExplored=-1):
win.fill(WHITE)
for row in grid:
for node in row:
node.draw(win)
draw_grid(win, rows, width)
pygame.font.init()
myFont = pygame.font.Font("./OpenSans-VariableFont_wdth,wght.ttf", 15)
if pathCost != -1:
label = myFont.render("Minimum path cost: " + str(pathCost), 1, BLACK)
WIN.blit(label, (100, WIDTH + 20))
if nodesExplored!=-1:
label2 = myFont.render("Nodes explored: " + str(nodesExplored), 1, BLACK)
WIN.blit(label2, (100, WIDTH + 50))
label3 = myFont.render("Press space to begin search ", 1, BLACK)
WIN.blit(label3, (300, WIDTH + 20))
label4 = myFont.render("Press c to clear board", 1, BLACK)
WIN.blit(label4, (300, WIDTH + 50))
pygame.display.update()
def get_clicked_pos(pos, rows, width):
gap = width // rows
y, x = pos
row = y // gap
col = x // gap
return row, col
def main(win, width):
ROWS = 25
algo = ASTAR
minimumCost = -1
nodesExplored = -1
grid = make_grid(ROWS, width)
# note, pygame uses top left as (0,0) but data is given as bottom left first
# using pygame coordinate as master
start = grid[2][13]
start.make_start()#orange
end = grid[23][5] # E1: 5, 23 E2: 2, 3
end.make_end()#blue
for j in range(ROWS): # col
for i in range(ROWS): # row
i_ = ROWS - 1 - i
node = grid[j][i]
if maze[i_][j] == 1:
node.make_barrier()
run = True
while run:
draw(win, grid, ROWS, width, minimumCost, nodesExplored)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and start and end:
for row in grid:
for node in row:
node.update_neighbors(grid)
if algo == ASTAR:
minimumCost, nodesExplored= algorithm_astar(lambda: draw(win, grid, ROWS, width), grid, start, end)
elif algo == BFS:
algorithm_bfs(lambda: draw(win, grid, ROWS, width), grid, start, end)
elif algo == DFS:
algorithm_dfs(lambda: draw(win, grid, ROWS, width), grid, start, end)
if event.key == pygame.K_c:
start = None
end = None
minimumCost = -1
nodesExplored = -1
grid = make_grid(ROWS, width)
pygame.quit()
main(WIN, WIDTH)
|
py
|
1a5d5cd43b3eaa8cbb0d33956dd04d5ca53a8127
|
from .retainitwell import retainitwell_commandline_entrypoint
from .retainitwell import Application
|
py
|
1a5d5d3877c153dd6a0fc3e4b8d53c4a2c8ebd92
|
# -*- coding: utf-8 -*-
import os
import sys
import platform
import mathics.builtin.system as msystem
import mathics.builtin.datentime as datentime
import mathics.builtin.files_io.filesystem as filesystem
import mathics.builtin.atomic.numbers as numeric
from mathics.core.evaluation import Evaluation
def mathics_system_info(defs):
def eval(name, needs_head=True):
evaled = name().evaluate(evaluation)
if needs_head:
return evaled.head.to_python(string_quotes=False)
else:
return evaled.to_python(string_quotes=False)
evaluation = Evaluation(defs, output=None)
return {
"$Machine": sys.platform,
"$MachineName": platform.uname().node,
"$ProcessID": os.getppid(),
"$ProcessorType": platform.machine(),
"$SystemID": sys.platform,
"$UserName": eval(msystem.UserName),
"$SystemMemory": eval(msystem.SystemMemory),
"MemoryAvailable[]": eval(msystem.MemoryAvailable, needs_head=False),
"$SystemTimeZone": eval(datentime.SystemTimeZone),
"MachinePrecision": eval(numeric.MachinePrecision_),
"$BaseDirectory": eval(filesystem.BaseDirectory),
"$RootDirectory": eval(filesystem.RootDirectory),
"$HomeDirectory": eval(filesystem.HomeDirectory),
"$InstallationDirectory": eval(filesystem.InstallationDirectory),
"$TemporaryDirectory": eval(filesystem.TemporaryDirectory),
}
|
py
|
1a5d5e4f86a04b5533ecc78418945fffee028d93
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError, e:
USE_STATICFILES = False
def null_technical_500_response(request, exc_type, exc_value, tb):
raise exc_type, exc_value, tb
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
make_option('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.'),
)
if USE_STATICFILES:
option_list += (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
from django.core.servers.basehttp import run, AdminMediaHandler, WSGIServerException
from django.core.handlers.wsgi import WSGIHandler
try:
from werkzeug import run_simple, DebuggedApplication
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/download")
# usurp django's handler
from django.views import debug
debug.technical_500_response = null_technical_500_response
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
threaded = options.get('threaded', False)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
print "Validating models..."
self.validate(display_num_errors=True)
print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
print "Development server is running at http://%s:%s/" % (addr, port)
print "Using the Werkzeug debugger (http://werkzeug.pocoo.org/)"
print "Quit the server with %s." % quit_command
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
handler = AdminMediaHandler(WSGIHandler(), path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving) and 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
handler = StaticFilesHandler(handler)
if open_browser:
import webbrowser
url = "http://%s:%s/" % (addr, port)
webbrowser.open(url)
run_simple(addr, int(port), DebuggedApplication(handler, True),
use_reloader=use_reloader, use_debugger=True, threaded=threaded)
inner_run()
|
py
|
1a5d5e98e147fdbf8fbbb677cee244e4bacdbc59
|
import logging
import os, sys
import shutil
from datetime import datetime
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams
from volsim.metrics import *
from volsim.params import *
class Logger(object):
def __init__(self, path:str, params:Params=None, override:bool=False, addNumber:bool=True, addDate:bool=False):
if addDate:
self.path = "runs/" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S_") + path
elif addNumber:
self.path = "runs/%s_%02d" % (path, 0)
else:
self.path = "runs/" + path
if os.path.isdir(self.path):
if override:
shutil.rmtree(self.path)
else:
if addNumber:
num = 1
while os.path.isdir(self.path):
self.path = "runs/%s_%02d" % (path, num)
num += 1
else:
raise ValueError("Model directory already exists!")
os.makedirs(self.path)
shutil.copy("src/training.py", os.path.join(self.path, "training.py"))
self.tfWriter = CustomSummaryWriter(self.path, flush_secs=20)
# hacky reload fix for logging to work properly
import importlib
importlib.reload(logging)
logging.basicConfig(filename=self.path+"/log.txt", format="%(asctime)s %(message)s", level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Path: %s" % self.path)
logging.info("PyTorch Seed: %d" % torch.random.initial_seed())
if params:
logging.info(str(params.asDict()))
def setup(self, model:nn.Module, optimizer:Optimizer, lrScheduler:_LRScheduler, valSplit:dict, testSplit:dict):
self.model = model
self.optimizer = optimizer
self.lrScheduler = lrScheduler
datasetsCor = {}
for split in valSplit:
datasetsCor[split] = ["Multiline", ["datasets/Correlation_" + split]]
for split in testSplit:
datasetsCor[split] = ["Multiline", ["datasets/Correlation_" + split]]
datasetsCor["All (Val)"] = ["Multiline", ["datasets/Correlation_ValAll"]]
datasetsCor["All (Test)"] = ["Multiline", ["datasets/Correlation_TestAll"]]
layout = {
"Training":{
"Correlation": ["Multiline", ["train/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["train/Epoch_CorrelationMean", "train/Epoch_CorrelationMeanLow", "train/Epoch_CorrelationMeanHigh"]],
"Loss": ["Multiline", ["train/Epoch_Loss", "train/Epoch_LossL2", "train/Epoch_LossCorr", "train/Epoch_LossSizeReg", "train/Epoch_LossSlConvReg"]],
},
"Training Batches":{
"Loss (Batch)": ["Multiline", ["train/Batch_Loss", "train/Batch_LossL2", "train/Batch_LossCorr", "train/Batch_LossSlConvReg"]],
"Correlation (Batch)": ["Multiline", ["train/Batch_Correlation"]],
"Correlation (Sample Sliced)": ["Multiline", ["train/Sample_Correlation"]],
},
"Validation":{
"Correlation": ["Multiline", ["val/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["val/Epoch_CorrelationMean", "val/Epoch_CorrelationMeanLow", "val/Epoch_CorrelationMeanHigh"]],
"Distance": ["Margin", ["val/Epoch_Distance", "val/Epoch_DistanceLow", "val/Epoch_DistanceHigh"]],
},
"Validation Batches":{
"Correlation (Batch)": ["Multiline", ["val/Batch_Correlation"]],
},
"Test":{
"Correlation": ["Multiline", ["test/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["test/Epoch_CorrelationMean", "test/Epoch_CorrelationMeanLow", "test/Epoch_CorrelationMeanHigh"]],
"Distance": ["Margin", ["test/Epoch_Distance", "test/Epoch_DistanceLow", "test/Epoch_DistanceHigh"]],
},
"Test Batches":{
"Correlation (Batch)": ["Multiline", ["test/Batch_Correlation"]],
},
"Datasets": datasetsCor,
}
self.tfWriter.add_custom_scalars(layout)
def close(self):
logging.info("\nLog completed.")
logging.shutdown()
self.tfWriter.close()
def saveTrainState(self, epoch:int, milestone:bool=False):
assert (self.model), "No model to save, setup logger first!"
saveDict = {
"epoch" : epoch,
"optimizer" : self.optimizer.state_dict,
"lrScheduler" : self.lrScheduler.state_dict
}
torch.save(saveDict, self.path + "/TrainState.pth")
if milestone:
self.model.save(self.path + "/Epoch%02d.pth" % (epoch), override=True, noPrint=True)
else:
self.model.save(self.path + "/Model.pth", override=True, noPrint=True)
def resumeTrainState(self, epoch:int):
if epoch <= 0:
return
assert (self.model), "No model to load, setup logger first!"
saveDict = torch.load(self.path + "/TrainState.pth")
assert (saveDict["epoch"] == epoch), "Epoch mismatch when loading train state."
self.model.resume(self.path + "Model.pth")
self.optimizer.load_state_dict(saveDict["optimizer"])
schedulerState = saveDict.get("lrScheduler", None)
if schedulerState:
self.lrScheduler.load_state_dict(schedulerState)
# Adjust hParam behavior of SummaryWriter to store results in a single folder
# Workaround from:
# https://github.com/pytorch/pytorch/issues/32651#issuecomment-643791116
class CustomSummaryWriter(SummaryWriter):
def add_hparams(self, hparam_dict, metric_dict):
# remove all lists from hParam dict since only int, float, str, bool and torch.Tensor are possible
for key, value in hparam_dict.items():
if type(value) is list:
valueStr = " ".join([str(elem) for elem in value])
hparam_dict[key] = valueStr
elif not type(value) in [int, float, str, bool, torch.Tensor]:
hparam_dict[key] = " "
torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError('hparam_dict and metric_dict should be dictionary.')
exp, ssi, sei = hparams(hparam_dict, metric_dict)
logdir = self._get_file_writer().get_logdir()
with SummaryWriter(log_dir=logdir) as w_hp:
w_hp.file_writer.add_summary(exp)
w_hp.file_writer.add_summary(ssi)
w_hp.file_writer.add_summary(sei)
for k, v in metric_dict.items():
w_hp.add_scalar(k, v)
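# Usage sketch for CustomSummaryWriter.add_hparams (illustrative only; the
# hyperparameter names and the metric tag below are assumptions, not taken from
# the training code above):
#
#   writer = CustomSummaryWriter(log_dir="runs/example")
#   writer.add_hparams(
#       {"lr": 1e-3, "batchSize": 32, "milestones": [10, 20]},  # the list is flattened to a string
#       {"val/Epoch_CorrelationMean": 0.0},
#   )
#   writer.close()
#
# Unlike the stock SummaryWriter, the hparams summary ends up in the same log
# directory as the scalar summaries instead of a timestamped subdirectory.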
|
py
|
1a5d5f9047adfa32783a8cc094cb9e4ace20fdf6
|
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
df = pd.read_csv('data_mining.csv')
#x = df[['bitcoin','bitcoin buy','bitcoin mining', 'bitcoin price', 'blockchain']]
x = df[['bitcoin', 'revenue', 'trade_volume', 'market_cap', 'value']]
y = df[['value_tomorrow']]
# Scale features and target with separate scalers so the target scaling
# can be inverted later without refitting.
x_scale = MinMaxScaler()
y_scale = MinMaxScaler()
x = x_scale.fit_transform(x)
y = y_scale.fit_transform(y)
pf = PolynomialFeatures(degree=2)
x_poly = pf.fit_transform(x)
lr = LinearRegression()
prediction = cross_val_predict(lr, x_poly, y, cv=22)
# Undo the target scaling so the error is reported in the original units.
y_test = y_scale.inverse_transform(y)
prediction = y_scale.inverse_transform(prediction.reshape(-1, 1))
#result = pd.DataFrame(columns=['Date', 'Test', 'Prediction'])
#
#result['Date'] = df['date']
#result['Test'] = y['value_tomorrow']
#result['Prediction'] = prediction
###-----
print('MAE:', metrics.mean_absolute_error(y_test,prediction))
|
py
|
1a5d600aeec0458accc9d7693509e6158c495a77
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import sys
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
DCNForMaskedLM,
AutoTokenizer,
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPinyinIndexLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PinyinShuffleLineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={
"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_data_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=True,
metadata={
"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
#do_pred: bool = field(
#default=False, metadata={"help": "do predict"}
#)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False, shuffle=True, test=False):
if test:
file_path = args.test_data_file
else:
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return PinyinShuffleLineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, shuffle=shuffle)
else:
return TextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [
-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(
model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning(
"You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = BertTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = BertTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if model_args.model_name_or_path:
model = DCNForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
        model = DCNForMaskedLM(config)
model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = get_dataset(
data_args, tokenizer=tokenizer, shuffle=True) if training_args.do_train else None
eval_dataset = get_dataset(
data_args, tokenizer=tokenizer, evaluate=True, shuffle=False) if training_args.do_eval or training_args.do_predict else None
data_collator = DataCollatorForPinyinIndexLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
data_args=data_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
)
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
result = {"perplexity": perplexity}
output_eval_file = os.path.join(
training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(result)
if training_args.do_predict:
trainer.evaluate_sighan()
return results
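# Illustrative invocation (the script file name and data paths are assumptions;
# the flags map onto ModelArguments, DataTrainingArguments and TrainingArguments):
#
#   python run_dcn_mlm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_data_file data/train.txt \
#       --eval_data_file data/dev.txt \
#       --mlm --do_train --do_eval \
#       --output_dir ./output --overwrite_output_dir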
def result_predict(sentence_list, tokenizer, model, device, batch_size=50, max_seq_length=180):
eval_examples = []
for i in range(len(sentence_list)):
eval_examples.append(
InputExample(guid="1", text_a=sentence_list[i]))
eval_features = convert_examples_to_features(eval_examples,
max_seq_length,
tokenizer,
no_prefix=False)
sys.stdout.flush()
all_input_ids = torch.tensor([f.input_ids for f in eval_features],
dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features],
dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask,
all_segment_ids)
sys.stdout.flush()
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data,
sampler=eval_sampler,
batch_size=batch_size)
result = []
result_prob = []
i = 0
for input_ids, input_mask, segment_ids in eval_dataloader:
i += 1
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
sys.stdout.flush()
with torch.no_grad():
#logits = self.model(input_ids, segment_ids, input_mask)
logits = model(input_ids, input_mask, segment_ids)[0]
logits = torch.nn.Softmax(dim=-1)(logits)
pred_probs, preds = logits.max(dim=-1)
preds = preds.detach().cpu().numpy()
pred_probs = pred_probs.detach().cpu().numpy()
result.extend(preds.tolist())
result_prob.extend(pred_probs.tolist())
labels = [tokenizer.convert_ids_to_tokens(r)[1:] for r in result]
return labels
def convert_examples_to_features(examples,
max_seq_length,
tokenizer,
no_prefix=False):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = example.text_a
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
if no_prefix:
tokens = tokens_a
else:
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
sys.stdout.flush()
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
        input_mask = [1] * len(input_ids)
        # NOTE: `pinyin_ids` is consumed by InputFeatures below, but its original
        # construction is not included in this snippet; an all-zero placeholder of
        # the same length is used here so the function runs (assumption).
        pinyin_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
pinyin_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(pinyin_ids) == max_seq_length
#label_id = label_map[example.label]
#label_id = float(example.label)
label_id = example.label
"""
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
#logger.info("label: %s (id = %d)" % (example.label, label_id))
"""
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
pinyin_ids=pinyin_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, pinyin_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.pinyin_ids = pinyin_ids
self.label_id = label_id
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
py
|
1a5d6019d7329a52e17ab1e375d26b7fbca7b38e
|
# IMPORT MODULES
from __future__ import division
import numpy as np
import h5py
from .Tools import timeToIntVec, validate_array_ndim
# DEFINE CLASSES FOR REPRESENTING SIMULATIONS
class GIFnet_Simulation(h5py.File):
"""Represents a GIFnet simulation.
Subclass of h5py.File.
Suggested usage:
sim = GIFnet_Simulation(
'example.hdf5', 'Example simulation',
T = 100., dt = 0.1, no_sweeps = 10,
no_ser_neurons = 10,
no_ser_examples = 3,
no_gaba_neurons = 5,
no_gaba_examples = 3,
propagation_delay = 2.
)
sim.set_connectivity_matrix(connectivity_matrix)
sim.init_ser_examples(**ser_examples)
sim.init_ser_spktrains()
for sweep_no in ser_spktrains.shape[0]:
for cell_no in ser_spktrains.shape[1]:
sim.ser_spktrains[sweep_no, cell_no, :] = ser_spktrains[sweep_no, cell_no, :]
sim.init_gaba_examples(
I = gaba_examples['I'],
V = gaba_examples['V'],
some_channel = gaba_examples['some_channel']
)
sim.init_gaba_spktrains(
spktrains = gaba_spktrains
)
"""
def __init__(
self,
fname,
name=None,
T=None,
dt=None,
no_sweeps=None,
no_ser_neurons=None,
no_ser_examples=None,
no_gaba_neurons=None,
no_gaba_examples=None,
propagation_delay=None,
**kwargs
):
"""Create a new GIFnet_Simulation object.
Inputs:
fname (str)
-- Name of file on disk in which to store
contents of GIFnet_Simulation. (Equivalent
to h5py.File's 'name' argument.)
name (str)
-- Meta-attribute with short description
of experiment.
T (float)
-- Duration of each sweep (ms).
dt (float)
-- Timestep (ms).
no_sweeps (int)
-- Number of sweeps in simulation.
no_ser_neurons, no_gaba_neurons (int)
-- Total number of ser/gaba neurons in population.
Spiketrains of this number of neurons are stored.
no_ser_examples, no_gaba_examples (int)
-- Number of neurons in population for which
full traces are stored.
propagation_delay (float)
-- Delay between GABA spike and start of IPSC
in 5HT neurons (ms).
kwargs
-- Keyword arguments to be passed to h5py.File
initializer.
"""
if kwargs.get('mode', 'a') not in ['r', 'a']:
raise ValueError('\'mode\' must be \'r\' or \'a\'')
super(GIFnet_Simulation, self).__init__(
name=fname, mode=kwargs.pop('mode', 'a'), **kwargs
)
if name is not None:
self.set_name(name)
if T is not None:
self.set_T(T)
if dt is not None:
self.set_dt(dt)
if no_sweeps is not None:
self.set_no_sweeps(no_sweeps)
if no_ser_neurons is not None:
self.set_no_ser_neurons(no_ser_neurons)
if no_ser_examples is not None:
self.set_no_ser_examples(no_ser_examples)
if no_gaba_neurons is not None:
self.set_no_gaba_neurons(no_gaba_neurons)
if no_gaba_examples is not None:
self.set_no_gaba_examples(no_gaba_examples)
if propagation_delay is not None:
self.set_propagation_delay(propagation_delay)
### Getters and setters for meta-attributes.
def get_name(self):
# 'name' is a short description, not a filename.
if 'name' not in self.attrs.keys():
raise KeyError('\'name\' not set.')
else:
return self.attrs['name']
def set_name(self, val):
# 'name' is a short description, not a filename.
self.attrs['name'] = val
def get_no_sweeps(self):
if 'no_sweeps' not in self.attrs.keys():
raise KeyError('\'no_sweeps\' not set.')
else:
return self.attrs['no_sweeps']
def set_no_sweeps(self, val):
self.attrs['no_sweeps'] = val
def get_T(self):
if 'T' not in self.attrs.keys():
raise KeyError('\'T\' not set.')
else:
return self.attrs['T']
def set_T(self, val):
self.attrs['T'] = val
def get_dt(self):
if 'dt' not in self.attrs.keys():
raise KeyError('\'dt\' not set.')
else:
return self.attrs['dt']
def set_dt(self, val):
self.attrs['dt'] = val
def get_no_timesteps(self):
if not ('dt' in self.attrs.keys() and 'T' in self.attrs.keys()):
raise KeyError('\'dt\' and \'T\' must both be set.')
else:
return int(self.get_T() / self.get_dt() + 0.5)
def get_no_ser_neurons(self):
if 'no_ser_neurons' not in self.attrs.keys():
raise KeyError('\'no_ser_neurons\' not set.')
else:
return self.attrs['no_ser_neurons']
def set_no_ser_neurons(self, val):
self.attrs['no_ser_neurons'] = val
def get_no_ser_examples(self):
if 'no_ser_examples' not in self.attrs.keys():
raise KeyError('\'no_ser_examples\' not set.')
else:
return self.attrs['no_ser_examples']
def set_no_ser_examples(self, val):
self.attrs['no_ser_examples'] = val
def get_no_gaba_neurons(self):
if 'no_gaba_neurons' not in self.attrs.keys():
raise KeyError('\'no_gaba_neurons\' not set.')
else:
return self.attrs['no_gaba_neurons']
def set_no_gaba_neurons(self, val):
self.attrs['no_gaba_neurons'] = val
def get_no_gaba_examples(self):
if 'no_gaba_examples' not in self.attrs.keys():
raise KeyError('\'no_gaba_examples\' not set.')
else:
return self.attrs['no_gaba_examples']
def set_no_gaba_examples(self, val):
self.attrs['no_gaba_examples'] = val
def get_propagation_delay(self):
if 'propagation_delay' not in self.attrs.keys():
raise KeyError('\'propagation_delay\' not set.')
else:
return self.attrs['propagation_delay']
def set_propagation_delay(self, val):
self.attrs['propagation_delay'] = val
### Getter and setter for connectivity matrix.
def get_connectivity_matrix(self):
if 'connectivity_matrix' not in self.keys():
raise AttributeError('connectivity_matrix not set.')
else:
return self['connectivity_matrix']
def set_connectivity_matrix(self, arr):
"""Create connectivity matrix for feedforward connections
Inputs:
arr (2D array)
-- 2D array with dimensionality
                [no_ser_neurons, no_gaba_neurons] specifying
gaba->ser connections.
"""
self._validate_connectivity_matrix_shape(arr)
self.create_dataset(
'connectivity_matrix', data=arr, dtype=np.float32, compression=5
)
# Ensure attributes are up to date.
self.set_no_ser_neurons(arr.shape[0])
self.set_no_gaba_neurons(arr.shape[1])
def _validate_connectivity_matrix_shape(self, connectivity_matrix):
"""Ensure connectivity matrix shape matches existing attributes."""
validate_array_ndim('connectivity matrix', connectivity_matrix, 2)
for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):
if (
                attr in self.attrs  # h5py attrs are dict-like, not Python attributes
and self.attrs[attr] != np.shape(connectivity_matrix)[axis]
):
raise ValueError(
'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='
'{ngaba} imply connectivity matrix of size '
'({nser}, {ngaba}), got {cm_shape} instead.'.format(
                        nser=self.attrs.get('no_ser_neurons', 'any'),
                        ngaba=self.attrs.get('no_gaba_neurons', 'any'),
cm_shape=np.shape(connectivity_matrix),
)
)
### Properties and initializers for recorded signals.
@property
def ser_spktrains(self):
if 'ser' not in self.keys() or 'spktrains' not in self['ser'].keys():
raise AttributeError(
'ser_spktrains must be initialized via init_ser_spktrains '
'first.'
)
else:
return self['ser/spktrains']
def init_ser_spktrains(self, spktrains=None, spktimes=None):
"""Initialize ser spiketrains as an indicator tensor
Save spiketrains as an indicator tensor, starting
from a tensor of spiketrains or list of lists.
Note that both types of input are equivalent, but
at most one should be passed at a time.
If neither spktrains nor spktimes is passed in, an empty
spktrain array is simply created with the correct shape.
        The ser spiketrains can be written and read via the instance's
        ser_spktrains attribute.
Inputs:
spktrains (3D array, or None)
-- 3D indicator tensor (1 when a spike
occurs, 0 otherwise) with dimensionality
[sweeps, cells, timesteps].
spktimes (nested list of depth == 3, or None)
-- Nested list laid out according to
[sweep][cell][spike_number] with times of
each spike for each cell on each sweep.
"""
if spktimes is not None and spktrains is not None:
raise ValueError(
'Only spktimes or spktrains should be provided, ' 'not both.'
)
sergroup = self.require_group('ser')
sspks = sergroup.create_dataset(
'spktrains',
shape=(
self.get_no_sweeps(),
self.get_no_ser_neurons(),
self.get_no_timesteps(),
),
dtype=np.int8,
compression=5,
)
# Case that spktrains have been provided directly.
if spktrains is not None:
sspks[:, :, :] = spktrains
# Case that nested list of spktimes has been provided.
elif spktimes is not None:
for i in range(len(spktimes)):
for j in range(len(spktimes[0])):
sspks[i, j, :] = timeToIntVec(
spktimes[i][j], self.get_T(), self.get_dt()
)
@property
def ser_examples(self):
if 'ser' not in self.keys() or 'examples' not in self['ser'].keys():
raise AttributeError(
'ser_examples must be initialized via init_ser_examples '
'first.'
)
else:
return self['ser/examples']
def init_ser_examples(
self, I=None, V=None, feedforward_input=None, **kwargs
):
"""Initialize ser example traces
Any inputs set to None will be initialized as empty
arrays.
Inputs:
I (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
input current channel.
V (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded voltage channel.
feedforward_input (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded gaba->ser feedforward_input.
kwargs (3D array or None)
-- Identically-shaped 3D arrays for
any other channels to initialize.
"""
sergroup = self.require_group('ser')
serex = sergroup.require_group('examples')
pairs = kwargs.copy()
pairs.update({'I': I, 'V': V, 'feedforward_input': feedforward_input})
        for key, val in pairs.items():
# Initialize with data, if available.
if val is not None:
serex.create_dataset(
key,
data=val,
shape=(
self.get_no_sweeps(),
self.get_no_ser_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
# Initialize empty if no data available.
else:
serex.create_dataset(
key,
fillvalue=0,
shape=(
self.get_no_sweeps(),
self.get_no_ser_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
@property
def gaba_examples(self):
if 'gaba' not in self.keys() or 'examples' not in self['gaba'].keys():
raise AttributeError(
'gaba_examples must be initialized via init_gaba_examples '
'first.'
)
else:
return self['gaba/examples']
def init_gaba_examples(self, I=None, V=None, **kwargs):
"""Initialize gaba example traces
Any inputs set to None will be initialized as empty
arrays.
Inputs:
I (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
input current channel.
V (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded voltage channel.
kwargs (3D array or None)
-- Identically-shaped 3D arrays for
any other channels to initialize.
"""
gabagroup = self.require_group('gaba')
gabaex = gabagroup.require_group('examples')
pairs = kwargs.copy()
pairs.update({'I': I, 'V': V})
        for key, val in pairs.items():
# Initialize with data, if available.
if val is not None:
gabaex.create_dataset(
key,
data=val,
shape=(
self.get_no_sweeps(),
self.get_no_gaba_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
# Initialize empty if no data available.
else:
gabaex.create_dataset(
key,
fillvalue=0,
shape=(
self.get_no_sweeps(),
self.get_no_gaba_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
@property
def gaba_spktrains(self):
if 'gaba' not in self.keys() or 'spktrains' not in self['gaba'].keys():
raise AttributeError(
'gaba_spktrains must be initialized via init_gaba_spktrains '
'first.'
)
else:
return self['gaba/spktrains']
def init_gaba_spktrains(self, spktrains=None, spktimes=None):
"""Initialize gaba spiketrains as an indicator tensor
Save spiketrains as an indicator tensor, starting
from a tensor of spiketrains or list of lists.
Note that both types of input are equivalent, but
only one should be passed at a time.
If neither spktrains nor spktimes is passed in, an empty
spktrain array is simply created with the correct shape.
        The gaba spiketrains can be written and read via the instance's
        gaba_spktrains attribute.
Inputs:
spktrains (3D array, or None)
-- 3D indicator tensor (1 when a spike
occurs, 0 otherwise) with dimensionality
[sweeps, cells, timesteps].
spktimes (nested list of depth == 3, or None)
-- Nested list laid out according to
[sweep][cell][spike_number] with times of
each spike for each cell on each sweep.
"""
if spktimes is not None and spktrains is not None:
raise ValueError(
'Only spktimes or spktrains should be provided, ' 'not both.'
)
gabagroup = self.require_group('gaba')
gspks = gabagroup.create_dataset(
'spktrains',
shape=(
self.get_no_sweeps(),
self.get_no_gaba_neurons(),
self.get_no_timesteps(),
),
dtype=np.int8,
compression=5,
)
# Case that spktrains have been provided directly.
if spktrains is not None:
gspks[:, :, :] = spktrains
# Case that nested list of spktimes has been provided.
elif spktimes is not None:
for i in range(len(spktimes)):
for j in range(len(spktimes[0])):
gspks[i, j, :] = timeToIntVec(
spktimes[i][j], self.get_T(), self.get_dt()
)
### Data processing and support arrays.
def get_ser_spktimes(self):
"""Get nested list of 5HT neuron spktimes.
Nested list should be indexed according
to [sweep_no][cell_no][spk_no].
"""
spktimes = []
for sweep_no in range(self.get_no_sweeps()):
spktimes_singlesweep = []
for cell_no in range(self.get_no_ser_neurons()):
spktimes_singlesweep.append(
np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]
* self.get_dt()
)
spktimes.append(spktimes_singlesweep)
return spktimes
def get_gaba_spktimes(self):
"""Get nested list of GABA neuron spktimes.
Nested list should be indexed according
to [sweep_no][cell_no][spk_no].
"""
spktimes = []
for sweep_no in range(self.get_no_sweeps()):
spktimes_singlesweep = []
for cell_no in range(self.get_no_gaba_neurons()):
spktimes_singlesweep.append(
np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[
0
]
* self.get_dt()
)
spktimes.append(spktimes_singlesweep)
return spktimes
def get_t_vec(self):
"""Return a time support vector (ms).
"""
t_vec = np.arange(0, self.get_T(), self.get_dt())
# Shape checks.
if 'ser' in self.keys() and 'spktrains' in self['ser'].keys():
assert self.ser_spktrains.shape[2] == len(
t_vec
), 'Bad t_vec length ({})'.format(len(t_vec))
if 'gaba' in self.keys() and 'spktrains' in self['gaba'].keys():
assert self.gaba_spktrains.shape[2] == len(
t_vec
), 'Bad t_vec length ({})'.format(len(t_vec))
return t_vec
def get_ser_examples_supp(self):
"""Get support array for ser_examples.
"""
return np.broadcast_to(
self.get_t_vec(),
            self.ser_examples[list(self.ser_examples.keys())[0]].shape,
)
def get_gaba_examples_supp(self):
"""Get support array for gaba_examples.
"""
return np.broadcast_to(
self.get_t_vec(),
            self.gaba_examples[list(self.gaba_examples.keys())[0]].shape,
)
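# Read-back sketch (the file name is illustrative; assumes a simulation was
# written as in the class docstring above):
#
#   sim = GIFnet_Simulation('example.hdf5', mode='r')
#   t_vec = sim.get_t_vec()                    # time support in ms
#   ser_spktimes = sim.get_ser_spktimes()      # [sweep][cell] -> spike times (ms)
#   gaba_spktimes = sim.get_gaba_spktimes()
#   sim.close()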
|
py
|
1a5d60d15e7bfc38a2adc1343a273f9c6de657b6
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger
from azurelinuxagent.common.exception import InvalidExtensionEventError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEventList, TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_extension_telemetry_handler(protocol_util):
return ExtensionTelemetryHandler(protocol_util)
class ExtensionEventSchema(object): # pylint: disable=R0903
"""
Class for defining the schema for Extension Events.
"""
Version = "Version"
Timestamp = "Timestamp"
TaskName = "TaskName"
EventLevel = "EventLevel"
Message = "Message"
EventPid = "EventPid"
EventTid = "EventTid"
OperationId = "OperationId"
class ProcessExtensionTelemetry(PeriodicOperation):
"""
Periodic operation for collecting and sending extension telemetry events to Wireserver.
"""
_EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=5)
_EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
# Limits
_MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
_EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024 # 4 MB = 4 * 1,048,576 Bytes
_EXTENSION_EVENT_MAX_SIZE = 1024 * 6 # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
_EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3 # 3Kb or 3072 chars.
_EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
def __init__(self, protocol_util):
super(ProcessExtensionTelemetry, self).__init__(
name="collect and send extension events",
operation=self._collect_and_send_events,
period=ProcessExtensionTelemetry._EXTENSION_EVENT_COLLECTION_PERIOD)
self._protocol = protocol_util.get_protocol()
def _collect_and_send_events(self):
event_list = self._collect_extension_events()
if len(event_list.events) > 0: # pylint: disable=C1801
self._protocol.report_event(event_list)
def _collect_extension_events(self):
events_list = TelemetryEventList()
extension_handler_with_event_dirs = []
try:
extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
if len(extension_handler_with_event_dirs) == 0: # pylint: disable=C1801
logger.verbose("No Extension events directory exist")
return events_list
for extension_handler_with_event_dir in extension_handler_with_event_dirs:
handler_name = extension_handler_with_event_dir[0]
handler_event_dir_path = extension_handler_with_event_dir[1]
self._capture_extension_events(handler_name, handler_event_dir_path, events_list)
except Exception as e: # pylint: disable=C0103
msg = "Unknown error occurred when trying to collect extension events. Error: {0}".format(ustr(e))
add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
finally:
            # Always ensure that the events directories are cleaned up each run,
            # even if we run into an error and don't process them this run.
self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
return events_list
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
"""
Get the full path to events directory for all extension handlers that have one
:param extension_log_dir: Base log directory for all extensions
:return: A list of full paths of existing events directory for all handlers
"""
extension_handler_with_event_dirs = []
for ext_handler_name in os.listdir(extension_log_dir):
# Check if its an Extension directory
if not os.path.isdir(os.path.join(extension_log_dir, ext_handler_name)) \
or re.match(HANDLER_NAME_PATTERN, ext_handler_name) is None:
continue
# Check if EVENTS_DIRECTORY directory exists
extension_event_dir = os.path.join(extension_log_dir, ext_handler_name, EVENTS_DIRECTORY)
if os.path.exists(extension_event_dir):
extension_handler_with_event_dirs.append((ext_handler_name, extension_event_dir))
return extension_handler_with_event_dirs
def _capture_extension_events(self, handler_name, handler_event_dir_path, events_list):
"""
Capture Extension events and add them to the events_list
:param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
:param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
:param events_list: List of captured extension events
"""
convert_to_mb = lambda x: (1.0 * x)/(1000 * 1000)
# Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
# Pick the latest files first, we'll discard older events if len(events) > MAX_EVENT_COUNT
event_files.sort(reverse=True)
captured_extension_events_count = 0
dropped_events_with_error_count = defaultdict(int)
for event_file in event_files:
event_file_path = os.path.join(handler_event_dir_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
# We only support _EXTENSION_EVENT_FILE_MAX_SIZE=4Mb max file size
event_file_size = os.stat(event_file_path).st_size
if event_file_size > self._EXTENSION_EVENT_FILE_MAX_SIZE:
msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
event_file_path, convert_to_mb(event_file_size),
convert_to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
continue
# We support multiple events in a file, read the file and parse events.
parsed_events = self._parse_event_file_and_capture_events(handler_name, event_file_path,
captured_extension_events_count,
dropped_events_with_error_count)
events_list.events.extend(parsed_events)
captured_extension_events_count += len(parsed_events)
# We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
break
except Exception as e: # pylint: disable=C0103
msg = "Failed to process event file {0}: {1}", event_file, ustr(e)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
finally:
os.remove(event_file_path)
if dropped_events_with_error_count is not None and len(dropped_events_with_error_count) > 0: # pylint: disable=C1801
msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
if captured_extension_events_count > 0:
logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
if len(extension_events_directories) == 0: # pylint: disable=C1801
return
for extension_handler_with_event_dir in extension_events_directories:
event_dir_path = extension_handler_with_event_dir[1]
if not os.path.exists(event_dir_path):
return
err = None
# Delete any residue files in the events directory
for residue_file in os.listdir(event_dir_path):
try:
os.remove(os.path.join(event_dir_path, residue_file))
except Exception as e: # pylint: disable=C0103
# Only log the first error once per handler per run if unable to clean off residue files
err = ustr(e) if err is None else err
if err is not None:
logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path, err)
def _parse_event_file_and_capture_events(self, handler_name, event_file_path, captured_events_count,
dropped_events_with_error_count):
events_list = []
event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
# Read event file and decode it properly
with open(event_file_path, "rb") as fd: # pylint: disable=C0103
event_data = fd.read().decode("utf-8")
# Parse the string and get the list of events
events = json.loads(event_data)
# We allow multiple events in a file but there can be an instance where the file only has a single
# JSON event and not a list. Handling that condition too
if not isinstance(events, list):
events = [events]
for event in events:
try:
events_list.append(self._parse_telemetry_event(handler_name, event, event_file_time))
captured_events_count += 1
except InvalidExtensionEventError as e: # pylint: disable=C0103
# These are the errors thrown if there's an error parsing the event. We want to report these back to the
# extension publishers so that they are aware of the issues.
# The error messages are all static messages, we will use this to create a dict and emit an event at the
# end of each run to notify if there were any errors parsing events for the extension
dropped_events_with_error_count[ustr(e)] += 1
except Exception as e: # pylint: disable=C0103
logger.warn("Unable to parse and transmit event, error: {0}".format(e))
if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
break
return events_list
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
"""
Parse the Json event file and convert it to TelemetryEvent object with the required data.
:return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
"""
extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
# Create a telemetry event, add all common parameters to the event
# and then overwrite all the common params with extension events params if same
event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
event.file_type = "json"
self.add_common_params_to_extension_event(event, event_file_time)
replace_or_add_params = {
GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
ExtensionEventSchema.Version.lower()]),
GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
}
self._replace_or_add_param_in_event(event, replace_or_add_params)
return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
"""
Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
:param extension_event: The json event from file
:return: Verified Json event that qualifies the contract.
"""
clean_string = lambda x: x.strip() if x is not None else x
event_size = 0
key_err_msg = "{0}: {1} not found"
# Convert the dict to all lower keys to avoid schema confusion.
# Only pick the params that we care about and skip the rest.
event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
# Trim message and only pick the first 3k chars
message_key = ExtensionEventSchema.Message.lower()
if message_key in event:
event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
else:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
if event[message_key] is None or len(event[message_key]) == 0: # pylint: disable=C1801
raise InvalidExtensionEventError(
"{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
ExtensionEventSchema.Message))
for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
# If all required keys not in event then raise
if not required_key in event:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
# If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
if event_size > self._EXTENSION_EVENT_MAX_SIZE:
raise InvalidExtensionEventError(
"{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
self._EXTENSION_EVENT_MAX_SIZE))
event_size += len(event[required_key])
return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
for param in event.parameters:
if param.name in replace_or_add_params:
param.value = replace_or_add_params.pop(param.name)
if not replace_or_add_params:
# All values replaced, return
return
# Add the remaining params to the event
for param_name in replace_or_add_params:
event.parameters.append(TelemetryEventParam(param_name, replace_or_add_params[param_name]))
@staticmethod
def add_common_params_to_extension_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time)
class ExtensionTelemetryHandler(ThreadHandlerInterface):
"""
This Handler takes care of fetching the Extension Telemetry events from the {extension_events_dir} and sends it to
Kusto for advanced debuggability.
"""
_THREAD_NAME = "ExtensionTelemetryHandler"
def __init__(self, protocol_util):
self.protocol_util = protocol_util
self.should_run = True
self.thread = None
@staticmethod
def get_thread_name():
return ExtensionTelemetryHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(ExtensionTelemetryHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
op = ProcessExtensionTelemetry(self.protocol_util) # pylint: disable=C0103
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
op.run()
except Exception as e: # pylint: disable=C0103
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(e))
finally:
PeriodicOperation.sleep_until_next_operation([op])
|
py
|
1a5d61a723fe0ace98593d10fd4370616fb6ee61
|
from bricks_modeling.file_IO.model_writer import write_bricks_to_file_with_steps
from util.debugger import MyDebugger
import os
import csv
import solvers.brick_heads.bach_render_images as render
import solvers.brick_heads.part_selection as p_select
from bricks_modeling.file_IO.model_reader import read_bricks_from_file
import numpy as np
if __name__ == "__main__":
debugger = MyDebugger("brick_heads")
dir_path = r"/Users/apple/workspace/lego-photo-studio/debug/2020-08-04_11-35-00_brick_heads"
final_str = ""
brick_count = {}
for i in range(1, 108):
file_path = os.path.join(dir_path, f"complete_{i}.ldr")
bricks = read_bricks_from_file(file_path, read_fake_bricks=True)
for b in bricks:
if b.template.id not in brick_count:
brick_count[b.template.id] = 1
else:
brick_count[b.template.id] += 1
for key, value in brick_count.items():
print(f"{key}\t{value}")
|
py
|
1a5d62bb397a2382ea52b2370269eaca2c86ee5c
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional, Sequence, Tuple, Union
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform.datasets import _datasources
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import utils
class TimeSeriesDataset(datasets._Dataset):
"""Managed time series dataset resource for Vertex AI"""
_supported_metadata_schema_uris: Optional[Tuple[str]] = (
schema.dataset.metadata.time_series,
)
@classmethod
def create(
cls,
display_name: str,
gcs_source: Optional[Union[str, Sequence[str]]] = None,
bq_source: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
) -> "TimeSeriesDataset":
"""Creates a new time series dataset.
Args:
display_name (str):
Required. The user-defined name of the Dataset.
                The name can be up to 128 characters long and can consist
of any UTF-8 characters.
gcs_source (Union[str, Sequence[str]]):
Google Cloud Storage URI(-s) to the
input file(s). May contain wildcards. For more
information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
examples:
str: "gs://bucket/file.csv"
Sequence[str]: ["gs://bucket/file1.csv", "gs://bucket/file2.csv"]
bq_source (str):
BigQuery URI to the input table.
example:
"bq://project.dataset.table_name"
project (str):
Project to upload this model to. Overrides project set in
aiplatform.init.
location (str):
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
request_metadata (Sequence[Tuple[str, str]]):
Strings which should be sent along with the request as metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the dataset. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Dataset and all sub-resources of this Dataset will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
time_series_dataset (TimeSeriesDataset):
Instantiated representation of the managed time series dataset resource.
"""
utils.validate_display_name(display_name)
api_client = cls._instantiate_client(location=location, credentials=credentials)
metadata_schema_uri = schema.dataset.metadata.time_series
datasource = _datasources.create_datasource(
metadata_schema_uri=metadata_schema_uri,
gcs_source=gcs_source,
bq_source=bq_source,
)
return cls._create_and_import(
api_client=api_client,
parent=initializer.global_config.common_location_path(
project=project, location=location
),
display_name=display_name,
metadata_schema_uri=metadata_schema_uri,
datasource=datasource,
project=project or initializer.global_config.project,
location=location or initializer.global_config.location,
credentials=credentials or initializer.global_config.credentials,
request_metadata=request_metadata,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
sync=sync,
)
def import_data(self):
raise NotImplementedError(
f"{self.__class__.__name__} class does not support 'import_data'"
)
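# Minimal usage sketch (project, location and the BigQuery table below are
# placeholders, not real resources):
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project="my-project", location="us-central1")
#   dataset = aiplatform.TimeSeriesDataset.create(
#       display_name="sales-forecasting",
#       bq_source="bq://my-project.my_dataset.sales",
#   )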
|
py
|
1a5d634851346381fddd8cddcae8f54256b25523
|
from importlib import import_module
def create_builder(model_type: str, config: dict):
package_name = __package__.split('.')[0]
builder_class = getattr(import_module('%s.models.%s.builder' % (package_name, model_type)), 'Builder')
return builder_class(config)
|
py
|
1a5d649b40f5315177b8bc73268e9b65e54b2a1d
|
"""
Test suite for _osx_support: shared OS X support functions.
"""
import os
import platform
import stat
import sys
import unittest
import test.support
import _osx_support
@unittest.skipUnless(sys.platform.startswith("darwin"), "requires OS X")
class Test_OSXSupport(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.prog_name = 'bogus_program_xxxx'
self.temp_path_dir = os.path.abspath(os.getcwd())
self.env = test.support.EnvironmentVarGuard()
self.addCleanup(self.env.__exit__)
for cv in ('CFLAGS', 'LDFLAGS', 'CPPFLAGS',
'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC',
'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS'):
if cv in self.env:
self.env.unset(cv)
def add_expected_saved_initial_values(self, config_vars, expected_vars):
# Ensure that the initial values for all modified config vars
# are also saved with modified keys.
expected_vars.update(('_OSX_SUPPORT_INITIAL_'+ k,
config_vars[k]) for k in config_vars
if config_vars[k] != expected_vars[k])
def test__find_executable(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.assertIsNone(_osx_support._find_executable(self.prog_name))
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo OK\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual(self.prog_name,
_osx_support._find_executable(self.prog_name))
def test__read_output(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo ExpectedOutput\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual('ExpectedOutput',
_osx_support._read_output(self.prog_name))
def test__find_build_tool(self):
out = _osx_support._find_build_tool('cc')
self.assertTrue(os.path.isfile(out),
'cc not found - check xcode-select')
def test__get_system_version(self):
self.assertTrue(platform.mac_ver()[0].startswith(
_osx_support._get_system_version()))
def test__remove_original_values(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertNotEqual(expected_vars, config_vars)
_osx_support._remove_original_values(config_vars)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value_unchanged(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = config_vars.copy()
cv = 'CC'
newvalue = 'gcc-test -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__supports_universal_builds(self):
mac_ver_tuple = tuple(int(i) for i in
platform.mac_ver()[0].split('.')[0:2])
self.assertEqual(mac_ver_tuple >= (10, 4),
_osx_support._supports_universal_builds())
def test__find_appropriate_compiler(self):
compilers = (
('gcc-test', 'i686-apple-darwin11-llvm-gcc-4.2'),
('clang', 'clang version 3.1'),
)
config_vars = {
'CC': 'gcc-test -pthreads',
'CXX': 'cc++-test',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-test -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-test -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang -pthreads',
'CXX': 'clang++',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'clang -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'clang -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
for c_name, c_output in compilers:
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
with open(c_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo " + c_output)
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._find_appropriate_compiler(
config_vars))
def test__remove_universal_flags(self):
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 ',
'LDFLAGS': ' -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -g',
'LDSHARED': 'gcc-4.0 -bundle -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._remove_universal_flags(
config_vars))
def test__remove_universal_flags_alternate(self):
# bpo-38360: also test the alternate single-argument form of -isysroot
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot/Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot/Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 ',
'LDFLAGS': ' -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -g',
'LDSHARED': 'gcc-4.0 -bundle -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._remove_universal_flags(
config_vars))
def test__remove_unsupported_archs(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch i386 ',
'LDFLAGS': ' -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
c_name = 'clang'
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
# exit status 255 means no PPC support in this compiler chain
with open(c_name, 'w') as f:
f.write("#!/bin/sh\nexit 255")
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._remove_unsupported_archs(
config_vars))
def test__override_all_archs(self):
self.env['ARCHFLAGS'] = '-arch x86_64'
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch x86_64',
'LDFLAGS': ' -g -arch x86_64',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -g -arch x86_64',
'LDSHARED': 'gcc-4.0 -bundle -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk -g -arch x86_64',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._override_all_archs(
config_vars))
def test__check_for_unavailable_sdk(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.1.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
' ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
' -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._check_for_unavailable_sdk(
config_vars))
def test__check_for_unavailable_sdk_alternate(self):
# bpo-38360: also test the alternate single-argument form of -isysroot
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot/Developer/SDKs/MacOSX10.1.sdk',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot/Developer/SDKs/MacOSX10.1.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot/Developer/SDKs/MacOSX10.1.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
' ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
' -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._check_for_unavailable_sdk(
config_vars))
def test_get_platform_osx(self):
# Note, get_platform_osx is currently tested more extensively
# indirectly by test_sysconfig and test_distutils
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
}
result = _osx_support.get_platform_osx(config_vars, ' ', ' ', ' ')
self.assertEqual(('macosx', '10.6', 'fat'), result)
if __name__ == "__main__":
unittest.main()
|
py
|
1a5d65a5b28a2b2e1cb8db41683884ac75a84aa5
|
from __future__ import unicode_literals
# This needs to stay as the first import, it sets up paths.
from gaetest_common import DummyPostData, fill_authors
from google.appengine.ext import ndb
from unittest import TestCase
from wtforms import Form, TextField, IntegerField, BooleanField
from wtforms.compat import text_type
from wtforms.ext.appengine.fields import KeyPropertyField
from wtforms.ext.appengine.ndb import model_form
class Author(ndb.Model):
name = ndb.StringProperty(required=True)
city = ndb.StringProperty()
age = ndb.IntegerProperty(required=True)
is_admin = ndb.BooleanProperty(default=False)
class Book(ndb.Model):
author = ndb.KeyProperty(Author)
class TestKeyPropertyField(TestCase):
class F(Form):
author = KeyPropertyField(reference_class=Author)
def setUp(self):
self.authors = fill_authors(Author)
self.first_author_id = self.authors[0].key.id()
def tearDown(self):
for author in Author.query():
author.key.delete()
def test_no_data(self):
form = self.F()
form.author.query = Author.query().order(Author.name)
assert not form.validate()
ichoices = list(form.author.iter_choices())
self.assertEqual(len(ichoices), len(self.authors))
for author, (key, label, selected) in zip(self.authors, ichoices):
self.assertEqual(key, text_type(author.key.id()))
def test_form_data(self):
# Valid data
form = self.F(DummyPostData(author=text_type(self.first_author_id)))
form.author.query = Author.query().order(Author.name)
assert form.validate()
ichoices = list(form.author.iter_choices())
self.assertEqual(len(ichoices), len(self.authors))
self.assertEqual(list(x[2] for x in ichoices), [True, False, False])
# Bogus Data
form = self.F(DummyPostData(author='fooflaf'))
assert not form.validate()
assert all(x[2] is False for x in form.author.iter_choices())
class TestModelForm(TestCase):
EXPECTED_AUTHOR = [('name', TextField), ('city', TextField), ('age', IntegerField), ('is_admin', BooleanField)]
def test(self):
form = model_form(Author)
for (expected_name, expected_type), field in zip(self.EXPECTED_AUTHOR, form()):
self.assertEqual(field.name, expected_name)
self.assertEqual(type(field), expected_type)
|
py
|
1a5d66b734f79a32e9099e8128f8405b8639ac74
|
import logging
import sublime
import sublime_plugin
from Mercurial.mercurial import hg
from Mercurial.mercurial import running_servers
class SublimeHgDiffSelectedCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.match_selector(0, "text.mercurial-log")
def run(self, edit):
        if len(self.view.sel()) != 2:
sublime.status_message("SublimeHg: Please select only two commits.")
return
sels = list(self.view.sel())
if not (self.view.match_selector(sels[0].begin(), "keyword.other.changeset-ref.short.mercurial-log") and
self.view.match_selector(sels[1].begin(), "keyword.other.changeset-ref.short.mercurial-log")):
sublime.status_message("SublimeHg: SublimeHg: Please select only two commits.")
return
commit_nrs = [int(self.view.substr(x)) for x in self.view.sel()]
older, newer = min(commit_nrs), max(commit_nrs)
w = self.view.window()
w.run_command("close")
# FIXME: We're assuming this is the correct view, and it might not be.
v = sublime.active_window().active_view()
path = v.file_name()
v.run_command("hg_command_runner", {"cmd": "diff -r%d:%d" % (older, newer),
"display_name": "diff",
"cwd": path})
class SublimeHgUpdateToRevisionCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.match_selector(0, "text.mercurial-log")
def run(self, edit):
if len(self.view.sel()) > 1:
sublime.status_message("SublimeHg: Please select only one commit.")
return
sels = list(self.view.sel())
if not (self.view.match_selector(sels[0].begin(), "keyword.other.changeset-ref.short.mercurial-log")):
sublime.status_message("SublimeHg: SublimeHg: Please select only one commit.")
return
w = self.view.window()
w.run_command("close")
# FIXME: We're assuming this is the correct view, and it might not be.
v = sublime.active_window().active_view()
path = v.file_name()
text, exit_code = hg(running_servers[path], "status")
if text:
msg = "SublimeHg: Don't update to a different revision with uncommited changes. Aborting."
# todo: Use warnings module instead?
logging.warning(msg)
sublime.status_message("Mercurial: Error")
return
v.run_command("hg_command_runner", {"cmd": "update %d" % int(self.view.substr(self.view.sel()[0])),
"display_name": "update",
"cwd": path})
|
py
|
1a5d672297853eef7938bb0093652a8924c53d9d
|
import gevent
from scales.async import AsyncResult
from scales.constants import ChannelState, SinkProperties
from scales.core import ScalesUriParser
from scales.message import (MethodReturnMessage, FailedFastError)
from scales.loadbalancer.serverset import ServerSetProvider
from scales.sink import (ClientMessageSink, ClientMessageSinkStack, SinkProviderBase)
from scales.varz import VarzSocketWrapper
from scales.scales_socket import ScalesSocket
class MockSinkStack(ClientMessageSinkStack):
def __init__(self):
self.processed_response = False
self.return_message = None
self.return_stream = None
super(ClientMessageSinkStack, self).__init__()
def AsyncProcessResponse(self, stream, msg):
self.processed_response = True
self.return_message = msg
self.return_stream = stream
super(MockSinkStack, self).AsyncProcessResponse(stream, msg)
class MockSink(ClientMessageSink):
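  """Test double for a client sink: its Open() can be delayed or made to fail a
  configurable number of times, and request/response handling can be overridden per test."""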
def __init__(self, properties):
super(MockSink, self).__init__()
self._state = ChannelState.Idle
self.ProcessRequest = None
self.ProcessResponse = None
self._open_delay = properties.get('open_delay', 0)
self._num_failures = properties.get('num_failures', [0])
self._properties = properties
self._open_result = None
self.endpoint = properties[SinkProperties.Endpoint]
def AsyncProcessRequest(self, sink_stack, msg, stream, headers):
if self.ProcessRequest:
self.ProcessRequest(sink_stack, msg, stream, headers)
else:
if self.is_closed:
sink_stack.AsyncProcessResponseMessage(MethodReturnMessage(error=FailedFastError()))
def AsyncProcessResponse(self, sink_stack, context, stream, msg):
if self.ProcessResponse:
self.ProcessResponse(sink_stack, context, stream, msg)
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
def Open(self):
if not self._open_result:
def open_impl():
if self._num_failures[0] > 0:
self._num_failures[0] -= 1
self._state = ChannelState.Closed
raise Exception("Error opening socket")
gevent.sleep(self._open_delay)
self._state = ChannelState.Open
self._open_result = AsyncResult()
self._open_result.SafeLink(open_impl)
return self._open_result
def Close(self):
self._state = ChannelState.Closed
self._open_result = None
def Fault(self):
self._state = ChannelState.Closed
self.on_faulted.Set()
class MockSinkProvider(SinkProviderBase):
def __init__(self):
super(MockSinkProvider, self).__init__()
self.ProcessRequest = None
self.ProcessResponse = None
self.sinks_created = []
def CreateSink(self, properties):
ms = MockSink(properties)
ms.ProcessRequest = self.ProcessRequest
ms.ProcessResponse = self.ProcessResponse
self.sinks_created.append(ms)
return ms
@property
def sink_class(self):
return MockSink
class MockServerSetProvider(ServerSetProvider):
def __init__(self):
self._servers = set()
self._on_leave = None
self._on_join = None
def Initialize(self, on_join, on_leave):
self._on_join = on_join
self._on_leave = on_leave
def Close(self):
pass
def AddServer(self, host, port):
ep = ScalesUriParser.Endpoint(host, port)
server = ScalesUriParser.Server(ep)
self._servers.add(server)
if self._on_join:
self._on_join(server)
def RemoveServer(self, host, port):
server = next(n for n in self._servers if n.service_endpoint.host == host and n.service_endpoint.port == port)
self._servers.discard(server)
if self._on_leave:
self._on_leave(server)
def GetServers(self):
return [s for s in self._servers]
def RemoveAllServers(self):
while self._servers:
s = self._servers.pop()
if self._on_leave:
self._on_leave(s)
def noop(*args, **kwargs): pass
class MockSocket(ScalesSocket):
def __init__(self, host, port, open=None, close=None, read=None, write=None):
super(MockSocket, self).__init__(host, port)
self.host = host
self.port = port
self._open = open or noop
self._close = close or noop
self._read = read or noop
self._write = write or noop
self._is_open = False
def open(self):
self._open()
self._is_open = True
def close(self):
self._close()
self._is_open = False
def isOpen(self):
return self._is_open
def read(self, sz):
return self._read(sz)
def write(self, buff):
return self._write(buff)
|
py
|
1a5d67f031e6ef658c419730f62260365a565d73
|
"""
raven.middleware
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import galaxy.eggs; galaxy.eggs.require( "raven" )
from raven import Client
from raven.utils.wsgi import get_current_url, get_headers, \
get_environ
class Sentry(object):
"""
A WSGI middleware which will attempt to capture any
uncaught exceptions and send them to Sentry.
"""
def __init__(self, application, dsn):
self.application = application
self.client = Client( dsn )
def __call__(self, environ, start_response):
try:
iterable = self.application(environ, start_response)
except Exception:
self.handle_exception(environ)
raise
try:
for event in iterable:
yield event
except Exception:
self.handle_exception(environ)
raise
finally:
# wsgi spec requires iterable to call close if it exists
# see http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
if iterable and hasattr(iterable, 'close') and callable(iterable.close):
try:
iterable.close()
except Exception:
self.handle_exception(environ)
def handle_exception(self, environ):
event_id = self.client.captureException(
data={
'sentry.interfaces.Http': {
'method': environ.get('REQUEST_METHOD'),
'url': get_current_url(environ, strip_querystring=True),
'query_string': environ.get('QUERY_STRING'),
# TODO
# 'data': environ.get('wsgi.input'),
'headers': dict(get_headers(environ)),
'env': dict(get_environ(environ)),
}
},
# Galaxy: add request id from environment if available
extra={
'request_id': environ.get( 'request_id', 'Unknown' )
}
)
# Galaxy: store event_id in environment so we can show it to the user
environ['sentry_event_id'] = event_id[0]
return event_id
|
py
|
1a5d685c4950c69b70e255d260d8246d596ebf2a
|
import ray
import ray._private.services as services
import ray.worker
import ray._private.profiling as profiling
import ray._private.utils as utils
from ray import ray_constants
from ray.state import GlobalState
from ray._raylet import GcsClientOptions
__all__ = ["free", "global_gc"]
MAX_MESSAGE_LENGTH = ray._config.max_grpc_message_size()
def global_gc():
"""Trigger gc.collect() on all workers in the cluster."""
worker = ray.worker.global_worker
worker.core_worker.global_gc()
def memory_summary(
address=None,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
group_by="NODE_ADDRESS",
sort_by="OBJECT_SIZE",
units="B",
line_wrap=True,
stats_only=False,
num_entries=None,
):
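    """Return a human-readable summary of object store memory usage in the cluster.
    Combines the per-object memory summary with aggregate object store stats;
    when ``stats_only`` is True only the aggregate store stats are returned.
    """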
from ray.dashboard.memory_utils import memory_summary
address = services.canonicalize_bootstrap_address(address)
state = GlobalState()
options = GcsClientOptions.from_gcs_address(address)
state._initialize_global_state(options)
if stats_only:
return get_store_stats(state)
return memory_summary(
state, group_by, sort_by, line_wrap, units, num_entries
) + get_store_stats(state)
def get_store_stats(state, node_manager_address=None, node_manager_port=None):
"""Returns a formatted string describing memory usage in the cluster."""
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
    # We can ask any Raylet for the global memory info; that Raylet internally
    # asks all nodes in the cluster for memory stats.
if node_manager_address is None or node_manager_port is None:
# We should ask for a raylet that is alive.
raylet = None
for node in state.node_table():
if node["Alive"]:
raylet = node
break
assert raylet is not None, "Every raylet is dead"
raylet_address = "{}:{}".format(
raylet["NodeManagerAddress"], raylet["NodeManagerPort"]
)
else:
raylet_address = "{}:{}".format(node_manager_address, node_manager_port)
channel = utils.init_grpc_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(include_memory_info=False),
timeout=30.0,
)
return store_stats_summary(reply)
def node_stats(
node_manager_address=None, node_manager_port=None, include_memory_info=True
):
"""Returns NodeStats object describing memory usage in the cluster."""
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
assert node_manager_address is not None and node_manager_port is not None
raylet_address = "{}:{}".format(node_manager_address, node_manager_port)
channel = utils.init_grpc_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
node_stats = stub.GetNodeStats(
node_manager_pb2.GetNodeStatsRequest(include_memory_info=include_memory_info),
timeout=30.0,
)
return node_stats
def store_stats_summary(reply):
"""Returns formatted string describing object store stats in all nodes."""
store_summary = "--- Aggregate object store stats across all nodes ---\n"
# TODO(ekl) it would be nice if we could provide a full memory usage
# breakdown by type (e.g., pinned by worker, primary, etc.)
store_summary += (
"Plasma memory usage {} MiB, {} objects, {}% full, {}% "
"needed\n".format(
int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),
reply.store_stats.num_local_objects,
round(
100
* reply.store_stats.object_store_bytes_used
/ reply.store_stats.object_store_bytes_avail,
2,
),
round(
100
* reply.store_stats.object_store_bytes_primary_copy
/ reply.store_stats.object_store_bytes_avail,
2,
),
)
)
if reply.store_stats.object_store_bytes_fallback > 0:
store_summary += "Plasma filesystem mmap usage: {} MiB\n".format(
int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))
)
if reply.store_stats.spill_time_total_s > 0:
store_summary += (
"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format(
int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),
reply.store_stats.spilled_objects_total,
int(
reply.store_stats.spilled_bytes_total
/ (1024 * 1024)
/ reply.store_stats.spill_time_total_s
),
)
)
if reply.store_stats.restore_time_total_s > 0:
store_summary += (
"Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".format(
int(reply.store_stats.restored_bytes_total / (1024 * 1024)),
reply.store_stats.restored_objects_total,
int(
reply.store_stats.restored_bytes_total
/ (1024 * 1024)
/ reply.store_stats.restore_time_total_s
),
)
)
if reply.store_stats.consumed_bytes > 0:
store_summary += "Objects consumed by Ray tasks: {} MiB.\n".format(
int(reply.store_stats.consumed_bytes / (1024 * 1024))
)
if reply.store_stats.object_pulls_queued:
store_summary += "Object fetches queued, waiting for available memory."
return store_summary
def free(object_refs, local_only=False):
"""Free a list of IDs from the in-process and plasma object stores.
This function is a low-level API which should be used in restricted
scenarios.
    If local_only is false, the request will be sent to all object stores.
This method will not return any value to indicate whether the deletion is
successful or not. This function is an instruction to the object store. If
some of the objects are in use, the object stores will delete them later
when the ref count is down to 0.
Examples:
>>> x_id = f.remote()
>>> ray.get(x_id) # wait for x to be created first
>>> free([x_id]) # unpin & delete x globally
Args:
object_refs (List[ObjectRef]): List of object refs to delete.
local_only (bool): Whether only deleting the list of objects in local
object store or all object stores.
"""
worker = ray.worker.global_worker
if isinstance(object_refs, ray.ObjectRef):
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise TypeError(
"free() expects a list of ObjectRef, got {}".format(type(object_refs))
)
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"Attempting to call `free` on the value {}, "
"which is not an ray.ObjectRef.".format(object_ref)
)
worker.check_connected()
with profiling.profile("ray.free"):
if len(object_refs) == 0:
return
worker.core_worker.free_objects(object_refs, local_only)
|
py
|
1a5d6c83e6cdcdaef627bd8853c683bc3d3c9da0
|
from baby_steps import given, then, when
from pytest import raises
from district42 import schema
from district42.errors import DeclarationError
from district42.types import FloatSchema
def test_float_declaration():
with when:
sch = schema.float
with then:
assert isinstance(sch, FloatSchema)
def test_float_value_declaration():
with given:
value = 3.14
with when:
sch = schema.float(value)
with then:
assert sch.props.value == value
def test_float_invalid_value_type_declaration_error():
with when, raises(Exception) as exception:
schema.float("3.14")
with then:
assert exception.type is DeclarationError
assert str(exception.value) == ("`schema.float` value must be an instance of 'float', "
"instance of 'str' '3.14' given")
def test_float_already_declared_declaration_error():
with when, raises(Exception) as exception:
schema.float(3.14)(3.14)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == "`schema.float(3.14)` is already declared"
def test_float_min_value_declaration():
with given:
min_value = 3.14
with when:
sch = schema.float.min(min_value)
with then:
assert sch.props.min == min_value
def test_float_invalid_min_value_type_declaration_error():
with when, raises(Exception) as exception:
schema.float.min("3.14")
with then:
assert exception.type is DeclarationError
assert str(exception.value) == ("`schema.float` value must be an instance of 'float', "
"instance of 'str' '3.14' given")
def test_float_value_already_declared_min_declaration_error():
with when, raises(Exception) as exception:
schema.float.min(3.14)(3.2)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == "`schema.float.min(3.14)` is already declared"
def test_float_min_value_already_declared_less_value_declaration_error():
with given:
sch = schema.float(3.14)
with when, raises(Exception) as exception:
sch.min(3.15)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == (
f"`{sch!r}` min value must be less than or equal to 3.14, 3.15 given"
)
def test_float_min_value_already_declared_min_declaration_error():
with when, raises(Exception) as exception:
schema.float.min(3.14).min(3.14)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == "`schema.float.min(3.14)` is already declared"
def test_float_max_value_declaration():
with given:
max_value = 3.14
with when:
sch = schema.float.max(max_value)
with then:
assert sch.props.max == max_value
def test_float_invalid_max_value_type_declaration_error():
with when, raises(Exception) as exception:
schema.float.max("3.14")
with then:
assert exception.type is DeclarationError
assert str(exception.value) == ("`schema.float` value must be an instance of 'float', "
"instance of 'str' '3.14' given")
def test_float_value_already_declared_max_declaration_error():
with when, raises(Exception) as exception:
schema.float.max(3.2)(3.14)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == "`schema.float.max(3.2)` is already declared"
def test_float_max_value_already_declared_greater_value_declaration_error():
with given:
sch = schema.float(3.14)
with when, raises(Exception) as exception:
sch.max(3.13)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == (
f"`{sch!r}` max value must be greater than or equal to 3.14, 3.13 given"
)
def test_float_max_value_already_declared_max_declaration_error():
with when, raises(Exception) as exception:
schema.float.max(3.14).max(3.14)
with then:
assert exception.type is DeclarationError
assert str(exception.value) == "`schema.float.max(3.14)` is already declared"
def test_float_min_max_value_declaration():
with given:
min_value, max_value = 3.14, 3.15
with when:
sch = schema.float.min(min_value).max(max_value)
with then:
assert sch.props.min == min_value
assert sch.props.max == max_value
def test_float_min_max_with_value_declaration():
with given:
value = 3.14
min_value, max_value = 3.13, 3.15
with when:
sch = schema.float(value).min(min_value).max(max_value)
with then:
assert sch.props.value == value
assert sch.props.min == min_value
assert sch.props.max == max_value
|
py
|
1a5d6d17d8b68b4660bcde157b197c2697dc6104
|
from django.db.models import Sum
from django.core.exceptions import ObjectDoesNotExist
from ...sale.models import Sales, SoldItem
from structlog import get_logger
logger = get_logger(__name__)
def get_hours_results(date, h):
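    """Sum of ``total_net`` for sales created on ``date`` whose hour equals ``h - 3``; returns 0 when there are none."""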
try:
sales_at_date = Sales.objects.filter(created__contains=date)
sales_at_h = sales_at_date.extra(where=['extract(hour from created) in (' + str(h - 3) + ')'])
try:
amount = sales_at_h.aggregate(Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_hours_results_range(date_from, date_to, l, h):
try:
sales_at_date = Sales.objects.filter(created__range=[date_from, date_to])
sales_at_h = sales_at_date.filter(created__hour__range=[l, h])
try:
amount = Sales.objects.filter(pk__in=sales_at_h).aggregate(Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_date_results_range(date_from, date_to):
try:
sales_at_date = Sales.objects.filter(created__range=[date_from, date_to])
try:
amount = Sales.objects.filter(pk__in=sales_at_date).aggregate(Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_date_results(date):
try:
sales_at_date = Sales.objects.filter(created__contains=date)
try:
amount = Sales.objects.filter(pk__in=sales_at_date).aggregate(Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_category_results(category, year, month):
try:
amount = SoldItem.objects.filter(product_category__contains=category, sales__created__year=year,
sales__created__month=month).aggregate(Sum('total_cost'))['total_cost__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_item_results(item, year, month):
try:
amount = SoldItem.objects.filter(product_name__contains=item, sales__created__year=year,
sales__created__month=month).aggregate(Sum('total_cost'))['total_cost__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_user_results(user, year, month):
try:
amount = Sales.objects.filter(user__name__contains=user, created__year=year, created__month=month).aggregate(
Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
def get_terminal_results(terminal, year, month):
try:
amount = Sales.objects.filter(terminal__terminal_name__contains=terminal, created__year=year,
created__month=month).aggregate(Sum('total_net'))['total_net__sum']
if amount is not None:
return amount
else:
amount = 0
return amount
except ObjectDoesNotExist:
amount = 0
return amount
|
py
|
1a5d6e730add2a0ad0728c4db5cc4867a3e703f5
|
"""Annotate vibrations with a network."""
import logging
import numpy as np
import defopt
import os
import dss.utils, dss.data, dss.models, dss.event_utils, dss.predict
import scipy.signal as ss
import h5py
from glob import glob
from typing import List
import flammkuchen
# move to cli module
def deepss(data_name: str, save_name: str, model_save_name: str, data_key: str = 'samples',
nb_channels: int = 16,
event_thres: float = 0.5, event_tol: float = 0.01,
segment_thres: float = 0.5):
"""[summary]
Args:
data_name (str): [description]
save_name (str): [description]. Defaults to None.
model_save_name (str): [description]
data_key (str): [description]. Defaults to 'samples'.
nb_channels (int): Number of channels to take from data file. Defaults to 16.
event_thres (float): [description]. Defaults to 0.5.
event_tol (float): [description]. Defaults to 0.01 seconds (10ms).
Raises:
ValueError: if data_name or save_name are of unknown type (allowed: wav, h5, zarr, npy/npz)
"""
# load model
logging.info(f'loading parameters for {model_save_name}')
params = dss.utils.load_params(model_save_name)
logging.info(f'loading data for {data_name}')
with h5py.File(data_name, 'r') as f:
x = f[data_key][..., :nb_channels]
samplerate = f.attrs['rate']
# channel_names = f.attrs['analog_chans_in']
logging.info(f' filtering')
sos_bp = ss.butter(5, [50, 1000], 'bandpass', output='sos', fs=samplerate)
x = ss.sosfiltfilt(sos_bp, x, axis=0).astype(np.float16)
logging.info(f' annotating {x.shape[0]/samplerate:1.2f} seconds')
events, segments, class_probabilities = dss.predict.predict(x, model_save_name, params)
logging.info(f' saving results to "{save_name}".')
os.makedirs(os.path.dirname(save_name), exist_ok=True)
event_names = [k for k in events.keys() if k != 'samplerate_Hz']
segment_names = [k for k in segments.keys() if k != 'samplerate_Hz' and k != 'noise']
d = {'class_probabilities': class_probabilities,
'segment_names': segment_names,
'segment_probabilities': [segments[segment_name]['probabilities'] for segment_name in segment_names],
'segment_labels': [segments[segment_name]['samples'] for segment_name in segment_names],
'event_names': event_names,
'event_probabilities': [events[event_name]['probabilities'] for event_name in event_names ],
'event_indices': [events[event_name]['seconds'] * events['samplerate_Hz'] for event_name in event_names ],
'samplerate_Hz': params['samplerate_y_Hz'],
}
flammkuchen.save(save_name, d)
from snakemake.shell import shell
logging.basicConfig(level=logging.INFO)
params = snakemake.params[0][snakemake.rule]
for out in snakemake.output:
deepss(snakemake.input[0], save_name=out, model_save_name=params['modelname'])
|
py
|
1a5d6ef837d31e5223faa393eae0df5f25ea1b6f
|
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import QTimer
from UM.Application import Application
from UM.Scene.SceneNode import SceneNode
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.Math.Vector import Vector
from UM.Scene.Selection import Selection
from UM.Preferences import Preferences
from cura.Scene.ConvexHullDecorator import ConvexHullDecorator
from cura.Operations import PlatformPhysicsOperation
from cura.Scene import ZOffsetDecorator
import random # used for list shuffling
class PlatformPhysics:
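    """Keeps scene nodes physically plausible on the build plate: shortly after a
    scene change it drops nodes down onto the plate and pushes overlapping
    printable meshes apart."""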
def __init__(self, controller, volume):
super().__init__()
self._controller = controller
self._controller.getScene().sceneChanged.connect(self._onSceneChanged)
self._controller.toolOperationStarted.connect(self._onToolOperationStarted)
self._controller.toolOperationStopped.connect(self._onToolOperationStopped)
self._build_volume = volume
self._enabled = True
self._change_timer = QTimer()
self._change_timer.setInterval(100)
self._change_timer.setSingleShot(True)
self._change_timer.timeout.connect(self._onChangeTimerFinished)
self._move_factor = 1.1 # By how much should we multiply overlap to calculate a new spot?
self._max_overlap_checks = 10 # How many times should we try to find a new spot per tick?
self._minimum_gap = 2 # It is a minimum distance (in mm) between two models, applicable for small models
Preferences.getInstance().addPreference("physics/automatic_push_free", True)
Preferences.getInstance().addPreference("physics/automatic_drop_down", True)
def _onSceneChanged(self, source):
if not source.getMeshData():
return
self._change_timer.start()
def _onChangeTimerFinished(self):
if not self._enabled:
return
root = self._controller.getScene().getRoot()
# Keep a list of nodes that are moving. We use this so that we don't move two intersecting objects in the
# same direction.
transformed_nodes = []
# We try to shuffle all the nodes to prevent "locked" situations, where iteration B inverts iteration A.
# By shuffling the order of the nodes, this might happen a few times, but at some point it will resolve.
nodes = list(BreadthFirstIterator(root))
# Only check nodes inside build area.
nodes = [node for node in nodes if (hasattr(node, "_outside_buildarea") and not node._outside_buildarea)]
random.shuffle(nodes)
for node in nodes:
if node is root or not isinstance(node, SceneNode) or node.getBoundingBox() is None:
continue
bbox = node.getBoundingBox()
# Move it downwards if bottom is above platform
move_vector = Vector()
if Preferences.getInstance().getValue("physics/automatic_drop_down") and not (node.getParent() and node.getParent().callDecoration("isGroup") or node.getParent() != root) and node.isEnabled(): #If an object is grouped, don't move it down
z_offset = node.callDecoration("getZOffset") if node.getDecorator(ZOffsetDecorator.ZOffsetDecorator) else 0
move_vector = move_vector.set(y = -bbox.bottom + z_offset)
# If there is no convex hull for the node, start calculating it and continue.
if not node.getDecorator(ConvexHullDecorator):
node.addDecorator(ConvexHullDecorator())
# only push away objects if this node is a printing mesh
if not node.callDecoration("isNonPrintingMesh") and Preferences.getInstance().getValue("physics/automatic_push_free"):
# Check for collisions between convex hulls
for other_node in BreadthFirstIterator(root):
# Ignore root, ourselves and anything that is not a normal SceneNode.
if other_node is root or not issubclass(type(other_node), SceneNode) or other_node is node or other_node.callDecoration("getBuildPlateNumber") != node.callDecoration("getBuildPlateNumber"):
continue
                    # Ignore collisions of a group with its own children
if other_node in node.getAllChildren() or node in other_node.getAllChildren():
continue
# Ignore collisions within a group
if other_node.getParent() and node.getParent() and (other_node.getParent().callDecoration("isGroup") is not None or node.getParent().callDecoration("isGroup") is not None):
continue
# Ignore nodes that do not have the right properties set.
if not other_node.callDecoration("getConvexHull") or not other_node.getBoundingBox():
continue
if other_node in transformed_nodes:
continue # Other node is already moving, wait for next pass.
if other_node.callDecoration("isNonPrintingMesh"):
continue
overlap = (0, 0) # Start loop with no overlap
current_overlap_checks = 0
# Continue to check the overlap until we no longer find one.
while overlap and current_overlap_checks < self._max_overlap_checks:
current_overlap_checks += 1
head_hull = node.callDecoration("getConvexHullHead")
if head_hull: # One at a time intersection.
overlap = head_hull.translate(move_vector.x, move_vector.z).intersectsPolygon(other_node.callDecoration("getConvexHull"))
if not overlap:
other_head_hull = other_node.callDecoration("getConvexHullHead")
if other_head_hull:
overlap = node.callDecoration("getConvexHull").translate(move_vector.x, move_vector.z).intersectsPolygon(other_head_hull)
if overlap:
# Moving ensured that overlap was still there. Try anew!
move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
else:
# Moving ensured that overlap was still there. Try anew!
move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
else:
own_convex_hull = node.callDecoration("getConvexHull")
other_convex_hull = other_node.callDecoration("getConvexHull")
if own_convex_hull and other_convex_hull:
overlap = own_convex_hull.translate(move_vector.x, move_vector.z).intersectsPolygon(other_convex_hull)
if overlap: # Moving ensured that overlap was still there. Try anew!
temp_move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
# if the distance between two models less than 2mm then try to find a new factor
if abs(temp_move_vector.x - overlap[0]) < self._minimum_gap and abs(temp_move_vector.y - overlap[1]) < self._minimum_gap:
temp_x_factor = (abs(overlap[0]) + self._minimum_gap) / overlap[0] if overlap[0] != 0 else 0 # find x move_factor, like (3.4 + 2) / 3.4 = 1.58
temp_y_factor = (abs(overlap[1]) + self._minimum_gap) / overlap[1] if overlap[1] != 0 else 0 # find y move_factor
temp_scale_factor = temp_x_factor if abs(temp_x_factor) > abs(temp_y_factor) else temp_y_factor
move_vector = move_vector.set(x = move_vector.x + overlap[0] * temp_scale_factor,
z = move_vector.z + overlap[1] * temp_scale_factor)
else:
move_vector = temp_move_vector
else:
# This can happen in some cases if the object is not yet done with being loaded.
# Simply waiting for the next tick seems to resolve this correctly.
overlap = None
if not Vector.Null.equals(move_vector, epsilon = 1e-5):
transformed_nodes.append(node)
op = PlatformPhysicsOperation.PlatformPhysicsOperation(node, move_vector)
op.push()
# After moving, we have to evaluate the boundary checks for nodes
build_volume = Application.getInstance().getBuildVolume()
build_volume.updateNodeBoundaryCheck()
def _onToolOperationStarted(self, tool):
self._enabled = False
def _onToolOperationStopped(self, tool):
# Selection tool should not trigger an update.
if tool.getPluginId() == "SelectionTool":
return
if tool.getPluginId() == "TranslateTool":
for node in Selection.getAllSelectedObjects():
if node.getBoundingBox().bottom < 0:
if not node.getDecorator(ZOffsetDecorator.ZOffsetDecorator):
node.addDecorator(ZOffsetDecorator.ZOffsetDecorator())
node.callDecoration("setZOffset", node.getBoundingBox().bottom)
else:
if node.getDecorator(ZOffsetDecorator.ZOffsetDecorator):
node.removeDecorator(ZOffsetDecorator.ZOffsetDecorator)
self._enabled = True
self._onChangeTimerFinished()
|
py
|
1a5d6f147af39e540e33ad45d4fd3573d52d764f
|
import json
import pytz
import re
from collections import OrderedDict, defaultdict
from django.contrib import messages
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import RegexURLResolver, Resolver404
from django.utils.translation import ugettext_lazy as _
from couchdbkit import ResourceConflict, ResourceNotFound
from dimagi.utils.web import json_response
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import (
safe_cached_download,
safe_download,
)
from corehq.apps.app_manager.exceptions import (
AppManagerException,
FormNotFoundException,
ModuleNotFoundException,
)
from corehq.apps.app_manager.models import Application
from corehq.apps.app_manager.tasks import autogenerate_build
from corehq.apps.app_manager.util import (
add_odk_profile_after_build,
get_latest_enabled_versions_per_profile,
)
from corehq.apps.app_manager.views.utils import back_to_main, get_langs
from corehq.apps.builds.jadjar import convert_XML_To_J2ME
from corehq.apps.hqmedia.views import DownloadMultimediaZip
from corehq.util.soft_assert import soft_assert
from corehq.util.timezones.conversions import ServerTime
from corehq.util.view_utils import set_file_download
BAD_BUILD_MESSAGE = _("Sorry: this build is invalid. Try deleting it and rebuilding. "
"If error persists, please report an issue")
def _get_build_profile_id(request):
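    """Return the requested build profile id if it is one of the app's build profiles, otherwise None."""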
profile = request.GET.get('profile')
if profile in request.app.build_profiles:
return profile
else:
return None
@safe_download
def download_odk_profile(request, domain, app_id):
"""
See ApplicationBase.create_profile
"""
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(is_odk=True, build_profile_id=profile),
content_type="commcare/profile"
)
@safe_download
def download_odk_media_profile(request, domain, app_id):
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(is_odk=True, with_media=True, build_profile_id=profile),
content_type="commcare/profile"
)
@safe_cached_download
def download_suite(request, domain, app_id):
"""
See Application.create_suite
"""
if not request.app.copy_of:
request.app.set_form_versions()
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_suite(build_profile_id=profile)
)
@safe_cached_download
def download_media_suite(request, domain, app_id):
"""
See Application.create_media_suite
"""
if not request.app.copy_of:
request.app.set_media_versions()
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_media_suite(build_profile_id=profile)
)
@safe_cached_download
def download_app_strings(request, domain, app_id, lang):
"""
See Application.create_app_strings
"""
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_app_strings(lang, build_profile_id=profile)
)
@safe_cached_download
def download_xform(request, domain, app_id, module_id, form_id):
"""
See Application.fetch_xform
"""
profile = _get_build_profile_id(request)
try:
return HttpResponse(
request.app.fetch_xform(module_id, form_id, build_profile_id=profile)
)
except (IndexError, ModuleNotFoundException):
raise Http404()
except AppManagerException:
form_unique_id = request.app.get_module(module_id).get_form(form_id).unique_id
response = validate_form_for_build(request, domain, app_id, form_unique_id, ajax=False)
response.status_code = 404
return response
@safe_cached_download
def download_jad(request, domain, app_id):
"""
See ApplicationBase.create_jadjar_from_build_files
"""
app = request.app
if not app.copy_of:
app.set_media_versions()
jad, _ = app.create_jadjar_from_build_files()
try:
response = HttpResponse(jad)
except Exception:
messages.error(request, BAD_BUILD_MESSAGE)
return back_to_main(request, domain, app_id=app_id)
set_file_download(response, "CommCare.jad")
response["Content-Type"] = "text/vnd.sun.j2me.app-descriptor"
response["Content-Length"] = len(jad)
return response
@safe_cached_download
def download_jar(request, domain, app_id):
"""
See ApplicationBase.create_jadjar_from_build_files
This is the only view that will actually be called
in the process of downloading a complete CommCare.jar
build (i.e. over the air to a phone).
"""
response = HttpResponse(content_type="application/java-archive")
app = request.app
if not app.copy_of:
app.set_media_versions()
_, jar = app.create_jadjar_from_build_files()
set_file_download(response, 'CommCare.jar')
response['Content-Length'] = len(jar)
try:
response.write(jar)
except Exception:
messages.error(request, BAD_BUILD_MESSAGE)
return back_to_main(request, domain, app_id=app_id)
return response
@safe_cached_download
def download_raw_jar(request, domain, app_id):
"""
See ApplicationBase.fetch_jar
"""
response = HttpResponse(
request.app.fetch_jar()
)
response['Content-Type'] = "application/java-archive"
return response
class DownloadCCZ(DownloadMultimediaZip):
name = 'download_ccz'
compress_zip = True
include_index_files = True
@property
def zip_name(self):
return 'commcare_v{}.ccz'.format(self.app.version)
def check_before_zipping(self):
if self.app.is_remote_app():
self.include_multimedia_files = False
super(DownloadCCZ, self).check_before_zipping()
@safe_cached_download
def download_file(request, domain, app_id, path):
download_target_version = request.GET.get('download_target_version') == 'true'
if download_target_version:
parts = path.split('.')
assert len(parts) == 2
target = Application.get(app_id).commcare_flavor
assert target != 'none'
path = parts[0] + '-' + target + '.' + parts[1]
if path == "app.json":
return JsonResponse(request.app.to_json())
content_type_map = {
'ccpr': 'commcare/profile',
'jad': 'text/vnd.sun.j2me.app-descriptor',
'jar': 'application/java-archive',
'xml': 'application/xml',
'txt': 'text/plain',
}
try:
content_type = content_type_map[path.split('.')[-1]]
except KeyError:
content_type = None
response = HttpResponse(content_type=content_type)
if request.GET.get('download') == 'true':
response['Content-Disposition'] = "attachment; filename={}".format(path)
build_profile_id = _get_build_profile_id(request)
build_profile_access = domain_has_privilege(domain, privileges.BUILD_PROFILES)
if path in ('CommCare.jad', 'CommCare.jar'):
set_file_download(response, path)
full_path = path
elif build_profile_id and build_profile_id in request.app.build_profiles and build_profile_access:
full_path = 'files/%s/%s' % (build_profile_id, path)
else:
full_path = 'files/%s' % path
def resolve_path(path):
return RegexURLResolver(
r'^', 'corehq.apps.app_manager.download_urls').resolve(path)
def create_build_files(build_profile_id=None):
request.app.create_build_files(build_profile_id=build_profile_id)
request.app.save()
def create_build_files_if_necessary_handling_conflicts(is_retry=False):
try:
try:
# look for file guaranteed to exist if profile is created
request.app.fetch_attachment('files/{id}/profile.xml'.format(id=build_profile_id))
except ResourceNotFound:
create_build_files(build_profile_id)
except ResourceConflict:
if is_retry:
raise
request.app = Application.get(request.app.get_id)
create_build_files_if_necessary_handling_conflicts(True)
try:
assert request.app.copy_of
# create build files for default profile if they were not created during initial build
# or for language profiles for which build files have not been created yet
try:
payload = request.app.fetch_attachment(full_path)
except ResourceNotFound:
if not build_profile_id:
create_build_files()
elif build_profile_id in request.app.build_profiles and build_profile_access:
create_build_files_if_necessary_handling_conflicts()
else:
raise
payload = request.app.fetch_attachment(full_path)
if path in ['profile.xml', 'media_profile.xml']:
payload = convert_XML_To_J2ME(payload, path, request.app.use_j2me_endpoint)
response.write(payload)
if path in ['profile.ccpr', 'media_profile.ccpr'] and request.app.last_released:
last_released = request.app.last_released.replace(microsecond=0) # mobile doesn't want microseconds
last_released = ServerTime(last_released).user_time(pytz.UTC).done().isoformat()
response['X-CommCareHQ-AppReleasedOn'] = last_released
response['Content-Length'] = len(response.content)
return response
except (ResourceNotFound, AssertionError):
if request.app.copy_of:
if request.META.get('HTTP_USER_AGENT') == 'bitlybot':
raise Http404()
elif path == 'profile.ccpr':
# legacy: should patch build to add odk profile
# which wasn't made on build for a long time
add_odk_profile_after_build(request.app)
request.app.save()
return download_file(request, domain, app_id, path)
elif path in ('CommCare.jad', 'CommCare.jar'):
if not request.app.build_spec.supports_j2me():
raise Http404()
request.app.create_jadjar_from_build_files(save=True)
try:
request.app.save(increment_version=False)
except ResourceConflict:
# Likely that somebody tried to download the jad and jar
# files for the first time simultaneously.
pass
return download_file(request, domain, app_id, path)
else:
try:
resolve_path(path)
except Resolver404:
# ok this was just a url that doesn't exist
pass
else:
# this resource should exist but doesn't
_assert = soft_assert('@'.join(['jschweers', 'dimagi.com']))
_assert(False, 'Expected build resource %s not found' % path)
raise Http404()
try:
callback, callback_args, callback_kwargs = resolve_path(path)
except Resolver404:
raise Http404()
return callback(request, domain, app_id, *callback_args, **callback_kwargs)
@safe_download
def download_profile(request, domain, app_id):
"""
See ApplicationBase.create_profile
"""
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(build_profile_id=profile)
)
@safe_download
def download_media_profile(request, domain, app_id):
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(with_media=True, build_profile_id=profile)
)
@safe_cached_download
def download_practice_user_restore(request, domain, app_id):
if not request.app.copy_of:
autogenerate_build(request.app, request.user.username)
return HttpResponse(
request.app.create_practice_user_restore()
)
@safe_download
def download_index(request, domain, app_id):
"""
A landing page, mostly for debugging, that has links the jad and jar as well as
all the resource files that will end up zipped into the jar.
"""
files = defaultdict(list)
try:
for file_ in source_files(request.app):
form_filename = re.search(r'modules-(\d+)\/forms-(\d+)', file_[0])
if form_filename:
module_id, form_id = form_filename.groups()
module = request.app.get_module(module_id)
form = module.get_form(form_id)
section_name = "m{} - {}".format(
module_id,
", ".join(["({}) {}".format(lang, name)
for lang, name in module.name.items()])
)
files[section_name].append({
'name': file_[0],
'source': file_[1],
'readable_name': "f{} - {}".format(
form_id,
", ".join(["({}) {}".format(lang, name)
for lang, name in form.name.items()])
),
})
else:
files[None].append({
'name': file_[0],
'source': file_[1],
'readable_name': None,
})
except Exception:
messages.error(
request,
_(
"We were unable to get your files "
"because your Application has errors. "
"Please click <strong>Make New Version</strong> "
"for feedback on how to fix these errors."
),
extra_tags='html'
)
enabled_build_profiles = []
latest_enabled_build_profiles = {}
if request.app.is_released and toggles.RELEASE_BUILDS_PER_PROFILE.enabled(domain):
latest_enabled_build_profiles = get_latest_enabled_versions_per_profile(request.app.copy_of)
enabled_build_profiles = [_id for _id, version in latest_enabled_build_profiles.items()
if version == request.app.version]
return render(request, "app_manager/download_index.html", {
'app': request.app,
'files': OrderedDict(sorted(files.items(), key=lambda x: x[0] or '')),
'supports_j2me': request.app.build_spec.supports_j2me(),
'enabled_build_profiles': enabled_build_profiles,
'latest_enabled_build_profiles': latest_enabled_build_profiles,
})
def validate_form_for_build(request, domain, app_id, form_unique_id, ajax=True):
app = get_app(domain, app_id)
try:
form = app.get_form(form_unique_id)
except FormNotFoundException:
# this can happen if you delete the form from another page
raise Http404()
errors = form.validate_for_build()
lang, langs = get_langs(request, app)
if ajax and "blank form" in [error.get('type') for error in errors]:
response_html = ""
else:
response_html = render_to_string("app_manager/partials/build_errors.html", {
'app': app,
'build_errors': errors,
'not_actual_build': True,
'domain': domain,
'langs': langs,
})
if ajax:
return json_response({
'error_html': response_html,
})
else:
return HttpResponse(response_html)
def download_index_files(app, build_profile_id=None):
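    """Return a sorted list of ``(path, content)`` pairs for the app's build files
    (generated on the fly for an unbuilt app), limited to the given build profile
    when one is specified."""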
if app.copy_of:
prefix = 'files/'
if build_profile_id is not None:
prefix += build_profile_id + '/'
needed_for_CCZ = lambda path: path.startswith(prefix)
else:
profiles = set(app.build_profiles)
needed_for_CCZ = lambda path: (path.startswith(prefix) and
path.split('/')[1] not in profiles)
if not (prefix + 'profile.ccpr') in app.blobs:
            # profile hasn't been built yet
app.create_build_files(build_profile_id=build_profile_id)
app.save()
files = [(path[len(prefix):], app.fetch_attachment(path))
for path in app.blobs if needed_for_CCZ(path)]
else:
files = list(app.create_all_files().items())
files = [
(name, build_file if isinstance(build_file, str) else build_file.decode('utf-8'))
for (name, build_file) in files
]
return sorted(files)
def source_files(app):
"""
Return the app's source files, including the app json.
Return format is a list of tuples where the first item in the tuple is a
file name and the second is the file contents.
"""
if not app.copy_of:
app.set_media_versions()
files = download_index_files(app)
app_json = json.dumps(
app.to_json(), sort_keys=True, indent=4, separators=(',', ': ')
)
files.append(
("app.json", app_json)
)
return sorted(files)
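# --- Illustrative sketch (not part of the original module) ---
# The helpers above yield (filename, contents) tuples whose contents are
# already decoded strings (see download_index_files). A minimal consumer that
# packs the sources into a zip archive could look like the function below;
# ``app`` is assumed to be an application object obtained elsewhere.
def _dump_source_to_zip(app):
    import io
    import zipfile
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as archive:
        for name, contents in source_files(app):
            # ZipFile.writestr accepts str data and encodes it as UTF-8
            archive.writestr(name, contents)
    return buffer.getvalue()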
|
py
|
1a5d6f41b14660162f020aec327f1f091ff4359a
|
"""
unit tests for the fileserver runner
"""
import os
import salt.loader
import salt.runners.fileserver as fileserver
import salt.utils.files
from tests.support.helpers import with_tempdir
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class DummyFS:
"""
Dummy object to provide the attributes needed to run unit tests
"""
def __init__(self, backends):
self.backends = backends
def keys(self):
return ["{}.envs".format(x) for x in self.backends]
class FileserverTest(TestCase, LoaderModuleMockMixin):
"""
Validate the cache runner
"""
def setup_loader_modules(self):
return {fileserver: {"__opts__": {"extension_modules": ""}}}
def _make_file_lists_cache(self, cachedir, backends):
"""
Create some dummy files to represent file list caches, as well as other
files that aren't file list caches, so that we can confirm that *only*
the cache files are touched. Create a dir for each configured backend,
as well as for the roots backend (which is *not* configured as a
backend in this test), so that we can ensure that its cache is left
alone.
"""
for back in backends:
back_cachedir = os.path.join(cachedir, "file_lists", back)
# Make file_lists cachedir
os.makedirs(os.path.join(back_cachedir))
# Touch a couple files
for filename in ("base.p", "dev.p", "foo.txt"):
with salt.utils.files.fopen(os.path.join(back_cachedir, filename), "w"):
pass
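    # Resulting on-disk layout (illustration only), e.g. for
    # backends=["gitfs", "roots"]:
    #   <cachedir>/file_lists/gitfs/{base.p, dev.p, foo.txt}
    #   <cachedir>/file_lists/roots/{base.p, dev.p, foo.txt}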
@with_tempdir()
def test_clear_file_list_cache_vcs(self, cachedir):
"""
Test that VCS backends are cleared irrespective of whether they are
configured as gitfs/git, hgfs/hg, svnfs/svn.
"""
# Mixture of VCS backends specified with and without "fs" at the end,
# to confirm that the correct dirs are cleared.
backends = ["gitfs", "hg", "svnfs"]
opts = {
"fileserver_backend": backends,
"cachedir": cachedir,
}
mock_fs = DummyFS(backends)
self._make_file_lists_cache(cachedir, backends + ["roots"])
with patch.dict(fileserver.__opts__, opts), patch.object(
salt.loader, "fileserver", MagicMock(return_value=mock_fs)
):
cleared = fileserver.clear_file_list_cache()
# Make sure the return data matches what you'd expect
expected = {
"gitfs": ["base", "dev"],
"hg": ["base", "dev"],
"svnfs": ["base", "dev"],
}
assert cleared == expected, cleared
# Trust, but verify! Check that the correct files are actually gone
assert not os.path.exists(
os.path.join(cachedir, "file_lists", "gitfs", "base.p")
)
assert not os.path.exists(
os.path.join(cachedir, "file_lists", "gitfs", "dev.p")
)
        assert not os.path.exists(os.path.join(cachedir, "file_lists", "hg", "base.p"))
        assert not os.path.exists(os.path.join(cachedir, "file_lists", "hg", "dev.p"))
        assert not os.path.exists(
            os.path.join(cachedir, "file_lists", "svnfs", "base.p")
        )
        assert not os.path.exists(
            os.path.join(cachedir, "file_lists", "svnfs", "dev.p")
        )
# These files *should* exist and shouldn't have been cleaned
assert os.path.exists(os.path.join(cachedir, "file_lists", "gitfs", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "hg", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "svnfs", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "base.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "dev.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "foo.txt"))
@with_tempdir()
def test_clear_file_list_cache_vcs_limited(self, cachedir):
"""
Test the arguments to limit what is cleared
"""
# Mixture of VCS backends specified with and without "fs" at the end,
# to confirm that the correct dirs are cleared.
backends = ["gitfs", "hg", "svnfs"]
opts = {
"fileserver_backend": backends,
"cachedir": cachedir,
}
mock_fs = DummyFS(backends)
self._make_file_lists_cache(cachedir, backends + ["roots"])
with patch.dict(fileserver.__opts__, opts), patch.object(
salt.loader, "fileserver", MagicMock(return_value=mock_fs)
):
cleared = fileserver.clear_file_list_cache(saltenv="base", backend="gitfs")
expected = {"gitfs": ["base"]}
assert cleared == expected, cleared
# Trust, but verify! Check that the correct files are actually gone
assert not os.path.exists(
os.path.join(cachedir, "file_lists", "gitfs", "base.p")
)
# These files *should* exist and shouldn't have been cleaned
assert os.path.exists(os.path.join(cachedir, "file_lists", "gitfs", "dev.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "gitfs", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "hg", "base.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "hg", "dev.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "hg", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "svnfs", "base.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "svnfs", "dev.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "svnfs", "foo.txt"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "base.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "dev.p"))
assert os.path.exists(os.path.join(cachedir, "file_lists", "roots", "foo.txt"))
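# Note (illustrative, not part of the original test module): outside of unit
# tests the function exercised above is a Salt runner, so the same cache
# clearing would typically be triggered with something like
#   salt-run fileserver.clear_file_list_cache saltenv=base backend=gitfs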
|
py
|
1a5d6ff7c029444015a3e7c3e1a81393e3d42a97
|
"""
Base class for the reward priority queues
Copyright (c) 2021 Elix, Inc.
"""
import random
from functools import total_ordering
from itertools import product
from typing import Any, List, Optional, Tuple
import numpy as np
@total_ordering
class StorageElement:
def __init__(
self,
smile: str,
score: float,
expert_id: Optional[int] = None,
):
self.smile = smile
self.score = score
self.expert_id = expert_id
def __eq__(self, other):
return np.isclose(self.score, other.score)
def __lt__(self, other):
return self.score < other.score
def __hash__(self):
return hash(self.smile)
class MaxRewardPriorityMemory:
def __init__(
self,
) -> None:
self.elements: List[StorageElement] = []
def __len__(self) -> int:
return len(self.elements)
def add_list(
self,
smiles: List[str],
scores: List[float],
expert_id: Optional[int] = None,
) -> None:
new_elements = [
StorageElement(
smile=smile,
score=score,
expert_id=expert_id,
)
for smile, score in zip(smiles, scores)
]
self.elements.extend(new_elements)
self.elements = list(set(self.elements))
def get_elements(
self,
) -> Tuple[List[str], List[float], List[Any]]:
return unravel_elements(self.elements)
def squeeze_by_rank(self, top_k: int) -> None:
top_k = min(top_k, len(self.elements))
self.elements = sorted(self.elements, reverse=True)[:top_k]
def sample_batch(self, batch_size: int) -> Tuple[List[str], List[float], List[Any]]:
sampled_elements = random.choices(population=self.elements, k=batch_size)
return unravel_elements(sampled_elements)
def unravel_elements(
elements: List[StorageElement],
) -> Tuple[List[str], List[float], List[Any]]:
return tuple( # type: ignore
map(
list,
zip(
*[
(element.smile, element.score, element.expert_id)
for element in elements
]
),
)
)
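# --- Minimal usage sketch (not part of the original module) ---
# The SMILES strings and scores below are made up purely for illustration.
if __name__ == "__main__":
    memory = MaxRewardPriorityMemory()
    memory.add_list(
        smiles=["CCO", "c1ccccc1", "CC(=O)O"],
        scores=[0.42, 0.91, 0.13],
        expert_id=0,
    )
    # Keep only the two highest-scoring elements ...
    memory.squeeze_by_rank(top_k=2)
    smiles, scores, expert_ids = memory.get_elements()
    # ... then sample (with replacement) a small training batch from them.
    batch_smiles, batch_scores, batch_ids = memory.sample_batch(batch_size=2)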
|
py
|
1a5d72faf8c57fff61b33c54deaad39f6fe5a0a3
|
import numpy as np
import cgen as c
from sympy import Or, Max, Not
from devito.data import FULL
from devito.ir import (DummyEq, Conditional, Dereference, Expression, ExpressionBundle,
List, Prodder, ParallelIteration, ParallelBlock, While,
FindSymbols, FindNodes, Return, COLLAPSED, VECTORIZED, Transformer,
IsPerfectIteration, retrieve_iteration_tree, filter_iterations)
from devito.symbolics import CondEq, DefFunction, INT
from devito.parameters import configuration
from devito.passes.iet.engine import iet_pass
from devito.tools import as_tuple, is_integer, prod
from devito.types import PointerArray, Symbol, NThreadsMixin
__all__ = ['Ompizer', 'OpenMPIteration', 'ParallelTree']
def ncores():
return configuration['platform'].cores_physical
def nhyperthreads():
return configuration['platform'].threads_per_core
class OpenMPRegion(ParallelBlock):
def __init__(self, body, private=None):
# Normalize and sanity-check input. A bit ugly, but it makes everything
# much simpler to manage and reconstruct
body = as_tuple(body)
assert len(body) == 1
body = body[0]
assert body.is_List
if isinstance(body, ParallelTree):
partree = body
elif body.is_List:
assert len(body.body) == 1 and isinstance(body.body[0], ParallelTree)
assert len(body.footer) == 0
partree = body.body[0]
partree = partree._rebuild(prefix=(List(header=body.header,
body=partree.prefix)))
header = OpenMPRegion._make_header(partree.nthreads, private)
super(OpenMPRegion, self).__init__(header=header, body=partree)
@property
def partree(self):
return self.body[0]
@property
def root(self):
return self.partree.root
@property
def nthreads(self):
return self.partree.nthreads
@classmethod
def _make_header(cls, nthreads, private=None):
private = ('private(%s)' % ','.join(private)) if private else ''
return c.Pragma('omp parallel num_threads(%s) %s' % (nthreads.name, private))
class OpenMPIteration(ParallelIteration):
def __init__(self, *args, **kwargs):
pragmas, kwargs = self._make_header(**kwargs)
properties = as_tuple(kwargs.pop('properties', None))
properties += (COLLAPSED(kwargs.get('ncollapse', 1)),)
self.schedule = kwargs.pop('schedule', None)
self.parallel = kwargs.pop('parallel', False)
self.ncollapse = kwargs.pop('ncollapse', None)
self.chunk_size = kwargs.pop('chunk_size', None)
self.nthreads = kwargs.pop('nthreads', None)
self.reduction = kwargs.pop('reduction', None)
super(OpenMPIteration, self).__init__(*args, pragmas=pragmas,
properties=properties, **kwargs)
@classmethod
def _make_header(cls, **kwargs):
kwargs.pop('pragmas', None)
construct = cls._make_construct(**kwargs)
clauses = cls._make_clauses(**kwargs)
header = c.Pragma(' '.join([construct] + clauses))
return (header,), kwargs
@classmethod
def _make_construct(cls, parallel=False, **kwargs):
if parallel:
return 'omp parallel for'
else:
return 'omp for'
@classmethod
def _make_clauses(cls, ncollapse=None, chunk_size=None, nthreads=None,
reduction=None, schedule=None, **kwargs):
clauses = []
clauses.append('collapse(%d)' % (ncollapse or 1))
if chunk_size is not False:
clauses.append('schedule(%s,%s)' % (schedule or 'dynamic',
chunk_size or 1))
if nthreads:
clauses.append('num_threads(%s)' % nthreads)
if reduction:
args = []
for i in reduction:
if i.is_Indexed:
f = i.function
bounds = []
for k, d in zip(i.indices, f.dimensions):
if k.is_Number:
bounds.append('[%s]' % k)
else:
# OpenMP expects a range as input of reduction,
# such as reduction(+:f[0:f_vec->size[1]])
bounds.append('[0:%s]' % f._C_get_field(FULL, d).size)
args.append('%s%s' % (i.name, ''.join(bounds)))
else:
args.append(str(i))
clauses.append('reduction(+:%s)' % ','.join(args))
return clauses
class ParallelTree(List):
"""
    Group together a parallel for-loop and the setup statements that precede
    it, for example:
.. code-block:: C
int chunk_size = ...
#pragma omp ... schedule(..., chunk_size)
for (int i = ...)
{
...
}
"""
_traversable = ['prefix', 'body']
def __init__(self, prefix, body, nthreads=None):
# Normalize and sanity-check input
body = as_tuple(body)
assert len(body) == 1 and body[0].is_Iteration
self.prefix = as_tuple(prefix)
self.nthreads = nthreads
super(ParallelTree, self).__init__(body=body)
def __getattr__(self, name):
if 'body' in self.__dict__:
# During unpickling, `__setattr__` calls `__getattr__(..., 'body')`,
# which would cause infinite recursion if we didn't check whether
# 'body' is present or not
return getattr(self.body[0], name)
raise AttributeError
@property
def functions(self):
return as_tuple(self.nthreads)
@property
def root(self):
return self.body[0]
class ThreadedProdder(Conditional, Prodder):
_traversable = []
def __init__(self, prodder):
# Atomic-ize any single-thread Prodders in the parallel tree
condition = CondEq(Ompizer.lang['thread-num'], 0)
# Prod within a while loop until all communications have completed
# In other words, the thread delegated to prodding is entrapped for as long
# as it's required
prod_until = Not(DefFunction(prodder.name, [i.name for i in prodder.arguments]))
then_body = List(header=c.Comment('Entrap thread until comms have completed'),
body=While(prod_until))
Conditional.__init__(self, condition, then_body)
Prodder.__init__(self, prodder.name, prodder.arguments, periodic=prodder.periodic)
class Ompizer(object):
lang = {
'simd-for': c.Pragma('omp simd'),
'simd-for-aligned': lambda i, j: c.Pragma('omp simd aligned(%s:%d)' % (i, j)),
'atomic': c.Pragma('omp atomic update'),
'thread-num': DefFunction('omp_get_thread_num')
}
"""
Shortcuts for the OpenMP language.
"""
_Region = OpenMPRegion
_Iteration = OpenMPIteration
def __init__(self, sregistry, options, key=None):
"""
Parameters
----------
sregistry : SymbolRegistry
The symbol registry, to quickly access the special symbols that may
appear in the IET (e.g., `sregistry.threadid`, `sregistry.nthreads`).
options : dict
The optimization options. Accepted: ['par-collapse-ncores',
'par-collapse-work', 'par-chunk-nonaffine', 'par-dynamic-work', 'par-nested']
* 'par-collapse-ncores': use a collapse clause if the number of
available physical cores is greater than this threshold.
* 'par-collapse-work': use a collapse clause if the trip count of the
collapsable Iterations is statically known to exceed this threshold.
* 'par-chunk-nonaffine': coefficient to adjust the chunk size in
non-affine parallel Iterations.
* 'par-dynamic-work': use dynamic scheduling if the operation count per
iteration exceeds this threshold. Otherwise, use static scheduling.
* 'par-nested': nested parallelism if the number of hyperthreads per core
is greater than this threshold.
key : callable, optional
Return True if an Iteration can be parallelized, False otherwise.
"""
self.sregistry = sregistry
self.collapse_ncores = options['par-collapse-ncores']
self.collapse_work = options['par-collapse-work']
self.chunk_nonaffine = options['par-chunk-nonaffine']
self.dynamic_work = options['par-dynamic-work']
self.nested = options['par-nested']
if key is not None:
self.key = key
else:
self.key = lambda i: i.is_ParallelRelaxed and not i.is_Vectorized
@property
def nthreads(self):
return self.sregistry.nthreads
@property
def nthreads_nested(self):
return self.sregistry.nthreads_nested
@property
def nthreads_nonaffine(self):
return self.sregistry.nthreads_nonaffine
@property
def threadid(self):
return self.sregistry.threadid
def _find_collapsable(self, root, candidates):
collapsable = []
if ncores() >= self.collapse_ncores:
for n, i in enumerate(candidates[1:], 1):
# The Iteration nest [root, ..., i] must be perfect
if not IsPerfectIteration(depth=i).visit(root):
break
# The OpenMP specification forbids collapsed loops to use iteration
# variables in initializer expressions. E.g., the following is forbidden:
#
# #pragma omp ... collapse(2)
# for (i = ... )
# for (j = i ...)
# ...
#
# Here, we make sure this won't happen
if any(j.dim in i.symbolic_min.free_symbols for j in candidates[:n]):
break
# Also, we do not want to collapse vectorizable Iterations
if i.is_Vectorized:
break
# Would there be enough work per parallel iteration?
nested = candidates[n+1:]
if nested:
try:
work = prod([int(j.dim.symbolic_size) for j in nested])
if work < self.collapse_work:
break
except TypeError:
pass
collapsable.append(i)
return collapsable
@classmethod
def _make_tid(cls, tid):
return c.Initializer(c.Value(tid._C_typedata, tid.name), cls.lang['thread-num'])
def _make_reductions(self, partree, collapsed):
if not any(i.is_ParallelAtomic for i in collapsed):
return partree
# Collect expressions inducing reductions
exprs = FindNodes(Expression).visit(partree)
exprs = [i for i in exprs if i.is_Increment and not i.is_ForeignExpression]
reduction = [i.output for i in exprs]
if (all(i.is_Affine for i in collapsed)
or all(not i.is_Indexed for i in reduction)):
# Introduce reduction clause
mapper = {partree.root: partree.root._rebuild(reduction=reduction)}
else:
# Introduce one `omp atomic` pragma for each increment
mapper = {i: i._rebuild(pragmas=self.lang['atomic']) for i in exprs}
partree = Transformer(mapper).visit(partree)
return partree
def _make_threaded_prodders(self, partree):
mapper = {i: ThreadedProdder(i) for i in FindNodes(Prodder).visit(partree)}
partree = Transformer(mapper).visit(partree)
return partree
def _make_partree(self, candidates, nthreads=None):
"""Parallelize the `candidates` Iterations attaching suitable OpenMP pragmas."""
assert candidates
root = candidates[0]
# Get the collapsable Iterations
collapsable = self._find_collapsable(root, candidates)
ncollapse = 1 + len(collapsable)
# Prepare to build a ParallelTree
if all(i.is_Affine for i in candidates):
bundles = FindNodes(ExpressionBundle).visit(root)
sops = sum(i.ops for i in bundles)
if sops >= self.dynamic_work:
schedule = 'dynamic'
else:
schedule = 'static'
if nthreads is None:
# pragma omp for ... schedule(..., 1)
nthreads = self.nthreads
body = OpenMPIteration(schedule=schedule, ncollapse=ncollapse,
**root.args)
else:
# pragma omp parallel for ... schedule(..., 1)
body = OpenMPIteration(schedule=schedule, parallel=True,
ncollapse=ncollapse, nthreads=nthreads,
**root.args)
prefix = []
else:
# pragma omp for ... schedule(..., expr)
assert nthreads is None
nthreads = self.nthreads_nonaffine
chunk_size = Symbol(name='chunk_size')
body = OpenMPIteration(ncollapse=ncollapse, chunk_size=chunk_size,
**root.args)
niters = prod([root.symbolic_size] + [j.symbolic_size for j in collapsable])
value = INT(Max(niters / (nthreads*self.chunk_nonaffine), 1))
prefix = [Expression(DummyEq(chunk_size, value, dtype=np.int32))]
# Create a ParallelTree
partree = ParallelTree(prefix, body, nthreads=nthreads)
collapsed = [partree] + collapsable
return root, partree, collapsed
def _make_parregion(self, partree, parrays):
arrays = [i for i in FindSymbols().visit(partree) if i.is_Array]
# Detect thread-private arrays on the heap and "map" them to shared
# vector-expanded (one entry per thread) Arrays
heap_private = [i for i in arrays if i._mem_heap and i._mem_local]
heap_globals = []
for i in heap_private:
if i in parrays:
pi = parrays[i]
else:
pi = parrays.setdefault(i, PointerArray(name=self.sregistry.make_name(),
dimensions=(self.threadid,),
array=i))
heap_globals.append(Dereference(i, pi))
if heap_globals:
prefix = List(header=self._make_tid(self.threadid),
body=heap_globals + list(partree.prefix),
footer=c.Line())
partree = partree._rebuild(prefix=prefix)
return self._Region(partree)
def _make_guard(self, partree, collapsed):
# Do not enter the parallel region if the step increment is 0; this
# would raise a `Floating point exception (core dumped)` in some OpenMP
# implementations. Note that using an OpenMP `if` clause won't work
cond = [CondEq(i.step, 0) for i in collapsed if isinstance(i.step, Symbol)]
cond = Or(*cond)
if cond != False: # noqa: `cond` may be a sympy.False which would be == False
partree = List(body=[Conditional(cond, Return()), partree])
return partree
def _make_nested_partree(self, partree):
# Apply heuristic
if nhyperthreads() <= self.nested:
return partree
# Note: there might be multiple sub-trees amenable to nested parallelism,
# hence we loop over all of them
#
# for (i = ... ) // outer parallelism
# for (j0 = ...) // first source of nested parallelism
# ...
# for (j1 = ...) // second source of nested parallelism
# ...
mapper = {}
for tree in retrieve_iteration_tree(partree):
outer = tree[:partree.ncollapsed]
inner = tree[partree.ncollapsed:]
# Heuristic: nested parallelism is applied only if the top nested
# parallel Iteration iterates *within* the top outer parallel Iteration
# (i.e., the outer is a loop over blocks, while the nested is a loop
# within a block)
candidates = []
for i in inner:
if self.key(i) and any(is_integer(j.step-i.symbolic_size) for j in outer):
candidates.append(i)
elif candidates:
# If there's at least one candidate but `i` doesn't honor the
# heuristic above, then we break, as the candidates must be
# perfectly nested
break
if not candidates:
continue
# Introduce nested parallelism
subroot, subpartree, _ = self._make_partree(candidates, self.nthreads_nested)
mapper[subroot] = subpartree
partree = Transformer(mapper).visit(partree)
return partree
def _make_parallel(self, iet):
mapper = {}
parrays = {}
for tree in retrieve_iteration_tree(iet):
# Get the omp-parallelizable Iterations in `tree`
candidates = filter_iterations(tree, key=self.key)
if not candidates:
continue
# Outer parallelism
root, partree, collapsed = self._make_partree(candidates)
if partree is None or root in mapper:
continue
# Nested parallelism
partree = self._make_nested_partree(partree)
# Handle reductions
partree = self._make_reductions(partree, collapsed)
# Atomicize and optimize single-thread prodders
partree = self._make_threaded_prodders(partree)
# Wrap within a parallel region, declaring private and shared variables
parregion = self._make_parregion(partree, parrays)
# Protect the parallel region in case of 0-valued step increments
parregion = self._make_guard(parregion, collapsed)
mapper[root] = parregion
iet = Transformer(mapper).visit(iet)
# The new arguments introduced by this pass
args = [i for i in FindSymbols().visit(iet) if isinstance(i, (NThreadsMixin))]
for n in FindNodes(Dereference).visit(iet):
args.extend([(n.array, True), n.parray])
return iet, {'args': args, 'includes': ['omp.h']}
@iet_pass
def make_parallel(self, iet):
"""
Create a new IET with shared-memory parallelism via OpenMP pragmas.
"""
return self._make_parallel(iet)
@iet_pass
def make_simd(self, iet, **kwargs):
"""
Create a new IET with SIMD parallelism via OpenMP pragmas.
"""
simd_reg_size = kwargs.pop('simd_reg_size')
mapper = {}
for tree in retrieve_iteration_tree(iet):
candidates = [i for i in tree if i.is_Parallel]
# As long as there's an outer level of parallelism, the innermost
# PARALLEL Iteration gets vectorized
if len(candidates) < 2:
continue
candidate = candidates[-1]
# Construct OpenMP SIMD pragma
aligned = [j for j in FindSymbols('symbolics').visit(candidate)
if j.is_DiscreteFunction]
if aligned:
simd = self.lang['simd-for-aligned']
simd = as_tuple(simd(','.join([j.name for j in aligned]),
simd_reg_size))
else:
simd = as_tuple(self.lang['simd-for'])
pragmas = candidate.pragmas + simd
# Add VECTORIZED property
properties = list(candidate.properties) + [VECTORIZED]
mapper[candidate] = candidate._rebuild(pragmas=pragmas, properties=properties)
iet = Transformer(mapper).visit(iet)
return iet, {}
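# --- Illustrative sketch (not part of the original module) ---
# What the clause builder above produces for a hypothetical affine loop nest
# collapsed over two dimensions, with dynamic scheduling and a chunk size of 1.
if __name__ == "__main__":
    clauses = OpenMPIteration._make_clauses(ncollapse=2, chunk_size=1,
                                            schedule='dynamic')
    print(clauses)  # ['collapse(2)', 'schedule(dynamic,1)']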
|