filename stringlengths 13-19 | text stringlengths 134-1.04M |
---|---|
the-stack_0_12945 | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcls.models.builder import NECKS
from mmcls.models.necks import GlobalAveragePooling as _GlobalAveragePooling
@NECKS.register_module(force=True)
class GlobalAveragePooling(_GlobalAveragePooling):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
"""
def __init__(self, kernel_size=None, stride=None):
super(GlobalAveragePooling, self).__init__()
if kernel_size is None and stride is None:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AvgPool2d(kernel_size, stride)
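# Illustrative shape check (not part of the original file), showing why `view`
# is preferred over `squeeze` after pooling; tensor sizes are assumed example
# values:
#
#     import torch
#     x = torch.randn(1, 512, 7, 7)             # batch size 1
#     pooled = nn.AdaptiveAvgPool2d((1, 1))(x)  # shape (1, 512, 1, 1)
#     pooled.squeeze().shape                    # torch.Size([512])    -> batch dim lost
#     pooled.view(x.size(0), -1).shape          # torch.Size([1, 512]) -> batch dim kept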
|
the-stack_0_12946 | # Copyright 2019 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from gcp_common import BaseTest, event_data
class MLModelTest(BaseTest):
def test_models_query(self):
project_id = "cloud-custodian"
session_factory = self.replay_flight_data(
'ml-models-query', project_id)
policy = self.load_policy(
{
'name': 'ml-models-query',
'resource': 'gcp.ml-model'
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_models_get(self):
project_id = 'cloud-custodian'
name = "test_model"
factory = self.replay_flight_data('ml-model-get', project_id=project_id)
p = self.load_policy({
'name': 'ml-model-get',
'resource': 'gcp.ml-model',
'mode': {
'type': 'gcp-audit',
'methods': ['google.cloud.ml.v1.ModelService.CreateModel']
}
}, session_factory=factory)
exec_mode = p.get_execution_mode()
event = event_data('ml-model-create.json')
models = exec_mode.run(event, None)
self.assertIn(name, models[0]['name'])
class MLJobTest(BaseTest):
def test_jobs_query(self):
project_id = 'cloud-custodian'
session_factory = self.replay_flight_data(
'ml-jobs-query', project_id)
policy = self.load_policy(
{
'name': 'ml-jobs-query',
'resource': 'gcp.ml-job'
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_jobs_get(self):
project_id = 'cloud-custodian'
name = "test_job"
factory = self.replay_flight_data('ml-job-get', project_id=project_id)
p = self.load_policy({
'name': 'ml-job-get',
'resource': 'gcp.ml-job',
'mode': {
'type': 'gcp-audit',
'methods': ['google.cloud.ml.v1.JobService.CreateJob']
}
}, session_factory=factory)
exec_mode = p.get_execution_mode()
event = event_data('ml-job-create.json')
jobs = exec_mode.run(event, None)
self.assertIn(name, jobs[0]['jobId'])
|
the-stack_0_12949 | """
Provides install path information.
"""
import os
from esys.lsm.util.pathSearcher import PathSearcher
installDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle"
binDir = os.path.join(installDir, "bin")
libDir = os.path.join(installDir, "lib")
pythonPkgDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle/lib/python2.7/site-packages"
esysPkgDir = os.path.join(pythonPkgDir, "esys")
lsmPkgDir = os.path.join(esysPkgDir, "lsm")
pkgName = "ESyS-Particle"
version = "2.3.5"
pkgHomePageUrl = "https://launchpad.net/esys-particle/"
pkgDataDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle/share/esys-particle"
povrayExe = "no"
_haveVtk = False
_havePovray = False
def getPovrayExePath():
"""
Attempts to return the absolute path of the "povray" executable
using the "PATH" environment variable. If the exe can't be found
on the "PATH" then this function returns the "povray" path which
was found during installation. This function is a workaround for
the SGI MPT mpirun, which seems to alter the user "PATH"
environment.
"""
absPath=PathSearcher().find("povray")
if absPath is None or absPath == "":
absPath = povrayExe
return absPath
def getDataFilePath(dataFileName):
"""
Returns path for specified data file. Looks on path
C{L{pkgDataDir}:Data:.}
"""
return PathSearcher(pkgDataDir+":Data:.").which(dataFileName)
def haveVtk():
return _haveVtk
def havePovray():
return _havePovray
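# Illustrative self-check (not part of the original file): exercise the helpers
# above when the module is run directly; "example.dat" is an assumed file name.
if __name__ == "__main__":
    print("povray executable:", getPovrayExePath())
    print("have VTK:", haveVtk(), "have POV-Ray:", havePovray())
    print("data file:", getDataFilePath("example.dat"))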
|
the-stack_0_12950 | from raptiformica.actions.slave import assimilate_machine
from tests.testcase import TestCase
class TestAssimilateMachine(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.actions.slave.log')
self.download_artifacts = self.set_up_patch('raptiformica.actions.slave.download_artifacts')
self.advertise = self.set_up_patch('raptiformica.actions.slave.advertise')
self.ensure_route_to_new_neighbour = self.set_up_patch(
'raptiformica.actions.slave.ensure_route_to_new_neighbour'
)
def test_assimilate_machine_logs_assimilating_machine_message(self):
assimilate_machine('1.2.3.4', port=2222)
self.assertTrue(self.log.info.called)
def test_assimilate_machine_downloads_artifacts(self):
assimilate_machine('1.2.3.4', port=2222)
self.download_artifacts.assert_called_once_with('1.2.3.4', port=2222)
def test_assimilate_machine_sets_advertised_host_and_port_on_remote_machine(self):
assimilate_machine('1.2.3.4', port=2222)
self.advertise.assert_called_once_with('1.2.3.4', port=2222)
def test_assimilate_machine_ensures_route_to_new_neighbour(self):
assimilate_machine('1.2.3.4', port=2222)
self.ensure_route_to_new_neighbour.assert_called_once_with(
'1.2.3.4', port=2222,
compute_checkout_uuid=None
)
def test_assimilate_machine_update_ensures_route_to_new_neighbour_with_optional_uuid(self):
assimilate_machine('1.2.3.4', port=2222, uuid='some_uuid_1234')
self.ensure_route_to_new_neighbour.assert_called_once_with(
'1.2.3.4', port=2222,
compute_checkout_uuid='some_uuid_1234'
)
|
the-stack_0_12951 | import unittest
from six.moves import StringIO
import time
from robot import utils
from robot.utils.asserts import *
from robot.output.filelogger import FileLogger
from robot.utils.robottime import TimestampCache
class _FakeTimeCache(TimestampCache):
def __init__(self):
self.fake = time.mktime((2006, 6, 13, 8, 37, 42, 0, 0, 1)) + 0.123
TimestampCache.__init__(self)
def _get_epoch(self):
return self.fake
class TestFileLogger(unittest.TestCase):
def setUp(self):
utils.robottime.TIMESTAMP_CACHE = _FakeTimeCache()
FileLogger._get_writer = lambda *args: StringIO()
self.logger = FileLogger('whatever', 'INFO')
def tearDown(self):
utils.robottime.TIMESTAMP_CACHE = TimestampCache()
def test_write(self):
self.logger.write('my message', 'INFO')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.write('my 2nd msg\nwith 2 lines', 'ERROR')
expected += '20060613 08:37:42.123 | ERROR | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_write_helpers(self):
self.logger.info('my message')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.warn('my 2nd msg\nwith 2 lines')
expected += '20060613 08:37:42.123 | WARN | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_set_level(self):
self.logger.write('msg', 'DEBUG')
self._verify_message('')
self.logger.set_level('DEBUG')
self.logger.write('msg', 'DEBUG')
self._verify_message('20060613 08:37:42.123 | DEBUG | msg\n')
def _verify_message(self, expected):
assert_equals(self.logger._writer.getvalue(), expected)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_12952 | #!/usr/bin/env python3
_DEFAULT_DEPENDENCIES = [
"packages/data/**/*",
"packages/common/**/*",
"packages/course-landing/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_COURSE_LANDING_DEPENDENCIES = [
"packages/data/training/sessions.yml",
"packages/data/training/recommendations/**/*",
"packages/data/training/recommendations/**/*",
"packages/data/training/pictures/**/*",
"packages/common/**/*",
"packages/course-landing/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_ONDREJSIKA_THEME_DEPENDENCIES = [
"packages/data/**/*",
"packages/common/**/*",
"packages/ondrejsika-theme/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_ONDREJSIKA_SINGLEPAGE_DEPENDENCIES = _ONDREJSIKA_THEME_DEPENDENCIES + [
"packages/ondrejsika-singlepage/**/*",
]
PROD_SITES = {
"trainera.de": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.com": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.cz": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.de": {
"dependencies": _ONDREJSIKA_SINGLEPAGE_DEPENDENCIES,
"cloudflare_workers": True,
},
"trainera.cz": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"skolenie.kubernetes.sk": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"training.kubernetes.is": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"training.kubernetes.lu": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"cal-api.sika.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"ccc.oxs.cz": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"sika.blog": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"static.sika.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"sikahq.com": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"ondrejsika.is": {
"dependencies": _ONDREJSIKA_SINGLEPAGE_DEPENDENCIES,
"cloudflare_workers": True,
},
"skoleni.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
"cloudflare_workers": True,
},
}
ALL_SITES = {}
ALL_SITES.update(PROD_SITES)
PRIORITY_SITES = (
"ondrej-sika.cz",
"ondrej-sika.com",
"trainera.cz",
"skoleni.io",
"trainera.de",
)
SUFFIX = ".panda.k8s.oxs.cz"
SITES = ALL_SITES.keys()
out = []
out.append(
"""# Don't edit this file maually
# This file is generated by ./generate-gitlab-ci.py
image: sikalabs/ci
stages:
- start
- build_docker_priority
- deploy_dev_priority
- deploy_prod_priority
- build_docker
- deploy_dev
- deploy_prod
variables:
DOCKER_BUILDKIT: '1'
GIT_CLEAN_FLAGS: "-ffdx -e node_modules -e .yarn-cache"
start:
stage: start
script: echo "start job - you can't create empty child pipeline"
"""
)
def generate_dependencies(site):
if site not in ALL_SITES:
return """ - packages/data/**/*
- packages/common/**/*
- packages/course-landing/**/*
- packages/{{site}}/**/*
- yarn.lock""".replace(
"{{site}}", site
)
return "\n".join(
(" - " + line).replace("{{site}}", site)
for line in ALL_SITES[site]["dependencies"]
)
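# Example (illustrative, not part of the original script): for the site
# "trainera.de", which uses _ONDREJSIKA_THEME_DEPENDENCIES above,
# generate_dependencies("trainera.de") yields indented list items like
#
#   - packages/data/**/*
#   - packages/common/**/*
#   - packages/ondrejsika-theme/**/*
#   - packages/trainera.de/**/*
#   - yarn.lock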
for site in SITES:
if site in ALL_SITES and ALL_SITES[site].get("cloudflare_workers"):
pass
else:
out.append(
"""
%(site)s build docker:
stage: build_docker%(priority_suffix)s
image: sikalabs/ci-node
needs: []
variables:
GIT_CLEAN_FLAGS: -ffdx -e node_modules -e .yarn-cache
script:
- yarn --cache-folder .yarn-cache
- rm -rf packages/%(site)s/out
- mkdir -p packages/%(site)s/public/api
- slu static-api version --set-git-clean --set-git-ref $CI_COMMIT_REF_NAME -e CI_PIPELINE_ID=$CI_PIPELINE_ID -e "GITLAB_USER_LOGIN=$GITLAB_USER_LOGIN" -e "CI_COMMIT_TITLE=$CI_COMMIT_TITLE" > packages/%(site)s/public/api/version.json
- yarn run static-%(site)s
- docker login $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD
- cp ci/docker/* packages/%(site)s/
- docker build -t $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA packages/%(site)s
- rm packages/%(site)s/Dockerfile
- rm packages/%(site)s/nginx-site.conf
- docker push $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA
except:
variables:
- $EXCEPT_BUILD
- $EXCEPT_BUILD_DOCKER
only:
changes:
%(dependencies)s
"""
% {
"site": site,
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
if site in PROD_SITES:
if PROD_SITES[site].get("cloudflare_workers"):
out.append(
"""
%(site)s prod deploy cloudflare:
image: sikalabs/ci-node
stage: deploy_prod%(priority_suffix)s
script:
- yarn --cache-folder .yarn-cache
- yarn --cache-folder .yarn-cache add @cloudflare/wrangler -W
- rm -rf packages/%(site)s/out
- mkdir -p packages/%(site)s/public/api
- git status
- slu static-api version --set-git-clean --set-git-ref $CI_COMMIT_REF_NAME -e CI_PIPELINE_ID=$CI_PIPELINE_ID -e "GITLAB_USER_LOGIN=$GITLAB_USER_LOGIN" -e "CI_COMMIT_TITLE=$CI_COMMIT_TITLE" > packages/%(site)s/public/api/version.json
- yarn run deploy-%(site)s
except:
variables:
- $EXCEPT_DEPLOY
- $EXCEPT_DEPLOY_CLOUDFLARE
- $EXCEPT_DEPLOY_K8S
- $EXCEPT_DEPLOY_PROD
- $EXCEPT_DEPLOY_PROD_K8S
only:
refs:
- master
changes:
%(dependencies)s
environment:
name: k8s/prod/%(site)s
url: https://%(site)s
dependencies: []
"""
% {
"site": site,
"suffix": SUFFIX,
"name": site.replace(".", "-"),
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
else:
out.append(
"""
%(site)s prod deploy k8s:
needs:
- %(site)s build docker
stage: deploy_prod%(priority_suffix)s
variables:
GIT_STRATEGY: none
KUBECONFIG: .kubeconfig
script:
- docker login $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD
- docker pull $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA
- docker tag $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA $CI_REGISTRY_IMAGE/%(site)s
- docker push $CI_REGISTRY_IMAGE/%(site)s
except:
variables:
- $EXCEPT_DEPLOY
- $EXCEPT_DEPLOY_K8S
- $EXCEPT_DEPLOY_PROD
- $EXCEPT_DEPLOY_PROD_K8S
only:
refs:
- master
changes:
%(dependencies)s
environment:
name: k8s/prod/%(site)s
url: https://%(site)s
kubernetes:
namespace: default
dependencies: []
"""
% {
"site": site,
"suffix": SUFFIX,
"name": site.replace(".", "-"),
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
with open(".gitlab-ci.generated.yml", "w") as f:
f.write("".join(out))
|
the-stack_0_12954 | #!/usr/bin/env python3.4
"""
kill_python.py,
copyright (c) 2015 by Stefan Lehmann
"""
import os
import psutil
PROC = "python.exe"
my_pid = os.getpid()
i = 0
for p in psutil.process_iter():
if p.name() == PROC and p.pid != my_pid:
i += 1
p.kill()
print("Killed {} instances of process '{}'.".format(i, PROC))
|
the-stack_0_12955 | """Common configure functions for vlan"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def config_vlan(device, vlanid):
""" Configures a VLAN on Interface or Device
e.g.
vlan 666
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
configs = []
configs.append("vlan {vlanid}".format(vlanid=vlanid))
configs.append("no shutdown")
try:
device.configure(configs)
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def config_ip_on_vlan(device, vlan_id, ipv4_address=None,
subnetmask=None, ipv6_address=None,
ipv6_prefix_len=None):
"""Configure an IPv4/IPv6 address on a vlan
Args:
device (`obj`): Device object
vlan_id (`str`): Vlan id
ipv4_address (`str`): IPv4 address
subnetmask (`str`): Subnet mask to be used for IPv4 address
ipv6_address (`str`): Ipv6 address
ipv6_prefix_len (`int`): length of IPv6 prefix
Return:
None
Raise:
SubCommandFailure: Failed to configure Ipv4/Ipv6 address on vlan
"""
try:
if ipv4_address and subnetmask:
device.configure([f'interface vlan {vlan_id}',
f'ip address {ipv4_address} {subnetmask}'])
if ipv6_address and ipv6_prefix_len:
device.configure([f'interface vlan {vlan_id}',
'ipv6 enable',
f'ipv6 address {ipv6_address}/{ipv6_prefix_len}'])
except SubCommandFailure as e:
raise SubCommandFailure(
f'Could not configure Ipv4/Ipv6 address on vlan {vlan_id}, '
f'Error: {e}'
)
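# Illustrative usage sketch (not part of the original module); `device` is
# assumed to be a connected pyATS/Genie device object:
#
#     config_vlan(device, '100')
#     config_ip_on_vlan(device, vlan_id='100',
#                       ipv4_address='10.0.0.1', subnetmask='255.255.255.0')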
def unconfig_vlan(device, vlanid):
""" vlan on Interface or Device configuration removal
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
try:
device.configure("no vlan {vlanid}".format(vlanid=vlanid))
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not remove vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def config_vlan_tag_native(device):
""" Configure vlan dot1q tag native
Args:
device (`obj`): Device object
Return:
None
Raise:
SubCommandFailure: Failed configuring device
"""
try:
device.configure("vlan dot1q tag native")
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vlan dot1q tag native, Error: {error}'.format(
error=e)
)
def configure_vlan_vpls(device, vlanid):
""" Config vpls on vlan
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
try:
device.configure(
[
"vlan configuration {vlanid}".format(vlanid=vlanid),
"member vfi vpls",
"vlan dot1q tag native",
]
)
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vpls on vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def configure_vtp_mode(device,mode):
""" Configures global VTP mode
Args:
device ('obj'): device to use
mode ('str'): VTP mode (i.e transparent, client, server)
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure('vtp mode {mode}'.format(mode=mode))
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure VTP mode'
)
def configure_pvlan_svi_mapping(device, svi_vlan, mapping_vlan):
""" Configures Private Vlan Mapping on SVI
Args:
device ('obj'): device to use
svi_vlan ('str'): SVI interface
mapping_vlan ('str'): Private vlan to map to
Returns:
None
Raises:
SubCommandFailure
"""
# Initialize list variable
config_list = []
config_list.append("interface {svi_vlan}".format(svi_vlan=svi_vlan))
config_list.append("private-vlan mapping {mapping_vlan}".format(mapping_vlan=mapping_vlan))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure PVLAN-mapping'
)
def configure_pvlan_primary(device, primary_vlan, secondary_vlan=None):
""" Configures Primary Private Vlan
Args:
device ('obj'): device to use
primary_vlan ('str'): Primary private vlan
secondary_vlan ('str',optional): Secondary isolated/community vlan
Returns:
None
Raises:
SubCommandFailure
"""
config_list = []
# vlan 100
# private-vlan primary
config_list.append("vlan {primary_vlan} \n"
"private-vlan primary".format(primary_vlan=primary_vlan))
# private-vlan association 101
if secondary_vlan != None:
config_list.append("private-vlan association {secondary_vlan}".format(secondary_vlan=secondary_vlan))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure Primary Pvlan'
)
def configure_pvlan_type(device,vlan,pvlan_type):
""" Configures Isolated Private Vlan
Args:
device ('obj'): device to use
vlan ('str'): Vlan id
pvlan_type ('str'): Private vlan type (i.e isolated, primary, community)
Returns:
None
Raises:
SubCommandFailure
"""
# Initialize list variable
config_list = []
config_list.append("vlan {vlan}".format(vlan=vlan))
config_list.append("private-vlan {pvlan_type}".format(pvlan_type=pvlan_type))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure Primary Pvlan'
) |
the-stack_0_12956 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a VM with user specified disks attached to it."""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def DiskName(context, diskobj):
return context.env['deployment'] + '-disk-' + diskobj['name']
def GenerateConfig(context):
"""Creates configuration."""
resources = []
project = context.env['project']
# create disks resources
for disk_obj in context.properties['disks']:
resources.append({'name': DiskName(context, disk_obj),
'type': 'compute.v1.disk',
'properties': {
'zone': context.properties['zone'],
'sizeGb': str(disk_obj['sizeGb']),
'type': ''.join([COMPUTE_URL_BASE,
'projects/', project, '/zones/',
context.properties['zone'],
'/diskTypes/', disk_obj['diskType']])
}
})
disks = []
disks.append({'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'diskName': project + '-boot',
'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',
'debian-cloud/global/images/',
'family/debian-8'])
}
})
for disk_obj in context.properties['disks']:
disks.append({'deviceName': DiskName(context, disk_obj),
'type': 'PERSISTENT',
'source': ''.join(['$(ref.', DiskName(context, disk_obj),
'.selfLink)']),
'autoDelete': True})
# create vm with disks
resources.append({'name': context.env['deployment'] + '-vm',
'type': 'compute.v1.instance',
'properties': {
'zone': context.properties['zone'],
'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',
project, '/zones/',
context.properties['zone'],
'/machineTypes/f1-micro']),
'networkInterfaces': [{
'network': ''.join([COMPUTE_URL_BASE,
'projects/', project,
'/global/networks/default']),
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'}],
}],
'disks': disks
}
})
return {'resources': resources}
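# Illustrative deployment properties (not part of the original template); the
# keys under `properties` mirror the lookups above, while the resource name and
# template filename are assumptions:
#
#   resources:
#   - name: vm-with-disks
#     type: vm_creation.py
#     properties:
#       zone: us-central1-f
#       disks:
#       - name: backup
#         sizeGb: 100
#         diskType: pd-standard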
|
the-stack_0_12958 | """
-*- coding: utf-8 -*-
@github{
title = {KoSpeech: Open Source Project for Korean End-to-End Automatic Speech Recognition in PyTorch},
author = {Soohwan Kim, Seyoung Bae, Cheolhwang Won, Suwon Park},
link = {https://github.com/sooftware/KoSpeech},
year = {2020}
}
"""
import sys
import argparse
import random
import warnings
import torch
from torch import optim, nn
sys.path.append('..')
from kospeech.data.data_loader import split_dataset, load_data_list
from kospeech.optim.loss import CrossEntrypyLoss
from kospeech.optim.lr_scheduler import RampUpLR
from kospeech.optim.optimizer import Optimizer
from kospeech.trainer.supervised_trainer import SupervisedTrainer
from kospeech.model_builder import build_model
from kospeech.opts import print_opts, build_train_opts, build_model_opts, build_preprocess_opts
from kospeech.utils import PAD_token, char2id, check_envirionment
def train(opt):
random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
device = check_envirionment(opt.use_cuda)
if not opt.resume:
audio_paths, script_paths = load_data_list(opt.data_list_path, opt.dataset_path)
epoch_time_step, trainset_list, validset = split_dataset(opt, audio_paths, script_paths)
model = build_model(opt, device)
optimizer = optim.Adam(model.module.parameters(), lr=opt.init_lr, weight_decay=1e-05)
if opt.rampup_period > 0:
scheduler = RampUpLR(optimizer, opt.init_lr, opt.high_plateau_lr, opt.rampup_period)
optimizer = Optimizer(optimizer, scheduler, opt.rampup_period, opt.max_grad_norm)
else:
optimizer = Optimizer(optimizer, None, 0, opt.max_grad_norm)
criterion = CrossEntrypyLoss(len(char2id), PAD_token, opt.label_smoothing, dim=-1).to(device)
else:
trainset_list = None
validset = None
model = None
optimizer = None
criterion = None
epoch_time_step = None
trainer = SupervisedTrainer(optimizer=optimizer, criterion=criterion, trainset_list=trainset_list,
validset=validset, num_workers=opt.num_workers,
high_plateau_lr=opt.high_plateau_lr, low_plateau_lr=opt.low_plateau_lr,
decay_threshold=opt.decay_threshold, exp_decay_period=opt.exp_decay_period,
device=device, teacher_forcing_step=opt.teacher_forcing_step,
min_teacher_forcing_ratio=opt.min_teacher_forcing_ratio, print_every=opt.print_every,
save_result_every=opt.save_result_every, checkpoint_every=opt.checkpoint_every,
architecture=opt.architecture)
model = trainer.train(model=model, batch_size=opt.batch_size, epoch_time_step=epoch_time_step,
num_epochs=opt.num_epochs, teacher_forcing_ratio=opt.teacher_forcing_ratio, resume=opt.resume)
return model
def _get_parser():
""" Get arguments parser """
parser = argparse.ArgumentParser(description='KoSpeech')
parser.add_argument('--mode', type=str, default='train')
build_preprocess_opts(parser)
build_model_opts(parser)
build_train_opts(parser)
return parser
def main():
warnings.filterwarnings('ignore')
parser = _get_parser()
opt = parser.parse_args()
print_opts(opt, opt.mode)
train(opt)
if __name__ == '__main__':
main()
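# Illustrative invocation sketch (not part of the original script). Only
# `--mode` is defined directly above; the other flag names are assumptions
# based on the opt.* attributes used in train() and are expected to be added
# by build_preprocess_opts/build_model_opts/build_train_opts:
#
#   python train.py --mode train \
#       --dataset_path /path/to/KsponSpeech \
#       --data_list_path /path/to/train_list.csv \
#       --batch_size 32 --num_epochs 20 --use_cuda True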
|
the-stack_0_12959 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.api.urlfetch import DownloadError
from library import login
from boto.ec2.connection import *
class AlleVolumesLoeschenDefinitiv(webapp.RequestHandler):
def get(self):
mobile = self.request.get('mobile')
if mobile != "true":
mobile = "false"
# Find out the username
username = users.get_current_user()
conn_region, regionname = login(username)
try:
# List of all volumes
liste_volumes = conn_region.get_all_volumes()
except EC2ResponseError:
# If it fails...
fehlermeldung = "10"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
except DownloadError:
# This exception guards against these two errors:
# DownloadError: ApplicationError: 2 timed out
# DownloadError: ApplicationError: 5
fehlermeldung = "8"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
else:
# If it worked...
# Number of volumes in the list
laenge_liste_volumes = len(liste_volumes)
for i in range(laenge_liste_volumes):
try:
# Remove the volume
conn_region.delete_volume(liste_volumes[i].id)
except EC2ResponseError:
# If it fails...
fehlermeldung = "26"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
except DownloadError:
# This exception guards against these two errors:
# DownloadError: ApplicationError: 2 timed out
# DownloadError: ApplicationError: 5
fehlermeldung = "8"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
fehlermeldung = "27"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
|
the-stack_0_12961 | import json
from collections import defaultdict
from typing import List
from sqlalchemy import desc
from sqlalchemy.future import select
from app.crud.test_case.ConstructorDao import ConstructorDao
from app.crud.test_case.TestCaseAssertsDao import TestCaseAssertsDao
from app.crud.test_case.TestCaseDirectory import PityTestcaseDirectoryDao
from app.crud.test_case.TestcaseDataDao import PityTestcaseDataDao
from app.models import Session, DatabaseHelper, async_session
from app.models.constructor import Constructor
from app.models.schema.testcase_schema import TestCaseForm
from app.models.test_case import TestCase
from app.utils.logger import Log
class TestCaseDao(object):
log = Log("TestCaseDao")
@staticmethod
async def list_test_case(directory_id: int = None, name: str = "", create_user: str = None):
try:
filters = [TestCase.deleted_at == None]
if directory_id:
parents = await PityTestcaseDirectoryDao.get_directory_son(directory_id)
filters = [TestCase.deleted_at == None, TestCase.directory_id.in_(parents)]
if name:
filters.append(TestCase.name.like(f"%{name}%"))
if create_user:
filters.append(TestCase.create_user == create_user)
async with async_session() as session:
sql = select(TestCase).where(*filters).order_by(TestCase.name.asc())
result = await session.execute(sql)
return result.scalars().all()
except Exception as e:
TestCaseDao.log.error(f"获取测试用例失败: {str(e)}")
raise Exception(f"获取测试用例失败: {str(e)}")
@staticmethod
async def get_test_case_by_directory_id(directory_id: int):
try:
async with async_session() as session:
sql = select(TestCase).where(TestCase.deleted_at == None,
TestCase.directory_id == directory_id).order_by(TestCase.name.asc())
result = await session.execute(sql)
ans = []
case_map = dict()
for item in result.scalars():
ans.append({"title": item.name, "key": "testcase_{}".format(item.id), "children": []})
case_map[item.id]=item.name
return ans, case_map
except Exception as e:
TestCaseDao.log.error(f"获取测试用例失败: {str(e)}")
raise Exception(f"获取测试用例失败: {str(e)}")
@staticmethod
def get_tree(case_list):
result = defaultdict(list)
# Build the directory -> test case mapping
for cs in case_list:
result[cs.catalogue].append(cs)
keys = sorted(result.keys())
tree = [dict(key=f"cat_{key}",
children=[{"key": f"case_{child.id}", "title": child.name,
"total": TestCaseDao.get_case_children_length(child.id),
"children": TestCaseDao.get_case_children(child.id)} for child in result[key]],
title=key, total=len(result[key])) for key in keys]
return tree
@staticmethod
def get_case_children(case_id: int):
data, err = TestCaseAssertsDao.list_test_case_asserts(case_id)
if err:
raise err
return [dict(key=f"asserts_{d.id}", title=d.name, case_id=case_id) for d in data]
@staticmethod
def get_case_children_length(case_id: int):
data, err = TestCaseAssertsDao.list_test_case_asserts(case_id)
if err:
raise err
return len(data)
@staticmethod
def insert_test_case(test_case, user):
"""
:param user: creator
:param test_case: the test case
:return:
"""
try:
with Session() as session:
data = session.query(TestCase).filter_by(name=test_case.get("name"),
directory_id=test_case.get("directory_id"),
deleted_at=None).first()
if data is not None:
raise Exception("用例已存在")
cs = TestCase(**test_case, create_user=user)
session.add(cs)
session.commit()
session.refresh(cs)
return cs.id
except Exception as e:
TestCaseDao.log.error(f"添加用例失败: {str(e)}")
raise Exception(f"添加用例失败: {str(e)}")
@staticmethod
def update_test_case(test_case: TestCaseForm, user):
"""
:param user: modifier
:param test_case: the test case
:return:
"""
try:
with Session() as session:
data = session.query(TestCase).filter_by(id=test_case.id, deleted_at=None).first()
if data is None:
raise Exception("用例不存在")
DatabaseHelper.update_model(data, test_case, user)
session.commit()
session.refresh(data)
return data
except Exception as e:
TestCaseDao.log.error(f"编辑用例失败: {str(e)}")
raise Exception(f"编辑用例失败: {str(e)}")
@staticmethod
async def query_test_case(case_id: int) -> dict:
try:
async with async_session() as session:
sql = select(TestCase).where(TestCase.id == case_id, TestCase.deleted_at == None)
result = await session.execute(sql)
data = result.scalars().first()
if data is None:
raise Exception("用例不存在")
# 获取断言部分
asserts, _ = await TestCaseAssertsDao.async_list_test_case_asserts(data.id)
# 获取数据构造器
constructors = await ConstructorDao.list_constructor(case_id)
constructors_case = await TestCaseDao.query_test_case_by_constructors(constructors)
test_data = await PityTestcaseDataDao.list_testcase_data(case_id)
return dict(asserts=asserts, constructors=constructors, case=data, constructors_case=constructors_case,
test_data=test_data)
except Exception as e:
TestCaseDao.log.error(f"查询用例失败: {str(e)}")
raise Exception(f"查询用例失败: {str(e)}")
@staticmethod
async def query_test_case_by_constructors(constructors: List[Constructor]):
try:
# Find the test cases referenced by case-type (type == 0) constructors
constructors = [json.loads(x.constructor_json).get("case_id") for x in constructors if x.type == 0]
async with async_session() as session:
sql = select(TestCase).where(TestCase.id.in_(constructors), TestCase.deleted_at == None)
result = await session.execute(sql)
data = result.scalars().all()
return {x.id: x for x in data}
except Exception as e:
TestCaseDao.log.error(f"查询用例失败: {str(e)}")
raise Exception(f"查询用例失败: {str(e)}")
@staticmethod
async def async_query_test_case(case_id) -> [TestCase, str]:
try:
async with async_session() as session:
result = await session.execute(
select(TestCase).where(TestCase.id == case_id, TestCase.deleted_at == None))
data = result.scalars().first()
if data is None:
return None, "用例不存在"
return data, None
except Exception as e:
TestCaseDao.log.error(f"查询用例失败: {str(e)}")
return None, f"查询用例失败: {str(e)}"
@staticmethod
def list_testcase_tree(projects) -> [List, dict]:
try:
result = []
project_map = {}
project_index = {}
for p in projects:
project_map[p.id] = p.name
result.append({
"label": p.name,
"value": p.id,
"key": p.id,
"children": [],
})
project_index[p.id] = len(result) - 1
with Session() as session:
data = session.query(TestCase).filter(TestCase.project_id.in_(project_map.keys()),
TestCase.deleted_at == None).all()
for d in data:
result[project_index[d.project_id]]["children"].append({
"label": d.name,
"value": d.id,
"key": d.id,
})
return result
except Exception as e:
TestCaseDao.log.error(f"获取用例列表失败: {str(e)}")
raise Exception("获取用例列表失败")
@staticmethod
def select_constructor(case_id: int):
"""
Fetch a test case's constructor data by case_id
:param case_id:
:return:
"""
try:
with Session() as session:
data = session.query(Constructor).filter_by(case_id=case_id, deleted_at=None).order_by(
desc(Constructor.created_at)).all()
return data
except Exception as e:
TestCaseDao.log.error(f"查询构造数据失败: {str(e)}")
@staticmethod
async def async_select_constructor(case_id: int) -> List[Constructor]:
"""
Asynchronously fetch a test case's constructor data
:param case_id:
:return:
"""
try:
async with async_session() as session:
sql = select(Constructor).where(Constructor.case_id == case_id,
Constructor.deleted_at == None).order_by(Constructor.created_at)
data = await session.execute(sql)
return data.scalars().all()
except Exception as e:
TestCaseDao.log.error(f"查询构造数据失败: {str(e)}")
@staticmethod
async def collect_data(case_id: int, data: List):
"""
Collect data that uses case_id as a precondition (postconditions are not supported yet)
:param data:
:param case_id:
:return:
"""
# First fetch the data constructors (preconditions)
pre = dict(id=f"pre_{case_id}", label="Preconditions", children=list())
await TestCaseDao.collect_constructor(case_id, pre)
data.append(pre)
# Fetch the assertions
asserts = dict(id=f"asserts_{case_id}", label="Assertions", children=list())
await TestCaseDao.collect_asserts(case_id, asserts)
data.append(asserts)
@staticmethod
async def collect_constructor(case_id, parent):
constructors = await TestCaseDao.async_select_constructor(case_id)
for c in constructors:
temp = dict(id=f"constructor_{c.id}", label=f"{c.name}", children=list())
if c.type == 0:
# This is a nested test case, keep recursing
temp["label"] = "[CASE]: " + temp["label"]
json_data = json.loads(c.constructor_json)
await TestCaseDao.collect_data(json_data.get("case_id"), temp.get("children"))
elif c.type == 1:
temp["label"] = "[SQL]: " + temp["label"]
elif c.type == 2:
temp["label"] = "[REDIS]: " + temp["label"]
# Otherwise append the data as usual
parent.get("children").append(temp)
@staticmethod
async def collect_asserts(case_id, parent):
asserts, err = await TestCaseAssertsDao.async_list_test_case_asserts(case_id)
if err:
raise Exception("获取断言数据失败")
for a in asserts:
temp = dict(id=f"assert_{a.id}", label=f"{a.name}", children=list())
parent.get("children").append(temp)
@staticmethod
async def get_xmind_data(case_id: int):
result = dict()
data = await TestCaseDao.query_test_case(case_id)
cs = data.get("case")
# Start parsing the test data
result.update(dict(id=f"case_{case_id}", label=f"{cs.name}({cs.id})"))
children = list()
await TestCaseDao.collect_data(case_id, children)
result["children"] = children
return result
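# Illustrative shape of the value returned by get_xmind_data (field values are
# assumed examples):
#
#   {
#     "id": "case_12",
#     "label": "login_case(12)",
#     "children": [
#       {"id": "pre_12", "label": "Preconditions", "children": [...]},
#       {"id": "asserts_12", "label": "Assertions", "children": [...]}
#     ]
#   }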
|
the-stack_0_12962 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^blog/', include('blog.foo.urls')),
(r'^$', 'Account.views.index'),
(r'^test/$', 'Account.views.test'),
(r'^random_number/$', 'Account.views.random_number'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
|
the-stack_0_12963 | import pandas as pd
import numpy as np
import os
import json
from datetime import date
def getFilename(subject_data):
"""
Given the subject_data field from a row of one of our SpaceFluff dataframes, extract the name of the object being classified
by extracting the 'Filename'|'image'|'IMAGE' field".
To be used with df[column].apply()
@returns {string} filename of the object being classified, with the trailing '_insp.png' suffix stripped
"""
keys = list(subject_data.values())[0].keys()
accessKey = (
"Filename" if "Filename" in keys else "image" if "image" in keys else "IMAGE" if "IMAGE" in keys else None)
if accessKey:
return list(subject_data.values())[0][accessKey][:-9]
else:
print("No filename found!")
def getMetadataValue(metadata, field):
'''
@param metadata metadata column from a row in a SpaceFluff dataframe
@param {string} field: 'retired' | 'already_seen'
@returns {boolean} value of `field` within the row's metadata column
'''
return metadata['subject_selection_state'][field]
def parseTime(created_at):
'''
@param {df column} created_at: df['created_at'] column
'''
return pd.to_datetime(created_at, format="%Y-%m-%d %H:%M:%S %Z")
def getGroupSize(group):
'''
@param {pd.core.frame.DataFrame} pandas dataframe group
@returns number of rows in group (corresponds to number of columns in case of parsed SpaceFluff dataframe)
'''
return group.shape[0]
def extract_task_value(task_index, row):
try:
return row[task_index]['value']
except:
return
def percentageVotesForAnswer(counts, answer):
'''
@param counts: a df column like {galaxy: 15, group of objects (cluster): 10, something else/empty center: 2}
@param answer: one of the keys of `counts`
'''
totalVotes = sum(counts.values())
if not answer in counts.keys():
return 0
votesForAnswer = counts[answer]
return round(100*votesForAnswer/totalVotes, 1)
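# Worked example (illustrative, with assumed vote counts):
#
#     counts = {'galaxy': 15, 'group of objects (cluster)': 10, 'something else/empty center': 2}
#     percentageVotesForAnswer(counts, 'galaxy')   # 15 of 27 votes -> 55.6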
def extractTaskValue(annotations, task):
'''
@param {list} annotations: annotations column for a row in a SpaceFluff dataframe
@param {string} task: one of 'Ti', where i \in 0,2,1,3,4,5,9
@returns {string | None} value the user provided for the given task, or None
'''
filtered = list(filter(lambda x: x['task'] == task, annotations))
if len(filtered) > 0:
return filtered[0]['value']
def extract_retired_info(subject_data):
'''
@param subject_data: (dataframe 'subject_data' column)
'''
return list(subject_data.values())[0]["retired"]
def get_power_users(df, vote_count_threshold):
"""
@param df: parsed dataframe where each row is a single classification
@param {int} vote_count_threshold: return only users that made at least this many valid classifications
"""
groupby_username = df[['user_name']].groupby(['user_name'])
groupby_username_filtered = groupby_username.filter(lambda x: x.shape[0] >= vote_count_threshold)
grouped = groupby_username_filtered.groupby(['user_name'])
filtered_usernames_and_votes = []
for username, vote_count in grouped:
filtered_usernames_and_votes.append({
"username": username,
"votes": len(vote_count)
})
return filtered_usernames_and_votes
def get_task_0_value_counts(row):
"Get task 0 value counts for one row of a group of classifications"
row = list(row)
# value_counts = {answer: 0 for answer in answer_types}
value_counts = {}
for vote in row:
if value_counts.get(vote):
value_counts[vote] += 1
else:
value_counts[vote] = 1
return value_counts, len(row)
def as_array(lst):
'Turn a Python list into a NumPy array'
if type(lst) == np.ndarray:
return lst
return np.array(lst)
def get_running_vote_fraction(df):
"""
Returns a list of
(% votes by users that case <= n votes)/total votes
as a function of n
@param df: `df`-like dataframe, where each row corresponds to a single classification made by a single user
"""
users_and_classification_counts = []
for k, v in df.groupby('user_name').groups.items():
users_and_classification_counts.append({
'username': k,
'classifications': len(v)
})
cls_per_user = [entry['classifications'] for entry in users_and_classification_counts]
total_votes = sum(cls_per_user) # total number of votes made
sorted_vote_counts = sorted(cls_per_user) # sorted list of number of classifications per user
# create dictionary with keys: # votes per user, values: # users that cast that amount of votes
countDict = {}
for entry in sorted_vote_counts:
countDict[entry] = countDict.get(entry, 0) + 1
fractions = []
for vote_count, occurrence_rate in countDict.items():
fractions.append([vote_count, vote_count*occurrence_rate/total_votes, occurrence_rate])
counts, fractions, users_included = as_array(fractions).T
# create a running fraction of total votes cast in a single loop
running_fraction = []
for i, fr in enumerate(fractions):
if i == 0:
val = fr
else:
val = fr+running_fraction[i-1]
running_fraction.append(val)
return [
users_and_classification_counts,
cls_per_user,
counts,
running_fraction
] |
the-stack_0_12964 | from __future__ import absolute_import, print_function, unicode_literals
import re
import sys
from django.conf import settings as django_settings
from django.http import Http404, HttpResponseRedirect
from django.utils.cache import add_never_cache_headers
def redirect_request_processor(page, request):
"""
Returns a ``HttpResponseRedirect`` instance if the current page says
a redirect should happen.
"""
target = page.get_redirect_to_target(request)
if target:
if request._feincms_extra_context.get('extra_path', '/') == '/':
return HttpResponseRedirect(target)
raise Http404()
def extra_context_request_processor(page, request):
"""
Fills ``request._feincms_extra_context`` with a few useful variables.
"""
request._feincms_extra_context.update({
# XXX This variable name isn't accurate anymore.
'in_appcontent_subpage': False,
'extra_path': '/',
})
url = page.get_absolute_url()
if request.path != url:
request._feincms_extra_context.update({
'in_appcontent_subpage': True,
'extra_path': re.sub(
'^' + re.escape(url.rstrip('/')),
'',
request.path,
),
})
def frontendediting_request_processor(page, request):
"""
Sets the frontend editing state in the cookie depending on the
``frontend_editing`` GET parameter and the user's permissions.
"""
if not 'frontend_editing' in request.GET:
return
response = HttpResponseRedirect(request.path)
if request.user.has_module_perms('page'):
try:
enable_fe = int(request.GET['frontend_editing']) > 0
except ValueError:
enable_fe = False
if enable_fe:
response.set_cookie(str('frontend_editing'), enable_fe)
else:
response.delete_cookie(str('frontend_editing'))
# Redirect to cleanup URLs
return response
def frontendediting_response_processor(page, request, response):
# Add never cache headers in case frontend editing is active
if (hasattr(request, 'COOKIES')
and request.COOKIES.get('frontend_editing', False)):
if hasattr(response, 'add_post_render_callback'):
response.add_post_render_callback(add_never_cache_headers)
else:
add_never_cache_headers(response)
def etag_request_processor(page, request):
"""
Short-circuits the request-response cycle if the ETag matches.
"""
# XXX is this a performance concern? Does it create a new class
# every time the processor is called or is this optimized to a static
# class??
class DummyResponse(dict):
"""
This is a dummy class with enough behaviour of HttpResponse so we
can use the condition decorator without too much pain.
"""
def has_header(page, what):
return False
def dummy_response_handler(*args, **kwargs):
return DummyResponse()
def etagger(request, page, *args, **kwargs):
etag = page.etag(request)
return etag
def lastmodifier(request, page, *args, **kwargs):
lm = page.last_modified()
return lm
# Unavailable in Django 1.0 -- the current implementation of ETag support
# requires Django 1.1 unfortunately.
from django.views.decorators.http import condition
# Now wrap the condition decorator around our dummy handler:
# the net effect is that we will be getting a DummyResponse from
# the handler if processing is to continue and a non-DummyResponse
# (should be a "304 not modified") if the etag matches.
rsp = condition(etag_func=etagger, last_modified_func=lastmodifier)(
dummy_response_handler)(request, page)
# If dummy then don't do anything, if a real response, return and
# thus shortcut the request processing.
if not isinstance(rsp, DummyResponse):
return rsp
def etag_response_processor(page, request, response):
"""
Response processor to set an etag header on outgoing responses.
The Page.etag() method must return something valid as etag content
whenever you want an etag header generated.
"""
etag = page.etag(request)
if etag is not None:
response['ETag'] = '"' + etag + '"'
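# Illustrative registration sketch (not part of the original module); it
# follows the pattern shown in the debug_sql_queries_response_processor
# docstring below:
#
#     from feincms.module.page import models, processors
#     models.Page.register_request_processor(processors.etag_request_processor)
#     models.Page.register_response_processor(processors.etag_response_processor)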
def debug_sql_queries_response_processor(verbose=False, file=sys.stderr):
"""
Attaches a handler which prints the query count (and optionally all
individual queries which have been executed) on the console. Does nothing
if ``DEBUG = False``.
Example::
from feincms.module.page import models, processors
models.Page.register_response_processor(
processors.debug_sql_queries_response_processor(verbose=True),
)
"""
if not django_settings.DEBUG:
return lambda page, request, response: None
def processor(page, request, response):
from django.db import connection
print_sql = lambda x: x
try:
import sqlparse
print_sql = lambda x: sqlparse.format(
x, reindent=True, keyword_case='upper')
except:
pass
if verbose:
print("-" * 60, file=file)
time = 0.0
i = 0
for q in connection.queries:
i += 1
if verbose:
print("%d : [%s]\n%s\n" % (
i, q['time'], print_sql(q['sql'])), file=file)
time += float(q['time'])
print("-" * 60, file=file)
print("Total: %d queries, %.3f ms" % (i, time), file=file)
print("-" * 60, file=file)
return processor
|
the-stack_0_12965 | import requests
from data import ui
def consultar(token='25d800a8b8e8b99d77c809567aa291b8',self=0):
Sair = False
while(Sair == False):
if self == 1:
ip_input = ''
else:
ip_input = ui.input_dialog()
if len(ip_input) < 1:
ui.error_dialog('Enter something to look up.');break
try:
api=requests.get('http://ipwhois.app/json/'+ip_input).json()
#lat = api['latitude']
#lon = api['longitude']
#api2 = requests.get('http://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={token[2]}')
except:
msg = "erro no servidor"
try:
msg=f'''
IP: {api['ip']}
TYPE: {api['type']}
CONTINENT: {api['continent']}
CONTINENT CODE: {api['continent_code']}
COUNTRY: {api['country']}
COUNTRY CODE: {api['country']}
COUNTRY CAPITAL: {api['country_capital']}
COUNTRY PHONE CODE: {api['country_phone']}
NEIGHBOURING COUNTRIES: {api['country_neighbours']}
REGION: {api['region']}
CITY: {api['city']}
LATITUDE: {api['latitude']}
LONGITUDE: {api['longitude']}
ASN: {api['asn']}
ORG: {api['org']}
ISP: {api['isp']}
TIMEZONE: {api['timezone']}
TIMEZONE NAME: {api['timezone_name']}
GMT: {api['timezone_gmt']}
CURRENCY: {api['currency']}
CURRENCY CODE: {api['currency_code']}
CURRENCY SYMBOL: {api['currency_symbol']}
'''
#TEMPERATURE: {api2["weather"][0]["main"]}
except:
msg = 'Invalid IP.'
choice = int(ui.dialog_choice(msg))
if choice == 1:
pass
elif choice == 2:
Sair = True
else:
ui.error_dialog() |
the-stack_0_12966 | from typing import List, Tuple
from chiabip158 import PyBIP158
from cryptodoge.types.blockchain_format.coin import Coin
from cryptodoge.types.blockchain_format.sized_bytes import bytes32
from cryptodoge.types.full_block import FullBlock
from cryptodoge.types.header_block import HeaderBlock
from cryptodoge.types.name_puzzle_condition import NPC
from cryptodoge.util.condition_tools import created_outputs_for_conditions_dict
def get_block_header(block: FullBlock, tx_addition_coins: List[Coin], removals_names: List[bytes32]) -> HeaderBlock:
# Create filter
byte_array_tx: List[bytes32] = []
addition_coins = tx_addition_coins + list(block.get_included_reward_coins())
if block.is_transaction_block():
for coin in addition_coins:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for name in removals_names:
byte_array_tx.append(bytearray(name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter: bytes = bytes(bip158.GetEncoded())
return HeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.challenge_chain_ip_proof,
block.reward_chain_sp_proof,
block.reward_chain_ip_proof,
block.infused_challenge_chain_ip_proof,
block.foliage,
block.foliage_transaction_block,
encoded_filter,
block.transactions_info,
)
def additions_for_npc(npc_list: List[NPC]) -> List[Coin]:
additions: List[Coin] = []
for npc in npc_list:
for coin in created_outputs_for_conditions_dict(npc.condition_dict, npc.coin_name):
additions.append(coin)
return additions
def tx_removals_and_additions(npc_list: List[NPC]) -> Tuple[List[bytes32], List[Coin]]:
"""
Doesn't return farmer and pool reward.
"""
removals: List[bytes32] = []
additions: List[Coin] = []
# build removals list
if npc_list is None:
return [], []
for npc in npc_list:
removals.append(npc.coin_name)
additions.extend(additions_for_npc(npc_list))
return removals, additions
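# Illustrative usage sketch (not part of the original module); `block` and
# `npc_list` are assumed to come from block / generator processing elsewhere:
#
#     removals, additions = tx_removals_and_additions(npc_list)
#     header_block = get_block_header(block, additions, removals)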
|
the-stack_0_12967 | class A:
def __init__(self, gpioPort):
self.gpioPort = gpioPort
def p(self):
print(self.gpioPort)
class B(A):
pass
B(12).p()
C = type('C', (A,), dict({}))
print(C)
C(14).p()
def value(value=None):
if value == None:
return 'get_value'
else:
print(value)
def m(a, b, c=None, d=None):
print(a, b, c, d)
m(1, 2)
print(value('set_value'))
print(value())
class A1:
@property
def p(self):
return {"a": 10}
class AA(A1):
@property
def p(self):
return super().p
a = AA()
print(a.p) |
the-stack_0_12968 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Basic classes to contain rstWeb objects and methods to calculate their attributes
Author: Amir Zeldes
"""
class NODE:
def __init__(self, id, left, right, parent, depth, kind, text, relname, relkind):
"""Basic class to hold all nodes (EDU, span and multinuc) in structure.py and while importing"""
self.id = id
self.parent = parent
self.left = left
self.right = right
self.depth = depth
self.kind = kind #edu, multinuc or span node
self.text = text #text of an edu node; empty for spans/multinucs
self.relname = relname
self.relkind = relkind #rst (a.k.a. satellite), multinuc or span relation
self.sortdepth = depth
class SEGMENT:
def __init__(self, id, text):
""" Class used by segment.py to represent EDUs, NOT used by the structurer in structure.py"""
self.id = id
self.text = text
self.tokens = text.split(" ")
def get_depth(orig_node, probe_node, nodes, doc=None, project=None, user=None):
"""
Calculate graphical nesting depth of a node based on the node list graph.
Note that RST parentage without span/multinuc does NOT increase depth.
"""
if probe_node.parent != "0":
try:
parent = nodes[probe_node.parent]
except KeyError:
# Parent node does not exist, set parent to 0
from modules.rstweb_sql import update_parent
if doc is not None and project is not None and user is not None:
update_parent(probe_node.id,"0",doc,project,user)
return
else:
raise KeyError("Node ID " + probe_node.id + " has non existing parent " + probe_node.parent + " and user not set in function\n")
if parent.kind != "edu" and (probe_node.relname == "span" or parent.kind == "multinuc" and probe_node.relkind =="multinuc"):
orig_node.depth += 1
orig_node.sortdepth +=1
elif parent.kind == "edu":
orig_node.sortdepth += 1
get_depth(orig_node, parent, nodes, doc=doc, project=project, user=user)
def get_left_right(node_id, nodes, min_left, max_right, rel_hash):
"""
Calculate leftmost and rightmost EDU covered by a NODE object. For EDUs this is the number of the EDU
itself. For spans and multinucs, the leftmost and rightmost child dominated by the NODE is found recursively.
"""
if nodes[node_id].parent != "0" and node_id != "0":
parent = nodes[nodes[node_id].parent]
if min_left > nodes[node_id].left or min_left == 0:
if nodes[node_id].left != 0:
min_left = nodes[node_id].left
if max_right < nodes[node_id].right or max_right == 0:
max_right = nodes[node_id].right
if nodes[node_id].relname == "span":
if parent.left > min_left or parent.left == 0:
parent.left = min_left
if parent.right < max_right:
parent.right = max_right
elif nodes[node_id].relname in rel_hash:
if parent.kind == "multinuc" and rel_hash[nodes[node_id].relname] =="multinuc":
if parent.left > min_left or parent.left == 0:
parent.left = min_left
if parent.right < max_right:
parent.right = max_right
get_left_right(parent.id, nodes, min_left, max_right, rel_hash)
|
the-stack_0_12972 | import jax
from jax import numpy as jnp
# import numpy as np
from tabcorr.tabcorr import *
class JaxTabCorr(TabCorr):
def predict(self, model, separate_gal_type=False, **occ_kwargs):
"""
Predicts the number density and correlation function for a certain
model.
Parameters
----------
model : HodModelFactory
Instance of ``halotools.empirical_models.HodModelFactory``
describing the model for which predictions are made.
separate_gal_type : boolean, optional
If True, the return values are dictionaries divided by each galaxy
types contribution to the output result.
**occ_kwargs : dict, optional
Keyword arguments passed to the ``mean_occupation`` functions
of the model.
Returns
-------
ngal : numpy.array or dict
Array or dictionary of arrays containing the number densities for
each galaxy type stored in self.gal_type. The total galaxy number
density is the sum of all elements of this array.
xi : numpy.array or dict
Array or dictionary of arrays storing the prediction for the
correlation function.
"""
try:
assert (sorted(model.gal_types) == sorted(
['centrals', 'satellites']))
except AssertionError:
raise RuntimeError('The model instance must only have centrals ' +
'and satellites as galaxy types. Check the ' +
'gal_types attribute of the model instance.')
try:
assert (model._input_model_dictionary['centrals_occupation']
.prim_haloprop_key == self.attrs['prim_haloprop_key'])
assert (model._input_model_dictionary['satellites_occupation']
.prim_haloprop_key == self.attrs['prim_haloprop_key'])
except AssertionError:
raise RuntimeError('Mismatch in the primary halo properties of ' +
'the model and the TabCorr instance.')
try:
if hasattr(model._input_model_dictionary['centrals_occupation'],
'sec_haloprop_key'):
assert (model._input_model_dictionary['centrals_occupation']
.sec_haloprop_key == self.attrs['sec_haloprop_key'])
if hasattr(model._input_model_dictionary['satellites_occupation'],
'sec_haloprop_key'):
assert (model._input_model_dictionary['satellites_occupation']
.sec_haloprop_key == self.attrs['sec_haloprop_key'])
except AssertionError:
raise RuntimeError('Mismatch in the secondary halo properties ' +
'of the model and the TabCorr instance.')
# TODO: Figure out how to add back in the redshift sanity check in a JAX-friendly way?
# ====================================================================================
# try:
# assert abs(model.redshift - self.attrs['redshift']) < 0.05
# except AssertionError:
# raise RuntimeError('Mismatch in the redshift of the model and ' +
# 'the TabCorr instance.')
mean_occupation = jnp.zeros(len(self.gal_type["gal_type"]))
mask = self.gal_type["gal_type"] == "centrals"
mean_occupation = jax.ops.index_update(
mean_occupation, mask,
model.mean_occupation_centrals(
prim_haloprop=self.gal_type["prim_haloprop"][mask],
sec_haloprop_percentile=self.gal_type["sec_haloprop_percentile"][mask],
**occ_kwargs)
)
mean_occupation = jax.ops.index_update(
mean_occupation, ~mask,
model.mean_occupation_satellites(
prim_haloprop=self.gal_type["prim_haloprop"][~mask],
sec_haloprop_percentile=self.gal_type["sec_haloprop_percentile"][~mask],
**occ_kwargs)
)
return jaxtabcorr_predict(
mean_occupation,
self.gal_type["gal_type"] == "centrals",
self.gal_type["prim_haloprop"].data,
self.gal_type["sec_haloprop_percentile"].data,
self.gal_type["n_h"].data, self.tpcf_matrix,
self.tpcf_shape, self.attrs["mode"] == "cross",
separate_gal_type)
def jaxtabcorr_predict(mean_occupation, is_centrals, prim_haloprop,
sec_haloprop_percentile, n_h, tpcf_matrix,
tpcf_shape, do_cross, separate_gal_type):
ngal = mean_occupation * n_h
if not do_cross:
ngal_sq = jnp.outer(ngal, ngal)
ngal_sq = 2 * ngal_sq - jnp.diag(jnp.diag(ngal_sq))
ngal_sq = jax_symmetric_matrix_to_array(ngal_sq)
xi = tpcf_matrix * ngal_sq / jnp.sum(ngal_sq)
else:
xi = tpcf_matrix * ngal / jnp.sum(ngal)
if not separate_gal_type:
ngal = jnp.sum(ngal)
xi = jnp.sum(xi, axis=1).reshape(tpcf_shape)
return ngal, xi
else:
ngal_dict = {}
xi_dict = {}
for gal_type, key in [(True, "centrals"), (False, "satellites")]:
mask = is_centrals == gal_type
ngal_type = jnp.where(mask, ngal, 0)
ngal_dict[key] = jnp.sum(ngal_type) # <-- TODO: this will break
if not do_cross:
for gal_type_1, gal_type_2, name in [(True, True, "centrals-centrals"),
(True, False, "centrals-satellites"),
(False, False, "satellites-satellites")]:
mask = jax_symmetric_matrix_to_array(jnp.outer(
gal_type_1 == is_centrals,
gal_type_2 == is_centrals) |
jnp.outer(
gal_type_2 == is_centrals,
gal_type_1 == is_centrals))
xi_dict[name] = jnp.sum(xi * mask, axis=1).reshape(tpcf_shape)
else:
for gal_type, key in [(True, "centrals"), (False, "satellites")]:
mask = is_centrals == gal_type
xi_dict[key] = jnp.sum(
xi * mask, axis=1).reshape(tpcf_shape)
return ngal_dict, xi_dict
static_args = ["tpcf_shape", "do_cross", "separate_gal_type"]
jaxtabcorr_predict = jax.jit(jaxtabcorr_predict,
static_argnames=static_args)
def jax_symmetric_matrix_to_array(matrix):
# Assertions not allowed by jit :(
# try:
# assert matrix.shape[0] == matrix.shape[1]
# assert np.all(matrix == matrix.T)
# except AssertionError:
# raise RuntimeError('The matrix you provided is not symmetric.')
n_dim = matrix.shape[0]
sel = jnp.zeros((n_dim**2 + n_dim) // 2, dtype=int)
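    # Collect the flattened indices of the lower triangle (diagonal included)
    # row by row; matrix.ravel()[sel] then packs the symmetric matrix into a
    # 1-D array of length n_dim * (n_dim + 1) / 2.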
for i in range(matrix.shape[0]):
sel = jax.ops.index_update(
sel, slice((i*(i+1))//2, (i*(i+1))//2+(i+1)),
jnp.arange(i*n_dim, i*n_dim + i + 1))
# sel[(i*(i+1))//2:(i*(i+1))//2+(i+1)] = jnp.arange(
# i*n_dim, i*n_dim + i + 1)
return matrix.ravel()[sel] |
the-stack_0_12973 | import datetime
class Employee:
raise_amount = 1.04 # Class variable
num_of_employees = 0
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@company.com'
Employee.num_of_employees += 1
def fullname(self):
return self.first + ' ' + self.last
def apply_raise(self):
# Can also use Employee.raise_amount
self.pay = int(self.pay * self.raise_amount)
@classmethod
def set_raise_amount(cls, amount):
cls.raise_amount = amount
# class method as alternative constructor
@classmethod
def from_string(cls, emp_str):
"construct a new employee from string kebab case"
first_name, last_name, pay = emp_str.split('-')
# calling constructor with cls keyword, denoting class
return cls(first_name, last_name, pay)
@staticmethod
def is_work_day_with_string(day):
work_days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday']
for d in work_days:
if d.upper() == day.upper():
print('work day')
return True
print('not a work day')
return False
@staticmethod
def is_work_day(day):
if day.weekday() == 5 or day.weekday() == 6:
print('not a work day')
return False
else:
print('work day')
return True
# creating employees by parsing kebab-case string
emp_str1 = 'John-Doe-7000'
# parsing
emp1 = Employee.from_string(emp_str1)
emp2 = Employee('Burak', 'Aksoy', 3000)
print(emp1.__dict__)
print(emp2.__dict__)
# regular methods pass self def my_func(self),
# class methods pass cls def class_func(cls),
# However, static methods do not pass anyhting
print(Employee.is_work_day_with_string('monday'))
print(Employee.is_work_day_with_string('saturday'))
my_date = datetime.date.today()
print(my_date)
print(Employee.is_work_day(my_date))
|
the-stack_0_12975 | #
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import sys
import re
import imp
import base64
import json
import tempfile
import time
from os.path import join
import Utils.WAAgentUtil
from Utils.WAAgentUtil import waagent
import logging
import logging.handlers
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from common import CommonVariables
import platform
import subprocess
import datetime
import Utils.Status
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
import traceback
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
def __init__(self,name):
self._name = name
self._version = '0.0'
return
class HandlerUtility:
telemetry_data = {}
serializable_telemetry_data = []
ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success
SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
HealthStatusCode = -1
def __init__(self, log, error, short_name):
self._log = log
self._error = error
self.log_message = ""
self._short_name = short_name
self.patching = None
self.storageDetailsObj = None
self.partitioncount = 0
self.logging_file = None
def _get_log_prefix(self):
return '[%s-%s]' % (self._context._name, self._context._version)
def _get_current_seq_no(self, config_folder):
seq_no = -1
cur_seq_no = -1
freshest_time = None
for subdir, dirs, files in os.walk(config_folder):
for file in files:
try:
if(file.endswith('.settings')):
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(freshest_time == None):
freshest_time = os.path.getmtime(join(config_folder,file))
seq_no = cur_seq_no
else:
current_file_m_time = os.path.getmtime(join(config_folder,file))
if(current_file_m_time > freshest_time):
freshest_time = current_file_m_time
seq_no = cur_seq_no
except ValueError:
continue
return seq_no
def get_last_seq(self):
if(os.path.isfile('mrseq')):
seq = waagent.GetFileContents('mrseq')
if(seq):
return int(seq)
return -1
def exit_if_same_seq(self):
current_seq = int(self._context._seq_no)
last_seq = self.get_last_seq()
if(current_seq == last_seq):
self.log("the sequence number are same, so skip, current:" + str(current_seq) + "== last:" + str(last_seq))
self.update_settings_file()
sys.exit(0)
def log(self, message,level='Info'):
try:
self.log_with_no_try_except(message, level)
except IOError:
pass
except Exception as e:
try:
errMsg='Exception in hutil.log'
self.log_with_no_try_except(errMsg, 'Warning')
except Exception as e:
pass
def log_with_no_try_except(self, message, level='Info'):
WriteLog = self.get_strvalue_from_configfile('WriteLog','True')
if (WriteLog == None or WriteLog == 'True'):
if sys.version_info > (3,):
if self.logging_file is not None:
self.log_py3(message)
else:
pass
else:
self._log(self._get_log_prefix() + message)
message = "{0} {1} {2} \n".format(str(datetime.datetime.now()) , level , message)
self.log_message = self.log_message + message
def log_py3(self, msg):
if type(msg) is not str:
msg = str(msg, errors="backslashreplace")
msg = str(datetime.datetime.now()) + " " + str(self._get_log_prefix()) + msg + "\n"
try:
with open(self.logging_file, "a+") as C :
C.write(msg)
except IOError:
pass
def error(self, message):
self._error(self._get_log_prefix() + message)
def fetch_log_message(self):
return self.log_message
def _parse_config(self, ctxt):
config = None
try:
config = json.loads(ctxt)
except:
self.error('JSON exception decoding ' + ctxt)
if config == None:
self.error("JSON error processing settings file:" + ctxt)
else:
handlerSettings = config['runtimeSettings'][0]['handlerSettings']
if 'protectedSettings' in handlerSettings and \
"protectedSettingsCertThumbprint" in handlerSettings and \
handlerSettings['protectedSettings'] is not None and \
handlerSettings["protectedSettingsCertThumbprint"] is not None:
protectedSettings = handlerSettings['protectedSettings']
thumb = handlerSettings['protectedSettingsCertThumbprint']
cert = waagent.LibDir + '/' + thumb + '.crt'
pkey = waagent.LibDir + '/' + thumb + '.prv'
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
waagent.SetFileContents(f.name,config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
cleartxt = None
if 'NS-BSD' in platform.system():
# base64 tool is not available with NSBSD, use openssl
cleartxt = waagent.RunGetOutput(self.patching.openssl_path + " base64 -d -A -in " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
else:
cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
jctxt = {}
try:
jctxt = json.loads(cleartxt)
except:
self.error('JSON exception decoding ' + cleartxt)
handlerSettings['protectedSettings'] = jctxt
self.log('Config decoded correctly.')
return config
def do_parse_context(self, operation):
self.operation = operation
_context = self.try_parse_context()
getWaagentPathUsed = Utils.WAAgentUtil.GetPathUsed()
if(getWaagentPathUsed == 0):
self.log("waagent old path is used")
else:
self.log("waagent new path is used")
if not _context:
self.log("maybe no new settings file found")
sys.exit(0)
return _context
def try_parse_context(self):
self._context = HandlerContext(self._short_name)
handler_env = None
config = None
ctxt = None
code = 0
try:
# get the HandlerEnvironment.json. According to the extension handler
# spec, it is always in the ./ directory
self.log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file = './HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
self.error("Unable to locate " + handler_env_file)
return None
ctxt = waagent.GetFileContents(handler_env_file)
if ctxt == None :
self.error("Unable to read " + handler_env_file)
try:
handler_env = json.loads(ctxt)
except:
pass
if handler_env == None :
self.log("JSON error processing " + handler_env_file)
return None
if type(handler_env) == list:
handler_env = handler_env[0]
self._context._name = handler_env['name']
self._context._version = str(handler_env['version'])
self._context._config_dir = handler_env['handlerEnvironment']['configFolder']
self._context._log_dir = handler_env['handlerEnvironment']['logFolder']
self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')
self.logging_file=self._context._log_file
self._context._shell_log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'shell.log')
self._change_log_file()
self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']
self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']
self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
if self._context._seq_no < 0:
self.error("Unable to locate a .settings file!")
return None
self._context._seq_no = str(self._context._seq_no)
self.log('sequence number is ' + self._context._seq_no)
self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')
self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
self.log("setting file path is" + self._context._settings_file)
ctxt = None
ctxt = waagent.GetFileContents(self._context._settings_file)
if ctxt == None :
error_msg = 'Unable to read ' + self._context._settings_file + '. '
self.error(error_msg)
return None
else:
if(self.operation is not None and self.operation.lower() == "enable"):
# we should keep the current status file
self.backup_settings_status_file(self._context._seq_no)
self._context._config = self._parse_config(ctxt)
except Exception as e:
errorMsg = "Unable to parse context, error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Error')
raise
return self._context
def _change_log_file(self):
self.log("Change log file to " + self._context._log_file)
waagent.LoggerInit(self._context._log_file,'/dev/stdout')
self._log = waagent.Log
self._error = waagent.Error
def save_seq(self):
self.set_last_seq(self._context._seq_no)
self.log("set most recent sequence number to " + self._context._seq_no)
def set_last_seq(self,seq):
waagent.SetFileContents('mrseq', str(seq))
'''
Sample /etc/azure/vmbackup.conf
[SnapshotThread]
seqsnapshot = 1
isanysnapshotfailed = False
UploadStatusAndLog = True
WriteLog = True
    seqsnapshot valid values (0 -> parallel snapshot, 1 -> programmatically set sequential snapshot, 2 -> customer set it for sequential snapshot)
'''
def get_value_from_configfile(self, key):
global backup_logger
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
except Exception as e:
pass
return value
def get_strvalue_from_configfile(self, key, default):
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_str = str(value)
except ValueError :
            self.log('Not able to parse the read value as string, falling back to default value', 'Warning')
value = default
return value
def get_intvalue_from_configfile(self, key, default):
value = default
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_int = int(value)
except ValueError :
            self.log('Not able to parse the read value as int, falling back to default value', 'Warning')
value = default
return int(value)
def set_value_to_configfile(self, key, value):
configfile = '/etc/azure/vmbackup.conf'
try :
            self.log('setting ' + str(key) + ' in config file to ' + str(value), 'Info')
if not os.path.exists(os.path.dirname(configfile)):
os.makedirs(os.path.dirname(configfile))
config = ConfigParsers.RawConfigParser()
if os.path.exists(configfile):
config.read(configfile)
if config.has_section('SnapshotThread'):
if config.has_option('SnapshotThread', key):
config.remove_option('SnapshotThread', key)
else:
config.add_section('SnapshotThread')
else:
config.add_section('SnapshotThread')
config.set('SnapshotThread', key, value)
with open(configfile, 'w') as config_file:
config.write(config_file)
except Exception as e:
errorMsg = " Unable to set config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Warning')
return value
def get_machine_id(self):
machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
machine_id = ""
try:
if not os.path.exists(os.path.dirname(machine_id_file)):
os.makedirs(os.path.dirname(machine_id_file))
if os.path.exists(machine_id_file):
file_pointer = open(machine_id_file, "r")
machine_id = file_pointer.readline()
file_pointer.close()
else:
mi = MachineIdentity()
machine_id = mi.stored_identity()[1:-1]
file_pointer = open(machine_id_file, "w")
file_pointer.write(machine_id)
file_pointer.close()
except Exception as e:
errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg, 'Error')
self.log("Unique Machine Id : {0}".format(machine_id))
return machine_id
def get_total_used_size(self):
try:
df = subprocess.Popen(["df" , "-k" , "--output=source,fstype,size,used,avail,pcent,target"], stdout=subprocess.PIPE)
'''
Sample output of the df command
Filesystem Type 1K-blocks Used Avail Use% Mounted on
/dev/sda2 xfs 52155392 3487652 48667740 7% /
devtmpfs devtmpfs 7170976 0 7170976 0% /dev
tmpfs tmpfs 7180624 0 7180624 0% /dev/shm
tmpfs tmpfs 7180624 760496 6420128 11% /run
tmpfs tmpfs 7180624 0 7180624 0% /sys/fs/cgroup
/dev/sda1 ext4 245679 151545 76931 67% /boot
/dev/sdb1 ext4 28767204 2142240 25140628 8% /mnt/resource
/dev/mapper/mygroup-thinv1 xfs 1041644 33520 1008124 4% /bricks/brick1
/dev/mapper/mygroup-85197c258a54493da7880206251f5e37_0 xfs 1041644 33520 1008124 4% /run/gluster/snaps/85197c258a54493da7880206251f5e37/brick2
/dev/mapper/mygroup2-thinv2 xfs 15717376 5276944 10440432 34% /tmp/test
/dev/mapper/mygroup2-63a858543baf4e40a3480a38a2f232a0_0 xfs 15717376 5276944 10440432 34% /run/gluster/snaps/63a858543baf4e40a3480a38a2f232a0/brick2
tmpfs tmpfs 1436128 0 1436128 0% /run/user/1000
//Centos72test/cifs_test cifs 52155392 4884620 47270772 10% /mnt/cifs_test2
'''
process_wait_time = 30
while(process_wait_time >0 and df.poll() is None):
time.sleep(1)
process_wait_time -= 1
output = df.stdout.read()
output = output.split("\n")
total_used = 0
total_used_network_shares = 0
total_used_gluster = 0
network_fs_types = []
for i in range(1,len(output)-1):
device, fstype, size, used, available, percent, mountpoint = output[i].split()
self.log("Device name : {0} fstype : {1} size : {2} used space in KB : {3} available space : {4} mountpoint : {5}".format(device,fstype,size,used,available,mountpoint))
if "fuse" in fstype.lower() or "nfs" in fstype.lower() or "cifs" in fstype.lower():
if fstype not in network_fs_types :
network_fs_types.append(fstype)
self.log("Not Adding as network-drive, Device name : {0} used space in KB : {1} fstype : {2}".format(device,used,fstype))
total_used_network_shares = total_used_network_shares + int(used)
elif (mountpoint.startswith('/run/gluster/snaps/')):
self.log("Not Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device,used,mountpoint))
total_used_gluster = total_used_gluster + int(used)
else:
self.log("Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device,used,mountpoint))
total_used = total_used + int(used) #return in KB
if not len(network_fs_types) == 0:
HandlerUtility.add_to_telemetery_data("networkFSTypeInDf",str(network_fs_types))
HandlerUtility.add_to_telemetery_data("totalUsedNetworkShare",str(total_used_network_shares))
self.log("Total used space in Bytes of network shares : {0}".format(total_used_network_shares * 1024))
if total_used_gluster !=0 :
HandlerUtility.add_to_telemetery_data("glusterFSSize",str(total_used_gluster))
self.log("Total used space in Bytes : {0}".format(total_used * 1024))
return total_used * 1024,False #Converting into Bytes
except Exception as e:
errMsg = 'Unable to fetch total used space with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
return 0,True
def get_storage_details(self,total_size,failure_flag):
self.storageDetailsObj = Utils.Status.StorageDetails(self.partitioncount, total_size, False, failure_flag)
self.log("partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed))
return self.storageDetailsObj
def SetExtErrorCode(self, extErrorCode):
if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success :
self.ExtErrorCode = extErrorCode
def SetSnapshotConsistencyType(self, snapshotConsistency):
self.SnapshotConsistency = snapshotConsistency
def SetHealthStatusCode(self, healthStatusCode):
self.HealthStatusCode = healthStatusCode
def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj,total_size,failure_flag):
tstamp = time.strftime(DateTimeFormat, time.gmtime())
formattedMessage = Utils.Status.FormattedMessage("en-US",message)
stat_obj = Utils.Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(total_size,failure_flag), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj)
top_stat_obj = Utils.Status.TopLevelStatus(self._context._version, tstamp, stat_obj)
return top_stat_obj
def get_extension_version(self):
try:
cur_dir = os.getcwd()
cur_extension = cur_dir.split("/")[-1]
extension_version = cur_extension.split("-")[-1]
return extension_version
except Exception as e:
errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
extension_version="Unknown"
return extension_version
def get_wala_version(self):
try:
file_pointer = open('/var/log/waagent.log','r')
waagent_version = ''
for line in file_pointer:
if 'Azure Linux Agent Version' in line:
waagent_version = line.split(':')[-1]
if waagent_version[:-1]=="": #for removing the trailing '\n' character
waagent_version = self.get_wala_version_from_command()
return waagent_version
else:
waagent_version = waagent_version[:-1].split("-")[-1] #getting only version number
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
waagent_version="Unknown"
return waagent_version
def get_wala_version_from_command(self):
try:
cur_dir = os.getcwd()
os.chdir("..")
p = subprocess.Popen(['/usr/sbin/waagent', '-version'], stdout=subprocess.PIPE)
process_wait_time = 30
while(process_wait_time > 0 and p.poll() is None):
time.sleep(1)
process_wait_time -= 1
out = p.stdout.read()
out = str(out)
if "Goal state agent: " in out:
waagent_version = out.split("Goal state agent: ")[1].strip()
else:
out = out.split(" ")
waagent = out[0]
waagent_version = waagent.split("-")[-1] #getting only version number
os.chdir(cur_dir)
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
waagent_version="Unknown"
return waagent_version
def get_dist_info(self):
try:
if 'FreeBSD' in platform.system():
                release = re.sub(r'\-.*\Z', '', str(platform.release()))
return "FreeBSD",release
if 'NS-BSD' in platform.system():
                release = re.sub(r'\-.*\Z', '', str(platform.release()))
return "NS-BSD", release
if 'linux_distribution' in dir(platform):
distinfo = list(platform.linux_distribution(full_distribution_name=0))
# remove trailing whitespace in distro name
if(distinfo[0] == ''):
osfile= open("/etc/os-release", "r")
for line in osfile:
lists=str(line).split("=")
if(lists[0]== "NAME"):
distroname = lists[1].split("\"")
if(lists[0]=="VERSION"):
distroversion = lists[1].split("\"")
osfile.close()
return distroname[1]+"-"+distroversion[1],platform.release()
distinfo[0] = distinfo[0].strip()
return distinfo[0]+"-"+distinfo[1],platform.release()
else:
distinfo = platform.dist()
return distinfo[0]+"-"+distinfo[1],platform.release()
except Exception as e:
errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
return "Unkonwn","Unkonwn"
def substat_new_entry(self,sub_status,code,name,status,formattedmessage):
sub_status_obj = Utils.Status.SubstatusObj(code,name,status,formattedmessage)
sub_status.append(sub_status_obj)
return sub_status
def timedelta_total_seconds(self, delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
@staticmethod
def add_to_telemetery_data(key,value):
HandlerUtility.telemetry_data[key]=value
def add_telemetry_data(self):
os_version,kernel_version = self.get_dist_info()
HandlerUtility.add_to_telemetery_data("guestAgentVersion",self.get_wala_version_from_command())
HandlerUtility.add_to_telemetery_data("extensionVersion",self.get_extension_version())
HandlerUtility.add_to_telemetery_data("osVersion",os_version)
HandlerUtility.add_to_telemetery_data("kernelVersion",kernel_version)
def convert_telemetery_data_to_bcm_serializable_format(self):
HandlerUtility.serializable_telemetry_data = []
for k,v in HandlerUtility.telemetry_data.items():
each_telemetry_data = {}
each_telemetry_data["Value"] = v
each_telemetry_data["Key"] = k
HandlerUtility.serializable_telemetry_data.append(each_telemetry_data)
def do_status_report(self, operation, status, status_code, message, taskId = None, commandStartTimeUTCTicks = None, snapshot_info = None,total_size = 0,failure_flag = True ):
self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
sub_stat = []
stat_rept = []
self.add_telemetry_data()
snapshotTelemetry = ""
if CommonVariables.snapshotCreator in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.snapshotCreator , HandlerUtility.telemetry_data[CommonVariables.snapshotCreator])
if CommonVariables.hostStatusCodePreSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodePreSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodePreSnapshot])
if CommonVariables.hostStatusCodeDoSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodeDoSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodeDoSnapshot])
if CommonVariables.statusBlobUploadError in HandlerUtility.telemetry_data.keys():
message = "{0} {1}={2}, ".format(message , CommonVariables.statusBlobUploadError , HandlerUtility.telemetry_data[CommonVariables.statusBlobUploadError])
message = message + snapshotTelemetry
vm_health_obj = Utils.Status.VmHealthInfoObj(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode], int(self.ExtErrorCode))
consistencyTypeStr = CommonVariables.consistency_crashConsistent
if (self.SnapshotConsistency != Utils.Status.SnapshotConsistencyType.crashConsistent):
if (status_code == CommonVariables.success_appconsistent):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.applicationConsistent
consistencyTypeStr = CommonVariables.consistency_applicationConsistent
elif (status_code == CommonVariables.success):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.fileSystemConsistent
consistencyTypeStr = CommonVariables.consistency_fileSystemConsistent
else:
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
consistencyTypeStr = CommonVariables.consistency_none
HandlerUtility.add_to_telemetery_data("consistencyType", consistencyTypeStr)
extensionResponseObj = Utils.Status.ExtensionResponse(message, self.SnapshotConsistency, "")
message = str(json.dumps(extensionResponseObj, cls = ComplexEncoder))
self.convert_telemetery_data_to_bcm_serializable_format()
stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.serializable_telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size,failure_flag)
time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
time_span = self.timedelta_total_seconds(time_delta) * 1000
date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'
stat_rept.timestampUTC = date_place_holder
date_string = r'\/Date(' + str((int)(time_span)) + r')\/'
stat_rept = "[" + json.dumps(stat_rept, cls = ComplexEncoder) + "]"
stat_rept = stat_rept.replace(date_place_holder,date_string)
# Add Status as sub-status for Status to be written on Status-File
sub_stat = self.substat_new_entry(sub_stat,'0',stat_rept,'success',None)
if self.get_public_settings()[CommonVariables.vmType].lower() == CommonVariables.VmTypeV2.lower() and CommonVariables.isTerminalStatus(status) :
status = CommonVariables.status_success
stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None,total_size,failure_flag)
stat_rept_file = "[" + json.dumps(stat_rept_file, cls = ComplexEncoder) + "]"
# rename all other status files, or the WALA would report the wrong
# status file.
# because the wala choose the status file with the highest sequence
# number to report.
return stat_rept, stat_rept_file
def write_to_status_file(self, stat_rept_file):
try:
if self._context._status_file:
with open(self._context._status_file,'w+') as f:
f.write(stat_rept_file)
except Exception as e:
errMsg = 'Status file creation failed with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
def is_status_file_exists(self):
try:
if os.path.exists(self._context._status_file):
return True
else:
return False
except Exception as e:
self.log("exception is getting status file" + traceback.format_exc())
return False
def backup_settings_status_file(self, _seq_no):
self.log("current seq no is " + _seq_no)
for subdir, dirs, files in os.walk(self._context._config_dir):
for file in files:
try:
if(file.endswith('.settings') and file != (_seq_no + ".settings")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._config_dir,file), join(self._context._config_dir,new_file_name))
except Exception as e:
self.log("failed to rename the status file.")
for subdir, dirs, files in os.walk(self._context._status_dir):
for file in files:
try:
if(file.endswith('.status') and file != (_seq_no + ".status")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._status_dir,file), join(self._context._status_dir, new_file_name))
except Exception as e:
self.log("failed to rename the status file.")
def do_exit(self, exit_code, operation,status,code,message):
try:
HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]))
self.do_status_report(operation, status,code,message)
except Exception as e:
self.log("Can't update status: " + str(e))
sys.exit(exit_code)
def get_handler_settings(self):
return self._context._config['runtimeSettings'][0]['handlerSettings']
def get_protected_settings(self):
return self.get_handler_settings().get('protectedSettings')
def get_public_settings(self):
return self.get_handler_settings().get('publicSettings')
def is_prev_in_transition(self):
curr_seq = self.get_last_seq()
last_seq = curr_seq - 1
if last_seq >= 0:
self.log("previous status and path: " + str(last_seq) + " " + str(self._context._status_dir))
status_file_prev = os.path.join(self._context._status_dir, str(last_seq) + '_status')
if os.path.isfile(status_file_prev) and os.access(status_file_prev, os.R_OK):
searchfile = open(status_file_prev, "r")
for line in searchfile:
if "Transition" in line:
self.log("transitioning found in the previous status file")
searchfile.close()
return True
searchfile.close()
return False
def get_prev_log(self):
with open(self._context._log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 300):
lines = lines[-300:]
return ''.join(str(x) for x in lines)
else:
return ''.join(str(x) for x in lines)
def get_shell_script_log(self):
lines = ""
try:
with open(self._context._shell_log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 10):
lines = lines[-10:]
return ''.join(str(x) for x in lines)
except Exception as e:
self.log("Can't receive shell log file: " + str(e))
return lines
def update_settings_file(self):
if(self._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings') != None):
del self._context._config['runtimeSettings'][0]['handlerSettings']['protectedSettings']
self.log("removing the protected settings")
waagent.SetFileContents(self._context._settings_file,json.dumps(self._context._config))
def UriHasSpecialCharacters(self, blobs):
uriHasSpecialCharacters = False
if blobs is not None:
for blob in blobs:
blobUri = str(blob.split("?")[0])
if '%' in blobUri:
self.log(blobUri + " URI has special characters")
uriHasSpecialCharacters = True
return uriHasSpecialCharacters
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj,'convertToDictionary'):
return obj.convertToDictionary()
else:
return obj.__dict__
|
the-stack_0_12976 | """
Contains all the config for the Flask App
"""
import os
class BaseConfig:
"""
Base configuration
"""
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
BCRYPT_LOG_ROUNDS = 13
TOKEN_EXPIRATION_DAYS = 1
TOKEN_EXPIRATION_SECONDS = 0
class DevelopmentConfig(BaseConfig):
"""
Development configuration
"""
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
DEBUG_TB_ENABLED = True
BCRYPT_LOG_ROUNDS = 4
class TestingConfig(BaseConfig):
"""
Testing Configuration
"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL")
DEBUG_TB_ENABLED = True
BCRYPT_LOG_ROUNDS = 4
TOKEN_EXPIRATION_DAYS = 0
TOKEN_EXPIRATION_SECONDS = 3
class ProductionConfig(BaseConfig):
"""
Production configuration
"""
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
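# Illustrative usage (not part of the original file): the app factory would
# typically select one of these classes at startup, for example
#   app.config.from_object("project.config.DevelopmentConfig")
# where the dotted path depends on how this module is packaged.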
|
the-stack_0_12977 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, regularizers, constraints
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow_addons.layers import InstanceNormalization, SpectralNormalization
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import os
import numpy as np
import time
from PIL import Image
from math import log2
import random
from datagen import dataGenerator, printProgressBar
from models import makeGen, makeDisc
class GAN:
def __init__(self, data, test_data, image_size, model_name = "StarGAN", channels=16, size="normal", verbose=False, batch_size = 6, learning_rate = 0.0001):
#data: A list of folder names inside of the data folder to generate data from for training.
#test_data: A list of folder names inside of the data folder to generate data for testing. Can be the same as data. Should be different so that you can more easily see collapse.
#image_size: A tuple or int (if square) depicting the size of the desired size of the images for training. The data generator will resize the images to this size, and the model will train on this size.
#model_name: A name for the model. Used for the folder in which results and checkpoints are saved.
#channels: The number of channels to be used at each step in the model before multiplication. Recommended 16.
#verbose: Whether or not the data generators will create output showing their status while generating the data.
#batch_Size: The batch size for the model.
#learning_rate: The learning rate for the model. The discriminator's will be double this value.
self.MODELNAME = model_name
self.CKPT = os.path.dirname(os.path.realpath(__file__)) + "\\" + self.MODELNAME + "\\checkpoints\\"
self.imagedir = os.path.dirname(os.path.realpath(__file__)) + "\\" + self.MODELNAME
self.verbose = verbose
#Converts an integer into a tuple, ensures an integer or tuple is given.
if((type(image_size) is not tuple)):
if(type(image_size) is int):
self.image_size = (image_size, image_size)
else:
print("Expected tuple (x,y) or int for image size.")
exit()
else:
self.image_size = image_size
#Prints an error message if the dimensions are incompatible with the training of the model.
if((self.image_size[0] % 8 != 0) or (self.image_size[1] % 8 != 0)):
print("Image dimensions must be divisible by 8 for the model to train! Please adjust your image sizes.")
exit()
#Try making each directory, if it fails it generally means the folder already exists, so continue regardless.
try:
os.makedirs(self.imagedir)
except OSError as error:
pass
try:
os.makedirs(self.CKPT)
except OSError as error:
pass
#Create both the training and testing datagenerators using a list of strings
#containing the folder names inside of the data folder.
#The first string will have the first label, and so on.
self.datagens = []
for item in data:
self.datagens.append(dataGenerator(item, self.image_size, verbose = self.verbose, resize=True))
self.testData= []
for item in test_data:
self.testData.append(dataGenerator(item, self.image_size, verbose = self.verbose, resize=True))
#Determine the number of labels in the model.
self.NUMLABELS = len(self.datagens)
#Make the generator and discriminator as specified in models.py either normal or large sized.
self.cha = channels
self.BATCH_SIZE = batch_size
self.gen = makeGen(self.cha, self.NUMLABELS, self.image_size)
self.disc = makeDisc(self.cha, self.NUMLABELS, self.image_size)
#Setup the optimizers
self.discOpt = tf.keras.optimizers.Adam(learning_rate=learning_rate*2, beta_1=0.0, beta_2=0.99)#
self.genOpt = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.0, beta_2=0.99)#
@tf.function
def trainStep(self, images, labels):
#function to train the model.
def grad_loss(samples, output, k=1.0):
#An implementation of a two-sided local gradient penalty.
#Helps to smooth out gradients in the discriminator.
#Not strictly necessary, used to improve stability of discirminator.
init_grad = tf.gradients(output, samples)[0]
squared_grad = tf.square(init_grad)
sum_grad = tf.sqrt(K.sum(squared_grad, axis=[1,2,3]))
penalty = tf.maximum(sum_grad-k, tf.keras.backend.zeros_like(sum_grad))
return tf.reduce_mean(penalty)
with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
#Running data through models
generatedImage = self.gen([images,labels[1]],training=True)
restoredImage = self.gen([generatedImage,labels[0]], training=True)
genfakeOut = K.sum(self.disc([generatedImage],training=True) * labels[1], axis=1) #Multiply by label due to multi-task discriminator.
discRealOut = K.sum(self.disc([images],training=True) * labels[0], axis=1) #Multiply by label due to multi-task discriminator.
#Loss functions
cycleLoss = K.mean(K.abs(images - restoredImage)) * 5
genLoss = K.mean(genfakeOut) + cycleLoss #Due to multi-task discriminator, label comparison and real/fake are done in one with genfakeout.
discLoss = K.mean(K.relu(1.0 - genfakeOut) + K.relu(1.0 + discRealOut)) + K.mean(grad_loss(images, discRealOut)) * 10 #Hinge loss plust gradient penalty.
#Calculate and apply gradients.
genGradients = genTape.gradient(genLoss,self.gen.trainable_variables)
discGradients = discTape.gradient(discLoss,self.disc.trainable_variables)
self.genOpt.apply_gradients(zip(genGradients,self.gen.trainable_variables))
self.discOpt.apply_gradients(zip(discGradients,self.disc.trainable_variables))
return (genLoss, discLoss)
def labelMaker(self, index, maxSize=None, batch=None):
#Creates a one hot vector for the label of the image.
#Index: the index for where the value will be one.
#maxSize: typically the number of labels. How long to make the vector.
#batch: the batch size, or how many labels to produce.
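        #e.g. labelMaker(1, maxSize=3, batch=2) -> [[0., 1., 0.], [0., 1., 0.]]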
if maxSize == None:
maxSize = self.NUMLABELS
if batch == None:
batch = self.BATCH_SIZE
labels = np.ones([batch]) * index
return to_categorical(labels, num_classes = maxSize)
def train(self, steps = 100000, curStep = 1):
#The train function repeats the training step to train the model.
#steps: The number of steps to train.
#curStep: The step to begin training on. (e.g. In case you load weights from 50000 steps and want to retrain from 50000 steps onward.)
#Setup some variables to compute train time and store loss values.
genLoss = 0
discLoss = 0
trainTime = time.time()
start = time.time()
for step in range(curStep,steps+1):
#Randomly select a source and target label and batch.
randInt = random.randint(0, self.NUMLABELS-1)
batch = self.datagens[randInt].get_batch(self.BATCH_SIZE)
smalllabelsReal = self.labelMaker(randInt)
#Selects a class to convert to.
randInt = (random.randint(1,self.NUMLABELS-1)+randInt) % self.NUMLABELS
smalllabelsNew = self.labelMaker(randInt)
labels = [smalllabelsReal,smalllabelsNew]
stepGenLoss, stepDiscLoss = self.trainStep(batch, labels)
#Print the progress bar so that you may see how far along the model is.
printProgressBar(step % 1000, 999, decimals=2)
#Save variables to compute the average.
genLoss += stepGenLoss
discLoss += stepDiscLoss
if (step) % 1000 == 0:
#At every 1000 steps, generate an image containing all possible conversions, and show the average loss values for the previous 1000 steps.
self.makeImages(step)
print("At step {}. Time for 1000 steps was {} seconds".format(step,time.time()-start))
print("Average generator loss: {}, Average discriminator loss: {}".format((genLoss / 1000.0),(discLoss / 1000.0)))
genLoss = 0
discLoss = 0
start = time.time()
if (step) % 10000 == 0:
#At every 10000 steps, save the weights of the generator and discriminator so they may be loaded.
self.gen.save_weights(self.CKPT + f"{step}GEN.ckpt")
self.disc.save_weights(self.CKPT + f"{step}DISC.ckpt")
print("Gan saved!")
#At the end of training, show the total amount of time it took.
print(f"Time for training was {(time.time() - trainTime) / 60.0} minutes")
return
def makeImages(self, step, numEx = 5):
#A function to create an array of images. The first row is real, the second row is converted to the first class, the third row is converted to the second class, and so on.
        #Step: only used in naming; typically the training step at which the image was generated.
imageRows = []
#For each class, translate to each other class.
#Original images will be on the top row in order of label.
#Every row beneath will be a different label.
for i in range(self.NUMLABELS):
batch = self.testData[i].get_batch(numEx)
#Place all of the original images on one row.
rowstart = batch[0]
for k in range(1,numEx):
rowstart = np.append(rowstart, batch[k], 1)
for j in range(self.NUMLABELS):
results = self.gen([batch, self.labelMaker(j, self.NUMLABELS, numEx)], training=False)
if i == j: #Don't convert to your own class! Instead show a black box.
results = np.zeros_like(results)
rowAdd = results[0]
for k in range(1,numEx):
rowAdd = np.append(rowAdd, results[k], 1)
rowstart = np.append(rowstart, rowAdd, 0)
imageRows.append(rowstart)
output = imageRows[0]
for i in range(1,len(imageRows)):
            output = np.append(output, imageRows[i], 1) #All originals will be on the top row
output = np.clip(np.squeeze(output), 0.0, 1.0)
plt.imsave(self.imagedir + f"\\{step}.png", output)
def loadSave(self, step):
#A function to load the weights of a trained model. Must be the same size and channels as initialized model.
#Step: What step to laod from.
self.gen.load_weights(self.CKPT + f"{step}GEN.ckpt")
self.disc.load_weights(self.CKPT + f"{step}DISC.ckpt")
def translate(self, image, target):
#Converts a single image to the target class. Returns the translated image.
#image: an array of (imageSize1,imageSize2,channels).
#target: an index for what class to convert to (starting at 0)
image = np.expand_dims(image, 0)
label = self.labelMaker(target, batch=1)
return np.squeeze(self.gen([image, label], training=False), axis=0)
#An example of how you could run the model using class folders "/data/classA_folder/", etc image size 256, model name "StarGAN", channel coefficient of 16, and normal size.
# if __name__ == "__main__":
# data = ["classA_folder", "classB_folder", "classC_folder"] #In this case, class A has an index of 0, B 1, C 2.
# testdata = ["classA_testfolder", "classB_testfolder", "classC_testfolder"]
# starGAN = GAN(data, testdata, 128, "StarGAN", 24)
# starGAN.makeImages(-999)
# starGAN.train(200000)
# exit()
|
the-stack_0_12978 | import numpy as np
from keras_cn_parser_and_analyzer.library.classifiers.cnn_lstm import WordVecCnnLstm
from keras_cn_parser_and_analyzer.library.utility.simple_data_loader import load_text_label_pairs
from keras_cn_parser_and_analyzer.library.utility.text_fit import fit_text
def main():
random_state = 42
np.random.seed(random_state)
output_dir_path = './models'
data_file_path = '../data/training_data'
text_data_model = fit_text(data_file_path, label_type='line_label')
text_label_pairs = load_text_label_pairs(data_file_path, label_type='line_label')
classifier = WordVecCnnLstm()
batch_size = 64
epochs = 20
history = classifier.fit(text_data_model=text_data_model,
model_dir_path=output_dir_path,
text_label_pairs=text_label_pairs,
batch_size=batch_size, epochs=epochs,
test_size=0.3,
random_state=random_state)
if __name__ == '__main__':
main()
|
the-stack_0_12979 | # https://www.hackerrank.com/challenges/xor-se/problem
# An array, , is defined as follows:
# A[0] = 0
# A[x] = A[x-1]^x
# for , where is the symbol for XOR
# You will be given a left and right index . You must determine the XOR sum of the segment of A as
# A[l]^A[l+1]...^A[r].
# For example, A = [0,1,3,0,4,1,7,0,8] . The segment from l=1 to r=4 sums to 1^3^0^4 = 6.
# Print the answer to each question.
# Function Description
# Complete the xorSequence function in the editor below. It should return the integer value calculated.
# xorSequence has the following parameter(s):
# l: the lower index of the range to sum
# r: the higher index of the range to sum
# Input Format
# The first line contains an integer q, the number of questions.
# Each of the next q lines contains two space-separated integers, l[i] and r[i], the inclusive left and right indexes of the
# segment to query.
# Constraints
# 1 <= q <= 10^5
# 1 <= l[i] <= r[i] <= 10^15
# Output Format
# On a new line for each test case, print the XOR-Sum of A's elements in the inclusive range between indices l[i] and r[i].
# Sample Input 0
# 3
# 2 4
# 2 8
# 5 9
# Sample Output 0
# 7
# 9
# 15
# Explanation 0
# The beginning of our array looks like this: [0,1,3,0,4,1,7,0,8,...]
import math
import os
import random
import re
import sys
# Complete the xorSequence function below.
def xorSequence(l, r):
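    # The helper A(x) below returns the cumulative XOR A[0]^A[1]^...^A[x] of the
    # sequence, which repeats with period 8, so the answer is A(l-1)^A(r).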
def A(x):
a = x%8
if(a == 0 or a == 1):
return x
if(a == 2 or a == 3):
return 2
if(a == 4 or a == 5):
return x+2
if(a == 6 or a == 7):
            return 0
    ans = A(l-1)^A(r)
    return ans
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
lr = input().split()
l = int(lr[0])
r = int(lr[1])
result = xorSequence(l, r)
fptr.write(str(result) + '\n')
fptr.close()
|
the-stack_0_12980 | from . import DATABASE, log
import os
from flask import Blueprint, render_template, flash
from flask_login import login_required, current_user
views = Blueprint("views", __name__)
@views.route("/")
@views.route("/home")
@login_required
def home():
log.debug("Received a GET request at `/home`")
return render_template("home.html", user=current_user)
@views.route("/files/username=<username>")
@login_required
def get_files(username: str):
# check whether the folder already exists
files = {}
if os.path.isdir(f"{DATABASE}\\{username}"):
os.chdir(f"{DATABASE}\\{username}")
files = {i: f"{DATABASE}\\{username}\\{i}" for i in os.listdir()}
else:
# creating the folder
os.mkdir(f"{DATABASE}/{username}")
return files
@views.route("/get_file/filename=<filename>&username=<username>")
@login_required
def get_specific_file(filename: str, username: str):
if os.path.isdir(f"{DATABASE}\\{username}"):
os.chdir(f"{DATABASE}\\{username}")
if os.path.isfile(filename):
with open(filename, "r") as file:
return file.read()
    # A Flask view must return a response; fall through to an error when the
    # user folder or the file does not exist.
    flash("File not found!", category="error")
    return "File not found!", 404
|
the-stack_0_12982 | """MLP Merge Model.
A model composed only of a multi-layer perceptron (MLP), which maps
real-valued inputs to real-valued outputs. This model is called an
MLP Merge Model because it takes two inputs and concatenates the second
input with the layer at a specified index. It can be merged with any layer
from the input layer to the last hidden layer.
"""
import tensorflow as tf
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
class MLPMergeModel(Model):
"""MLP Merge Model.
Args:
output_dim (int): Dimension of the network output.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
concat_layer (int): The index of layers at which to concatenate
input_var2 with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(input_var1) while an index of -1 points to the last hidden
layer. Default parameter points to second layer from the end.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name='MLPMergeModel',
hidden_sizes=(32, 32),
concat_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_sizes = hidden_sizes
self._concat_layer = concat_layer
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['input_var1', 'input_var2']
# pylint: disable=arguments-differ
def _build(self, state_input, action_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
action_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
input_var2=action_input,
concat_layer=self._concat_layer,
name='mlp_concat',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
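# Illustrative note (not part of the original module): with the defaults
# hidden_sizes=(32, 32) and concat_layer=-2, action_input is concatenated with
# the output of the first hidden layer, so only the final hidden layer sees
# both state and action features -- the usual layout for a Q-function MLP.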
|
the-stack_0_12983 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import os
import sys
# Which CoAPthon version to use
if sys.version_info.major == 2:
COAPTHON="CoAPThon"
ZEROCONF="zeroconf2"
else:
COAPTHON="CoAPThon3"
ZEROCONF="zeroconf"
here = path.abspath(path.dirname(__file__))
long_description = """
Control program (and module) for iotsa devices. Allows finding of iotsa devices on the local
WiFi network or in the physical vicinity, inspecting and changing configuration
of those devices and uploading new firmware over the air.
"""
# The version number for the iotsaControl package
VERSION="2.1"
setup(
name='iotsaControl',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION,
description='Control iotsa devices',
long_description=long_description,
# The project's main homepage.
url='http://www.cwi.nl',
# Author details
author='Jack Jansen',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Communications',
'Topic :: Home Automation',
        'Topic :: Internet',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
#keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["iotsaControl"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["future", "requests", "esptool", ZEROCONF, COAPTHON],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data=package_data,
#include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'iotsaControl=iotsaControl.__main__:main',
],
},
)
|
the-stack_0_12984 | import random
import pytest
import numpy as np
import os
from ray import cloudpickle as pickle
from ray import ray_constants
from ray.actor import ActorClassInheritanceException
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import sys
import tempfile
import datetime
from ray._private.test_utils import (
client_test_enabled,
wait_for_condition,
wait_for_pid_to_exit,
)
from ray.tests.client_test_utils import create_remote_signal_actor
import ray
# NOTE: We have to import setproctitle after ray because we bundle setproctitle
# with ray.
import setproctitle # noqa
@pytest.mark.parametrize("set_enable_auto_connect", ["1", "0"], indirect=True)
def test_caching_actors(shutdown_only, set_enable_auto_connect):
# Test defining actors before ray.init() has been called.
@ray.remote
class Foo:
def __init__(self):
pass
def get_val(self):
return 3
if set_enable_auto_connect == "0":
# Check that we can't actually create actors before ray.init() has
# been called.
with pytest.raises(Exception):
f = Foo.remote()
ray.init(num_cpus=1)
else:
# Actor creation should succeed here because ray.init() auto connection
# is (by default) enabled.
f = Foo.remote()
f = Foo.remote()
assert ray.get(f.get_val.remote()) == 3
# https://github.com/ray-project/ray/issues/20554
def test_not_reusing_task_workers(shutdown_only):
@ray.remote
def create_ref():
ref = ray.put(np.zeros(100_000_000))
return ref
@ray.remote
class Actor:
def __init__(self):
return
def foo(self):
return
ray.init(num_cpus=1, object_store_memory=1000_000_000)
wrapped_ref = create_ref.remote()
print(ray.get(ray.get(wrapped_ref)))
# create_ref worker gets reused as an actor.
a = Actor.remote()
ray.get(a.foo.remote())
# Actor will get force-killed.
del a
# Flush the object store.
for _ in range(10):
ray.put(np.zeros(100_000_000))
# Object has been evicted and owner has died. Throws OwnerDiedError.
print(ray.get(ray.get(wrapped_ref)))
def test_remote_function_within_actor(ray_start_10_cpus):
# Make sure we can use remote funtions within actors.
# Create some values to close over.
val1 = 1
val2 = 2
@ray.remote
def f(x):
return val1 + x
@ray.remote
def g(x):
return ray.get(f.remote(x))
@ray.remote
class Actor:
def __init__(self, x):
self.x = x
self.y = val2
self.object_refs = [f.remote(i) for i in range(5)]
self.values2 = ray.get([f.remote(i) for i in range(5)])
def get_values(self):
return self.x, self.y, self.object_refs, self.values2
def f(self):
return [f.remote(i) for i in range(5)]
def g(self):
return ray.get([g.remote(i) for i in range(5)])
def h(self, object_refs):
return ray.get(object_refs)
actor = Actor.remote(1)
values = ray.get(actor.get_values.remote())
assert values[0] == 1
assert values[1] == val2
assert ray.get(values[2]) == list(range(1, 6))
assert values[3] == list(range(1, 6))
assert ray.get(ray.get(actor.f.remote())) == list(range(1, 6))
assert ray.get(actor.g.remote()) == list(range(1, 6))
assert ray.get(actor.h.remote([f.remote(i) for i in range(5)])) == list(range(1, 6))
def test_define_actor_within_actor(ray_start_10_cpus):
    # Make sure we can define actors within actors.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def new_actor(self, z):
@ray.remote
class Actor2:
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
self.actor2 = Actor2.remote(z)
def get_values(self, z):
self.new_actor(z)
return self.x, ray.get(self.actor2.get_value.remote())
actor1 = Actor1.remote(3)
assert ray.get(actor1.get_values.remote(5)) == (3, 5)
def test_use_actor_within_actor(ray_start_10_cpus):
# Make sure we can use actors within actors.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_val(self):
return self.x
@ray.remote
class Actor2:
def __init__(self, x, y):
self.x = x
self.actor1 = Actor1.remote(y)
def get_values(self, z):
return self.x, ray.get(self.actor1.get_val.remote())
actor2 = Actor2.remote(3, 4)
assert ray.get(actor2.get_values.remote(5)) == (3, 4)
def test_use_actor_twice(ray_start_10_cpus):
# Make sure we can call the same actor using different refs.
@ray.remote
class Actor1:
def __init__(self):
self.count = 0
def inc(self):
self.count += 1
return self.count
@ray.remote
class Actor2:
def __init__(self):
pass
def inc(self, handle):
return ray.get(handle.inc.remote())
a = Actor1.remote()
a2 = Actor2.remote()
assert ray.get(a2.inc.remote(a)) == 1
assert ray.get(a2.inc.remote(a)) == 2
def test_define_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can define and use actors within remote functions.
@ray.remote
def f(x, n):
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
actor = Actor1.remote(x)
return ray.get([actor.get_value.remote() for _ in range(n)])
assert ray.get(f.remote(3, 1)) == [3]
assert ray.get([f.remote(i, 20) for i in range(10)]) == [
20 * [i] for i in range(10)
]
def test_use_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can create and use actors within remote functions.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_values(self):
return self.x
@ray.remote
def f(x):
actor = Actor1.remote(x)
return ray.get(actor.get_values.remote())
assert ray.get(f.remote(3)) == 3
def test_actor_import_counter(ray_start_10_cpus):
# This is mostly a test of the export counters to make sure that when
# an actor is imported, all of the necessary remote functions have been
# imported.
# Export a bunch of remote functions.
num_remote_functions = 50
for i in range(num_remote_functions):
@ray.remote
def f():
return i
@ray.remote
def g():
@ray.remote
class Actor:
def __init__(self):
# This should use the last version of f.
self.x = ray.get(f.remote())
def get_val(self):
return self.x
actor = Actor.remote()
return ray.get(actor.get_val.remote())
assert ray.get(g.remote()) == num_remote_functions - 1
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_method_metadata_cache(ray_start_regular):
class Actor(object):
pass
# The cache of ActorClassMethodMetadata.
cache = ray.actor.ActorClassMethodMetadata._cache
cache.clear()
# Check cache hit during ActorHandle deserialization.
A1 = ray.remote(Actor)
a = A1.remote()
assert len(cache) == 1
cached_data_id = [id(x) for x in list(cache.items())[0]]
for x in range(10):
a = pickle.loads(pickle.dumps(a))
assert len(ray.actor.ActorClassMethodMetadata._cache) == 1
assert [id(x) for x in list(cache.items())[0]] == cached_data_id
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_class_name(ray_start_regular):
@ray.remote
class Foo:
def __init__(self):
pass
Foo.remote()
g = ray.worker.global_worker.gcs_client
actor_keys = g.internal_kv_keys(
b"ActorClass", ray_constants.KV_NAMESPACE_FUNCTION_TABLE
)
assert len(actor_keys) == 1
actor_class_info = pickle.loads(
g.internal_kv_get(actor_keys[0], ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
)
assert actor_class_info["class_name"] == "Foo"
assert "test_actor" in actor_class_info["module"]
def test_actor_exit_from_task(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
print("Actor created")
def f(self):
return 0
@ray.remote
def f():
a = Actor.remote()
x_id = a.f.remote()
return [x_id]
x_id = ray.get(f.remote())[0]
print(ray.get(x_id)) # This should not hang.
def test_actor_init_error_propagated(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, error=False):
if error:
raise Exception("oops")
def foo(self):
return "OK"
actor = Actor.remote(error=False)
ray.get(actor.foo.remote())
actor = Actor.remote(error=True)
with pytest.raises(Exception, match=".*oops.*"):
ray.get(actor.foo.remote())
def test_keyword_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, arg2="a"):
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
def get_values(self, arg0, arg1=2, arg2="b"):
return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, "ab")
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, "ab")
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, "cd")
actor = Actor.remote(1, arg2="c")
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 3, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 1, "cd")
actor = Actor.remote(1, arg2="c", arg1=2)
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 4, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 2, "cd")
assert ray.get(actor.get_values.remote(arg2="d", arg1=0, arg0=2)) == (3, 2, "cd")
# Make sure we get an exception if the constructor is called
# incorrectly.
with pytest.raises(TypeError):
actor = Actor.remote()
with pytest.raises(TypeError):
actor = Actor.remote(0, 1, 2, arg3=3)
with pytest.raises(TypeError):
actor = Actor.remote(0, arg0=1)
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with pytest.raises(Exception):
ray.get(actor.get_values.remote())
def test_actor_name_conflict(ray_start_regular_shared):
@ray.remote
class A(object):
def foo(self):
return 100000
a = A.remote()
r = a.foo.remote()
results = [r]
for x in range(10):
@ray.remote
class A(object):
def foo(self):
return x
a = A.remote()
r = a.foo.remote()
results.append(r)
assert ray.get(results) == [100000, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_variable_number_of_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, *args):
self.arg0 = arg0
self.arg1 = arg1
self.args = args
def get_values(self, arg0, arg1=2, *args):
return self.arg0 + arg0, self.arg1 + arg1, self.args, args
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, (), ())
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, (), ())
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, ("c",), ("d",))
actor = Actor.remote(1, 2, "a", "b", "c", "d")
assert ray.get(actor.get_values.remote(2, 3, 1, 2, 3, 4)) == (
3,
5,
("a", "b", "c", "d"),
(1, 2, 3, 4),
)
@ray.remote
class Actor:
def __init__(self, *args):
self.args = args
def get_values(self, *args):
return self.args, args
a = Actor.remote()
assert ray.get(a.get_values.remote()) == ((), ())
a = Actor.remote(1)
assert ray.get(a.get_values.remote(2)) == ((1,), (2,))
a = Actor.remote(1, 2)
assert ray.get(a.get_values.remote(3, 4)) == ((1, 2), (3, 4))
def test_no_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
pass
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_no_constructor(ray_start_regular_shared):
@ray.remote
class Actor:
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_custom_classes(ray_start_regular_shared):
class Foo:
def __init__(self, x):
self.x = x
@ray.remote
class Actor:
def __init__(self, f2):
self.f1 = Foo(1)
self.f2 = f2
def get_values1(self):
return self.f1, self.f2
def get_values2(self, f3):
return self.f1, self.f2, f3
actor = Actor.remote(Foo(2))
results1 = ray.get(actor.get_values1.remote())
assert results1[0].x == 1
assert results1[1].x == 2
results2 = ray.get(actor.get_values2.remote(Foo(3)))
assert results2[0].x == 1
assert results2[1].x == 2
assert results2[2].x == 3
def test_actor_class_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
class Parent1(Grandparent):
PARENT1 = 6
class Parent2:
PARENT2 = 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@classmethod
def f(cls):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_actor_static_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
@staticmethod
def grandparent_static():
assert Grandparent.GRANDPARENT == 2
return 1
class Parent1(Grandparent):
PARENT1 = 6
@staticmethod
def parent1_static():
assert Parent1.PARENT1 == 6
return 2
def parent1(self):
assert Parent1.PARENT1 == 6
class Parent2:
PARENT2 = 7
def parent2(self):
assert Parent2.PARENT2 == 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@staticmethod
def f():
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_decorator_args(ray_start_regular_shared):
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote()
class Actor:
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(num_cpus=0, invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1, num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
def test_random_id_generation(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self):
pass
# Make sure that seeding numpy does not interfere with the generation
# of actor IDs.
np.random.seed(1234)
random.seed(1234)
f1 = Foo.remote()
np.random.seed(1234)
random.seed(1234)
f2 = Foo.remote()
assert f1._actor_id != f2._actor_id
@pytest.mark.skipif(client_test_enabled(), reason="differing inheritence structure")
def test_actor_inheritance(ray_start_regular_shared):
class NonActorBase:
def __init__(self):
pass
# Test that an actor class can inherit from a non-actor class.
@ray.remote
class ActorBase(NonActorBase):
def __init__(self):
pass
# Test that you can't instantiate an actor class directly.
with pytest.raises(Exception, match="cannot be instantiated directly"):
ActorBase()
# Test that you can't inherit from an actor class.
with pytest.raises(
ActorClassInheritanceException,
match="Inheriting from actor classes is not " "currently supported.",
):
class Derived(ActorBase):
def __init__(self):
pass
def test_multiple_return_values(ray_start_regular_shared):
@ray.remote
class Foo:
def method0(self):
return 1
@ray.method(num_returns=1)
def method1(self):
return 1
@ray.method(num_returns=2)
def method2(self):
return 1, 2
@ray.method(num_returns=3)
def method3(self):
return 1, 2, 3
f = Foo.remote()
id0 = f.method0.remote()
assert ray.get(id0) == 1
id1 = f.method1.remote()
assert ray.get(id1) == 1
id2a, id2b = f.method2.remote()
assert ray.get([id2a, id2b]) == [1, 2]
id3a, id3b, id3c = f.method3.remote()
assert ray.get([id3a, id3b, id3c]) == [1, 2, 3]
def test_options_num_returns(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self):
return 1, 2
f = Foo.remote()
obj = f.method.remote()
assert ray.get(obj) == (1, 2)
obj1, obj2 = f.method.options(num_returns=2).remote()
assert ray.get([obj1, obj2]) == [1, 2]
def test_options_name(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self, name):
assert setproctitle.getproctitle() == f"ray::{name}"
f = Foo.remote()
ray.get(f.method.options(name="foo").remote("foo"))
ray.get(f.method.options(name="bar").remote("bar"))
def test_define_actor(ray_start_regular_shared):
@ray.remote
class Test:
def __init__(self, x):
self.x = x
def f(self, y):
return self.x + y
t = Test.remote(2)
assert ray.get(t.f.remote(1)) == 3
# Make sure that calling an actor method directly raises an exception.
with pytest.raises(Exception):
t.f(1)
def test_actor_deletion(ray_start_regular_shared):
    # Make sure that when an actor handle goes out of scope, the actor
# destructor is called.
@ray.remote
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
a = None
wait_for_pid_to_exit(pid)
actors = [Actor.remote() for _ in range(10)]
pids = ray.get([a.getpid.remote() for a in actors])
a = None
actors = None
[wait_for_pid_to_exit(pid) for pid in pids]
def test_actor_method_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
# Make sure that if we create an actor and call a method on it
# immediately, the actor doesn't get killed before the method is
# called.
assert ray.get(Actor.remote().method.remote()) == 1
def test_distributed_actor_handle_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
def getpid(self):
return os.getpid()
@ray.remote
def f(actor, signal):
ray.get(signal.wait.remote())
return ray.get(actor.method.remote())
SignalActor = create_remote_signal_actor(ray)
signal = SignalActor.remote()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
# Pass the handle to another task that cannot run yet.
x_id = f.remote(a, signal)
# Delete the original handle. The actor should not get killed yet.
del a
# Once the task finishes, the actor process should get killed.
ray.get(signal.send.remote())
assert ray.get(x_id) == 1
wait_for_pid_to_exit(pid)
def test_multiple_actors(ray_start_regular_shared):
@ray.remote
class Counter:
def __init__(self, value):
self.value = value
def increase(self):
self.value += 1
return self.value
def reset(self):
self.value = 0
num_actors = 5
num_increases = 50
# Create multiple actors.
actors = [Counter.remote(i) for i in range(num_actors)]
results = []
# Call each actor's method a bunch of times.
for i in range(num_actors):
results += [actors[i].increase.remote() for _ in range(num_increases)]
result_values = ray.get(results)
for i in range(num_actors):
v = result_values[(num_increases * i) : (num_increases * (i + 1))]
assert v == list(range(i + 1, num_increases + i + 1))
# Reset the actor values.
[actor.reset.remote() for actor in actors]
# Interweave the method calls on the different actors.
results = []
for j in range(num_increases):
results += [actor.increase.remote() for actor in actors]
result_values = ray.get(results)
for j in range(num_increases):
v = result_values[(num_actors * j) : (num_actors * (j + 1))]
assert v == num_actors * [j + 1]
def test_inherit_actor_from_class(ray_start_regular_shared):
# Make sure we can define an actor by inheriting from a regular class.
# Note that actors cannot inherit from other actors.
class Foo:
def __init__(self, x):
self.x = x
def f(self):
return self.x
def g(self, y):
return self.x + y
@ray.remote
class Actor(Foo):
def __init__(self, x):
Foo.__init__(self, x)
def get_value(self):
return self.f()
actor = Actor.remote(1)
assert ray.get(actor.get_value.remote()) == 1
assert ray.get(actor.g.remote(5)) == 6
def test_get_non_existing_named_actor(ray_start_regular_shared):
with pytest.raises(ValueError):
_ = ray.get_actor("non_existing_actor")
# https://github.com/ray-project/ray/issues/17843
def test_actor_namespace(ray_start_regular_shared):
@ray.remote
class Actor:
def f(self):
return "ok"
a = Actor.options(name="foo", namespace="f1").remote()
with pytest.raises(ValueError):
ray.get_actor(name="foo", namespace="f2")
a1 = ray.get_actor(name="foo", namespace="f1")
assert ray.get(a1.f.remote()) == "ok"
del a
def test_named_actor_cache(ray_start_regular_shared):
"""Verify that named actor cache works well."""
@ray.remote(max_restarts=-1)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
a = Counter.options(name="hi").remote()
first_get = ray.get_actor("hi")
assert ray.get(first_get.inc_and_get.remote()) == 1
second_get = ray.get_actor("hi")
assert ray.get(second_get.inc_and_get.remote()) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("hi")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
get_after_restart = Counter.options(name="hi").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
get_by_name = ray.get_actor("hi")
assert ray.get(get_by_name.inc_and_get.remote()) == 2
def test_named_actor_cache_via_another_actor(ray_start_regular_shared):
"""Verify that named actor cache works well with another actor."""
@ray.remote(max_restarts=0)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
    # A third actor that looks up the named actor, to show that the cache
    # doesn't break lookups from a third party.
@ray.remote(max_restarts=0)
class ActorGetter:
def get_actor_count(self, name):
actor = ray.get_actor(name)
return ray.get(actor.inc_and_get.remote())
    # Start an actor and get it by name in the driver.
a = Counter.options(name="foo").remote()
first_get = ray.get_actor("foo")
assert ray.get(first_get.inc_and_get.remote()) == 1
    # Start another actor as the third actor to get the named actor.
actor_getter = ActorGetter.remote()
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("foo")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
# Restart the named actor.
get_after_restart = Counter.options(name="foo").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
# Get the named actor from the third actor again.
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
    # Get the named actor by name in the driver again.
get_by_name = ray.get_actor("foo")
assert ray.get(get_by_name.inc_and_get.remote()) == 3
def test_wrapped_actor_handle(ray_start_regular_shared):
@ray.remote
class B:
def doit(self):
return 2
@ray.remote
class A:
def __init__(self):
self.b = B.remote()
def get_actor_ref(self):
return [self.b]
a = A.remote()
b_list = ray.get(a.get_actor_ref.remote())
assert ray.get(b_list[0].doit.remote()) == 2
@pytest.mark.skip("This test is just used to print the latency of creating 100 actors.")
def test_actor_creation_latency(ray_start_regular_shared):
# This test is just used to test the latency of actor creation.
@ray.remote
class Actor:
def get_value(self):
return 1
start = datetime.datetime.now()
actor_handles = [Actor.remote() for _ in range(100)]
actor_create_time = datetime.datetime.now()
for actor_handle in actor_handles:
ray.get(actor_handle.get_value.remote())
end = datetime.datetime.now()
print(
"actor_create_time_consume = {}, total_time_consume = {}".format(
actor_create_time - start, end - start
)
)
@pytest.mark.parametrize(
"exit_condition",
[
# "out_of_scope", TODO(edoakes): enable this once fixed.
"__ray_terminate__",
"ray.actor.exit_actor",
"ray.kill",
],
)
def test_atexit_handler(ray_start_regular_shared, exit_condition):
@ray.remote
class A:
def __init__(self, tmpfile, data):
import atexit
def f(*args, **kwargs):
with open(tmpfile, "w") as f:
f.write(data)
f.flush()
atexit.register(f)
def ready(self):
pass
def exit(self):
ray.actor.exit_actor()
data = "hello"
tmpfile = tempfile.NamedTemporaryFile("w+", suffix=".tmp", delete=False)
tmpfile.close()
a = A.remote(tmpfile.name, data)
ray.get(a.ready.remote())
if exit_condition == "out_of_scope":
del a
elif exit_condition == "__ray_terminate__":
ray.wait([a.__ray_terminate__.remote()])
elif exit_condition == "ray.actor.exit_actor":
ray.wait([a.exit.remote()])
elif exit_condition == "ray.kill":
ray.kill(a)
else:
assert False, "Unrecognized condition"
def check_file_written():
with open(tmpfile.name, "r") as f:
if f.read() == data:
return True
return False
    # ray.kill() should not trigger atexit handlers; all other methods should.
if exit_condition == "ray.kill":
assert not check_file_written()
else:
wait_for_condition(check_file_written)
os.unlink(tmpfile.name)
def test_return_actor_handle_from_actor(ray_start_regular_shared):
@ray.remote
class Inner:
def ping(self):
return "pong"
@ray.remote
class Outer:
def __init__(self):
self.inner = Inner.remote()
def get_ref(self):
return self.inner
outer = Outer.remote()
inner = ray.get(outer.get_ref.remote())
assert ray.get(inner.ping.remote()) == "pong"
def test_actor_autocomplete(ray_start_regular_shared):
"""
Test that autocomplete works with actors by checking that the builtin dir()
function works as expected.
"""
@ray.remote
class Foo:
def method_one(self) -> None:
pass
class_calls = [fn for fn in dir(Foo) if not fn.startswith("_")]
assert set(class_calls) == {"method_one", "options", "remote"}
f = Foo.remote()
methods = [fn for fn in dir(f) if not fn.startswith("_")]
assert methods == ["method_one"]
all_methods = set(dir(f))
assert all_methods == {"__init__", "method_one", "__ray_terminate__"}
method_options = [fn for fn in dir(f.method_one) if not fn.startswith("_")]
assert set(method_options) == {"options", "remote"}
def test_actor_mro(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self, x):
self.x = x
@classmethod
def factory_f(cls, x):
return cls(x)
def get_x(self):
return self.x
obj = Foo.factory_f(1)
assert obj.get_x() == 1
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_0_12985 | #!/usr/bin/env python
# ---------------------------------------------------------------------------------------
# configure.py: Athena++ configuration script in python. Original version by CJW.
#
# When configure.py is run, it uses the command line options and default settings to
# create custom versions of the files Makefile and src/defs.hpp from the template files
# Makefile.in and src/defs.hpp.in respectively.
#
# The following options are implememted:
# -h --help help message
# --prob=name use src/pgen/name.cpp as the problem generator
# --coord=xxx use xxx as the coordinate system
# --eos=xxx use xxx as the equation of state
# --flux=xxx use xxx as the Riemann solver
# --nghost=xxx set NGHOST=xxx
# --nscalars=xxx set NSCALARS=xxx
# -eos_table enable EOS table
# -b enable magnetic fields
# -s enable special relativity
# -g enable general relativity
# -t enable interface frame transformations for GR
# -debug enable debug flags (-g -O0); override other compiler options
# -coverage enable compiler-dependent code coverage flags
# -float enable single precision (default is double)
# -mpi enable parallelization with MPI
# -omp enable parallelization with OpenMP
# -hdf5 enable HDF5 output (requires the HDF5 library)
# --hdf5_path=path path to HDF5 libraries (requires the HDF5 library)
# -fft enable FFT (requires the FFTW library)
# --fftw_path=path path to FFTW libraries (requires the FFTW library)
# --grav=xxx use xxx as the self-gravity solver
# --cxx=xxx use xxx as the C++ compiler (works w/ or w/o -mpi)
# --ccmd=name use name as the command to call the (non-MPI) C++ compiler
# --mpiccmd=name use name as the command to call the MPI C++ compiler
# --gcovcmd=name use name as the command to call the gcov utility
# --cflag=string append string whenever invoking compiler/linker
# --include=path use -Ipath when compiling
# --lib_path=path use -Lpath when linking
# --lib=xxx use -lxxx when linking
# ----------------------------------------------------------------------------------------
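#
# Example invocation (illustrative only; substitute any problem generator from
# src/pgen/ and any library paths valid on your system):
#   python configure.py --prob=shock_tube --coord=cartesian -b --flux=hlld -mpi \
#       -hdf5 --hdf5_path=/usr/local/hdf5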
# Modules
import argparse
import glob
import re
# Set template and output filenames
makefile_input = 'Makefile.in'
makefile_output = 'Makefile'
defsfile_input = 'src/defs.hpp.in'
defsfile_output = 'src/defs.hpp'
# --- Step 1. Prepare parser, add each of the arguments ------------------
athena_description = (
"Prepare custom Makefile and defs.hpp for compiling Athena++ solver"
)
athena_epilog = (
"Full documentation of options available at "
"https://github.com/PrincetonUniversity/athena-public-version/wiki/Configuring"
)
parser = argparse.ArgumentParser(description=athena_description, epilog=athena_epilog)
# --prob=[name] argument
pgen_directory = 'src/pgen/'
# set pgen_choices to list of .cpp files in src/pgen/
pgen_choices = glob.glob(pgen_directory + '*.cpp')
# remove 'src/pgen/' prefix and '.cpp' extension from each filename
pgen_choices = [choice[len(pgen_directory):-4] for choice in pgen_choices]
parser.add_argument('--prob',
default='shock_tube',
choices=pgen_choices,
help='select problem generator')
# --coord=[name] argument
parser.add_argument(
'--coord',
default='cartesian',
choices=[
'cartesian',
'cylindrical',
'spherical_polar',
'minkowski',
'sinusoidal',
'tilted',
'schwarzschild',
'kerr-schild',
'gr_user'],
help='select coordinate system')
# --eos=[name] argument
parser.add_argument('--eos',
default='adiabatic',
choices=['adiabatic', 'isothermal', 'general/eos_table',
'general/hydrogen', 'general/ideal'],
help='select equation of state')
# --flux=[name] argument
parser.add_argument('--flux',
default='default',
choices=['default', 'hlle', 'hllc', 'lhllc', 'hlld', 'lhlld', 'roe', 'llf'], # noqa
help='select Riemann solver')
# --nghost=[value] argument
parser.add_argument('--nghost',
default='2',
help='set number of ghost zones')
# --nscalars=[value] argument
parser.add_argument('--nscalars',
default='0',
help='set number of passive scalars')
# -b argument
parser.add_argument('-b',
action='store_true',
default=False,
help='enable magnetic field')
# -sts argument
parser.add_argument('-sts',
action='store_true',
default=False,
help='enable super-time-stepping')
# -s argument
parser.add_argument('-s',
action='store_true',
default=False,
help='enable special relativity')
# -g argument
parser.add_argument('-g',
action='store_true',
default=False,
help='enable general relativity')
# -t argument
parser.add_argument('-t',
action='store_true',
default=False,
help='enable interface frame transformations for GR')
# -debug argument
parser.add_argument('-debug',
action='store_true',
default=False,
help='enable debug flags; override other compiler options')
# -coverage argument
parser.add_argument('-coverage',
action='store_true',
default=False,
help='enable compiler-dependent code coverage flag')
# -float argument
parser.add_argument('-float',
action='store_true',
default=False,
help='enable single precision')
# -mpi argument
parser.add_argument('-mpi',
action='store_true',
default=False,
help='enable parallelization with MPI')
# -omp argument
parser.add_argument('-omp',
action='store_true',
default=False,
help='enable parallelization with OpenMP')
# --grav=[name] argument
parser.add_argument('--grav',
default='none',
choices=['none', 'fft'],
help='select self-gravity solver')
# -fft argument
parser.add_argument('-fft',
action='store_true',
default=False,
help='enable FFT')
# --fftw_path argument
parser.add_argument('--fftw_path',
default='',
help='path to FFTW libraries')
# -hdf5 argument
parser.add_argument('-hdf5',
action='store_true',
default=False,
help='enable HDF5 Output')
# -h5double argument
parser.add_argument('-h5double',
action='store_true',
default=False,
help='enable double precision HDF5 output')
# --hdf5_path argument
parser.add_argument('--hdf5_path',
default='',
help='path to HDF5 libraries')
# The main choices for --cxx flag, using "ctype[-suffix]" formatting, where "ctype" is the
# major family/suite/group of compilers and "suffix" may represent variants of the
# compiler version and/or predefined sets of compiler options. The C++ compiler front ends
# are the main supported/documented options and are invoked on the command line, but the C
# front ends are also acceptable selections and are mapped to the matching C++ front end:
# gcc -> g++, clang -> clang++, icc-> icpc
cxx_choices = [
'g++',
'g++-simd',
'icpx',
'icpc',
'icpc-debug',
'icpc-phi',
'cray',
'bgxlc++',
'clang++',
'clang++-simd',
'clang++-apple',
]
def c_to_cpp(arg):
arg = arg.replace('gcc', 'g++', 1)
arg = arg.replace('icc', 'icpc', 1)
arg = arg.replace('icx', 'icpx', 1)
if arg == 'bgxl' or arg == 'bgxlc':
arg = 'bgxlc++'
if arg == 'clang':
arg = 'clang++'
else:
arg = arg.replace('clang-', 'clang++-', 1)
return arg
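# For example, the replacements above give: c_to_cpp('gcc') -> 'g++',
# c_to_cpp('icc') -> 'icpc', c_to_cpp('clang') -> 'clang++',
# and c_to_cpp('clang-simd') -> 'clang++-simd'.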
# --cxx=[name] argument
parser.add_argument(
'--cxx',
default='g++',
type=c_to_cpp,
choices=cxx_choices,
help='select C++ compiler and default set of flags (works w/ or w/o -mpi)')
# --ccmd=[name] argument
parser.add_argument('--ccmd',
default=None,
help='override for command to use to call (non-MPI) C++ compiler')
# --mpiccmd=[name] argument
parser.add_argument('--mpiccmd',
default=None,
help='override for command to use to call MPI C++ compiler')
# --gcovcmd=[name] argument
parser.add_argument('--gcovcmd',
default=None,
help='override for command to use to call Gcov utility in Makefile')
# --cflag=[string] argument
parser.add_argument('--cflag',
default=None,
help='additional string of flags to append to compiler/linker calls')
# --include=[name] arguments
parser.add_argument(
'--include',
default=[],
action='append',
help=('extra path for included header files (-I<path>); can be specified multiple '
'times'))
# --lib_path=[name] arguments
parser.add_argument(
'--lib_path',
default=[],
action='append',
help=('extra path for linked library files (-L<path>); can be specified multiple '
'times'))
# --lib=[name] arguments
parser.add_argument(
'--lib',
default=[],
action='append',
help='name of library to link against (-l<lib>); can be specified multiple times')
# Parse command-line inputs
args = vars(parser.parse_args())
# --- Step 2. Test for incompatible arguments ----------------------------
# Set default flux; HLLD for MHD, HLLC for hydro, HLLE for isothermal hydro or any GR
if args['flux'] == 'default':
if args['g']:
args['flux'] = 'hlle'
elif args['b']:
args['flux'] = 'hlld'
elif args['eos'] == 'isothermal':
args['flux'] = 'hlle'
else:
args['flux'] = 'hllc'
# Check Riemann solver compatibility
if args['flux'] == 'hllc' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: HLLC flux cannot be used with isothermal EOS')
if args['flux'] == 'hllc' and args['b']:
raise SystemExit('### CONFIGURE ERROR: HLLC flux cannot be used with MHD')
if args['flux'] == 'lhllc' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: LHLLC flux cannot be used with isothermal EOS') # noqa
if args['flux'] == 'lhllc' and args['b']:
raise SystemExit('### CONFIGURE ERROR: LHLLC flux cannot be used with MHD')
if args['flux'] == 'hlld' and not args['b']:
raise SystemExit('### CONFIGURE ERROR: HLLD flux can only be used with MHD')
if args['flux'] == 'lhlld' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: LHLLD flux cannot be used with isothermal EOS') # noqa
if args['flux'] == 'lhlld' and not args['b']:
raise SystemExit('### CONFIGURE ERROR: LHLLD flux can only be used with MHD')
# Check relativity
if args['s'] and args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'GR implies SR; the -s option is restricted to pure SR')
if args['t'] and not args['g']:
raise SystemExit('### CONFIGURE ERROR: Frame transformations only apply to GR')
if args['g'] and not args['t'] and args['flux'] not in ('llf', 'hlle'):
raise SystemExit('### CONFIGURE ERROR: Frame transformations required for {0}'
.format(args['flux']))
if args['g'] and args['coord'] in ('cartesian', 'cylindrical', 'spherical_polar'):
raise SystemExit('### CONFIGURE ERROR: GR cannot be used with {0} coordinates'
.format(args['coord']))
if not args['g'] and args['coord'] not in ('cartesian', 'cylindrical', 'spherical_polar'):
raise SystemExit('### CONFIGURE ERROR: '
+ args['coord'] + ' coordinates only apply to GR')
if args['eos'] == 'isothermal':
if args['s'] or args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'Isothermal EOS is incompatible with relativity')
if args['eos'][:8] == 'general/':
if args['s'] or args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'General EOS is incompatible with relativity')
if args['flux'] not in ['hllc', 'hlld']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'General EOS is incompatible with flux ' + args['flux'])
# --- Step 3. Set definitions and Makefile options based on above argument
# Prepare dictionaries of substitutions to be made
definitions = {}
makefile_options = {}
makefile_options['LOADER_FLAGS'] = ''
# --prob=[name] argument
definitions['PROBLEM'] = makefile_options['PROBLEM_FILE'] = args['prob']
# --coord=[name] argument
definitions['COORDINATE_SYSTEM'] = makefile_options['COORDINATES_FILE'] = args['coord']
# --eos=[name] argument
definitions['NON_BAROTROPIC_EOS'] = '0' if args['eos'] == 'isothermal' else '1'
makefile_options['EOS_FILE'] = args['eos']
definitions['EQUATION_OF_STATE'] = args['eos']
# set number of hydro variables for adiabatic/isothermal
definitions['GENERAL_EOS'] = '0'
makefile_options['GENERAL_EOS_FILE'] = 'noop'
definitions['EOS_TABLE_ENABLED'] = '0'
if args['eos'] == 'isothermal':
definitions['NHYDRO_VARIABLES'] = '4'
elif args['eos'] == 'adiabatic':
definitions['NHYDRO_VARIABLES'] = '5'
else:
definitions['GENERAL_EOS'] = '1'
makefile_options['GENERAL_EOS_FILE'] = 'general'
definitions['NHYDRO_VARIABLES'] = '5'
if args['eos'] == 'general/eos_table':
definitions['EOS_TABLE_ENABLED'] = '1'
# --flux=[name] argument
definitions['RSOLVER'] = makefile_options['RSOLVER_FILE'] = args['flux']
# --nghost=[value] argument
definitions['NUMBER_GHOST_CELLS'] = args['nghost']
# --nscalars=[value] argument
definitions['NUMBER_PASSIVE_SCALARS'] = args['nscalars']
# -b argument
# set variety of macros based on whether MHD/hydro or adi/iso are defined
if args['b']:
definitions['MAGNETIC_FIELDS_ENABLED'] = '1'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_mhd'
else:
makefile_options['EOS_FILE'] += '_mhd'
definitions['NFIELD_VARIABLES'] = '3'
makefile_options['RSOLVER_DIR'] = 'mhd/'
if args['flux'] == 'hlle' or args['flux'] == 'llf' or args['flux'] == 'roe':
makefile_options['RSOLVER_FILE'] += '_mhd'
if args['eos'] == 'isothermal':
definitions['NWAVE_VALUE'] = '6'
if args['flux'] == 'hlld':
makefile_options['RSOLVER_FILE'] += '_iso'
else:
definitions['NWAVE_VALUE'] = '7'
else:
definitions['MAGNETIC_FIELDS_ENABLED'] = '0'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_hydro'
else:
makefile_options['EOS_FILE'] += '_hydro'
definitions['NFIELD_VARIABLES'] = '0'
makefile_options['RSOLVER_DIR'] = 'hydro/'
if args['eos'] == 'isothermal':
definitions['NWAVE_VALUE'] = '4'
else:
definitions['NWAVE_VALUE'] = '5'
# -sts argument
if args['sts']:
definitions['STS_ENABLED'] = '1'
else:
definitions['STS_ENABLED'] = '0'
# -s, -g, and -t arguments
definitions['RELATIVISTIC_DYNAMICS'] = '1' if args['s'] or args['g'] else '0'
definitions['GENERAL_RELATIVITY'] = '1' if args['g'] else '0'
definitions['FRAME_TRANSFORMATIONS'] = '1' if args['t'] else '0'
if args['s']:
makefile_options['EOS_FILE'] += '_sr'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_sr'
makefile_options['RSOLVER_FILE'] += '_rel'
if args['g']:
makefile_options['EOS_FILE'] += '_gr'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_gr'
makefile_options['RSOLVER_FILE'] += '_rel'
if not args['t']:
makefile_options['RSOLVER_FILE'] += '_no_transform'
# --cxx=[name] argument
if args['cxx'] == 'g++':
# GCC is C++11 feature-complete since v4.8.1 (2013-05-31)
definitions['COMPILER_CHOICE'] = 'g++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'g++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'g++-simd':
# GCC version >= 4.9, for OpenMP 4.0; version >= 6.1 for OpenMP 4.5 support
definitions['COMPILER_CHOICE'] = 'g++-simd'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'g++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -fopenmp-simd -fwhole-program -flto -ffast-math '
'-march=native -fprefetch-loop-arrays'
# -march=skylake-avx512, skylake, core-avx2
# -mprefer-vector-width=128 # available in gcc-8, but not gcc-7
# -mtune=native, generic, broadwell
# -mprefer-avx128
# -m64 (default)
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpx':
# Next-gen LLVM-based Intel oneAPI DPC++/C++ Compiler
definitions['COMPILER_CHOICE'] = 'icpx'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpx'
makefile_options['PREPROCESSOR_FLAGS'] = ''
# ICX drivers icx and icpx will accept ICC Classic Compiler options or Clang*/LLVM
# Compiler options
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xhost -qopenmp-simd '
)
# Currently unsupported, but "options to be supported" according to icpx
# -qnextgen-diag: '-inline-forceinline -qopt-prefetch=4 '
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc':
# ICC is C++11 feature-complete since v15.0 (2014-08-26)
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xhost -inline-forceinline -qopenmp-simd -qopt-prefetch=4 '
'-qoverride-limits' # -qopt-report-phase=ipo (does nothing without -ipo)
)
# -qopt-zmm-usage=high' # typically harms multi-core performance on Skylake Xeon
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc-debug':
# Disable IPO, forced inlining, and fast math. Enable vectorization reporting.
# Useful for testing symmetry, SIMD-enabled functions and loops with OpenMP 4.5
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -xhost -qopenmp-simd -fp-model precise -qopt-prefetch=4 '
'-qopt-report=5 -qopt-report-phase=openmp,vec -g -qoverride-limits'
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc-phi':
    # Cross-compile for Intel Xeon Phi x200 KNL series (unique AVX-512ER and AVX-512PF)
    # -xMIC-AVX512: generate AVX-512F, AVX-512CD, AVX-512ER and AVX-512PF
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xMIC-AVX512 -inline-forceinline -qopenmp-simd '
'-qopt-prefetch=4 -qoverride-limits'
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'cray':
# Cray Compiling Environment 8.4 (2015-09-24) introduces C++11 feature completeness
# (except "alignas"). v8.6 is C++14 feature-complete
definitions['COMPILER_CHOICE'] = 'cray'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'CC'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -h std=c++11 -h aggress -h vector3 -hfp3'
makefile_options['LINKER_FLAGS'] = '-hwp -hpl=obj/lib'
makefile_options['LIBRARY_FLAGS'] = '-lm'
if args['cxx'] == 'bgxlc++':
# IBM XL C/C++ for BG/Q is NOT C++11 feature-complete as of v12.1.0.15 (2017-12-22)
# suppressed messages:
# 1500-036: The NOSTRICT option has the potential to alter the program's semantics
# 1540-1401: An unknown "pragma simd" is specified
# 1586-083: ld option ignored by IPA
# 1586-233: Duplicate definition of symbol ignored
# 1586-267: Inlining of specified subprogram failed due to the presence of a C++
# exception handler
definitions['COMPILER_CHOICE'] = 'bgxlc++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'bgxlc++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -qhot=level=1:vector -qinline=level=5:auto -qipa=level=1:noobject'
' -qstrict=subnormals -qmaxmem=150000 -qlanglvl=extended0x -qsuppress=1500-036'
' -qsuppress=1540-1401 -qsuppress=1586-083 -qsuppress=1586-233'
' -qsuppress=1586-267'
)
makefile_options['LINKER_FLAGS'] = makefile_options['COMPILER_FLAGS']
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++':
# Clang is C++11 feature-complete since v3.3 (2013-06-17)
definitions['COMPILER_CHOICE'] = 'clang++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++-simd':
# LLVM/Clang version >= 3.9 for most of OpenMP 4.0 and 4.5 (still incomplete; no
# offloading, target/declare simd directives). OpenMP 3.1 fully supported in LLVM 3.7
definitions['COMPILER_CHOICE'] = 'clang++-simd'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11 -fopenmp-simd'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++-apple':
# Apple LLVM/Clang: forked version of the open-source LLVM project bundled in macOS
definitions['COMPILER_CHOICE'] = 'clang++-apple'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
# -float argument
if args['float']:
definitions['SINGLE_PRECISION_ENABLED'] = '1'
else:
definitions['SINGLE_PRECISION_ENABLED'] = '0'
# -debug argument
if args['debug']:
definitions['DEBUG_OPTION'] = 'DEBUG'
# Completely replace the --cxx= sets of default compiler flags, disable optimization,
# and emit debug symbols in the compiled binaries
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
makefile_options['COMPILER_FLAGS'] = '-O0 --std=c++11 -g' # -Og
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] = '-O0 -h std=c++11'
if args['cxx'] == 'bgxlc++':
makefile_options['COMPILER_FLAGS'] = '-O0 -g -qlanglvl=extended0x'
if args['cxx'] == 'icpc-phi':
makefile_options['COMPILER_FLAGS'] = '-O0 --std=c++11 -g -xMIC-AVX512'
else:
definitions['DEBUG_OPTION'] = 'NOT_DEBUG'
# -coverage argument
if args['coverage']:
definitions['EXCEPTION_HANDLING_OPTION'] = 'DISABLE_EXCEPTIONS'
# For now, append new compiler flags and don't override --cxx set, but set code to be
# unoptimized (-O0 instead of -O3) to get useful statement annotations. Should we add
# '-g -fopenmp-simd', by default? Don't combine lines when writing source code!
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'):
makefile_options['COMPILER_FLAGS'] += (
' -O0 -fprofile-arcs -ftest-coverage'
' -fno-inline -fno-exceptions -fno-elide-constructors'
)
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-phi'):
makefile_options['COMPILER_FLAGS'] += ' -O0 -prof-gen=srcpos'
if (args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
        # Clang's "source-based" code coverage feature produces .profraw output
makefile_options['COMPILER_FLAGS'] += (
' -O0 -fprofile-instr-generate -fcoverage-mapping'
) # use --coverage to produce GCC-compatible .gcno, .gcda output for gcov
if (args['cxx'] == 'cray' or args['cxx'] == 'bgxlc++'):
raise SystemExit(
            '### CONFIGURE ERROR: No code coverage available for selected compiler!')
else:
# Enable C++ try/throw/catch exception handling, by default. Disable only when testing
# code coverage, since it causes Gcov and other tools to report misleadingly low
    # branch coverage statistics due to untested throwing of exceptions from function calls
definitions['EXCEPTION_HANDLING_OPTION'] = 'ENABLE_EXCEPTIONS'
# --ccmd=[name] argument
if args['ccmd'] is not None:
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = args['ccmd']
# --gcovcmd=[name] argument (only modifies Makefile target)
if args['gcovcmd'] is not None:
makefile_options['GCOV_COMMAND'] = args['gcovcmd']
else:
makefile_options['GCOV_COMMAND'] = 'gcov'
# -mpi argument
if args['mpi']:
definitions['MPI_OPTION'] = 'MPI_PARALLEL'
if (args['cxx'] == 'g++' or args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-phi' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'mpicxx'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -h mpi1'
if args['cxx'] == 'bgxlc++':
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'mpixlcxx' # noqa
# --mpiccmd=[name] argument
if args['mpiccmd'] is not None:
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = args['mpiccmd'] # noqa
else:
definitions['MPI_OPTION'] = 'NOT_MPI_PARALLEL'
# -omp argument
if args['omp']:
definitions['OPENMP_OPTION'] = 'OPENMP_PARALLEL'
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd' or args['cxx'] == 'clang++'
or args['cxx'] == 'clang++-simd'):
makefile_options['COMPILER_FLAGS'] += ' -fopenmp'
if (args['cxx'] == 'clang++-apple'):
# Apple Clang disables the front end OpenMP driver interface; enable it via the
# preprocessor. Must install LLVM's OpenMP runtime library libomp beforehand
makefile_options['COMPILER_FLAGS'] += ' -Xpreprocessor -fopenmp'
makefile_options['LIBRARY_FLAGS'] += ' -lomp'
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'icpx'):
makefile_options['COMPILER_FLAGS'] += ' -qopenmp'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -homp'
if args['cxx'] == 'bgxlc++':
# use thread-safe version of compiler
definitions['COMPILER_COMMAND'] += '_r'
makefile_options['COMPILER_COMMAND'] += '_r'
makefile_options['COMPILER_FLAGS'] += ' -qsmp'
else:
definitions['OPENMP_OPTION'] = 'NOT_OPENMP_PARALLEL'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -hnoomp'
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'icpx'):
# suppressed messages:
# 3180: pragma omp not recognized
makefile_options['COMPILER_FLAGS'] += ' -diag-disable 3180'
# --grav argument
if args['grav'] == "none":
definitions['SELF_GRAVITY_ENABLED'] = '0'
else:
if args['grav'] == "fft":
definitions['SELF_GRAVITY_ENABLED'] = '1'
if not args['fft']:
raise SystemExit(
                '### CONFIGURE ERROR: FFT Poisson solver can only be used with FFT')
# -fft argument
makefile_options['MPIFFT_FILE'] = ' '
definitions['FFT_OPTION'] = 'NO_FFT'
if args['fft']:
definitions['FFT_OPTION'] = 'FFT'
if args['fftw_path'] != '':
makefile_options['PREPROCESSOR_FLAGS'] += ' -I{0}/include'.format(
args['fftw_path'])
makefile_options['LINKER_FLAGS'] += ' -L{0}/lib'.format(args['fftw_path'])
if args['omp']:
makefile_options['LIBRARY_FLAGS'] += ' -lfftw3_omp'
if args['mpi']:
makefile_options['MPIFFT_FILE'] = ' $(wildcard src/fft/plimpton/*.cpp)'
makefile_options['LIBRARY_FLAGS'] += ' -lfftw3'
# -hdf5 argument
if args['hdf5']:
definitions['HDF5_OPTION'] = 'HDF5OUTPUT'
if args['hdf5_path'] != '':
makefile_options['PREPROCESSOR_FLAGS'] += ' -I{0}/include'.format(
args['hdf5_path'])
makefile_options['LINKER_FLAGS'] += ' -L{0}/lib'.format(args['hdf5_path'])
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'cray' or args['cxx'] == 'icpc'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
makefile_options['LIBRARY_FLAGS'] += ' -lhdf5'
if args['cxx'] == 'bgxlc++':
makefile_options['PREPROCESSOR_FLAGS'] += (
' -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_BSD_SOURCE'
' -I/soft/libraries/hdf5/1.10.0/cnk-xl/current/include'
' -I/bgsys/drivers/ppcfloor/comm/include')
makefile_options['LINKER_FLAGS'] += (
' -L/soft/libraries/hdf5/1.10.0/cnk-xl/current/lib'
' -L/soft/libraries/alcf/current/xl/ZLIB/lib')
makefile_options['LIBRARY_FLAGS'] += ' -lhdf5 -lz -lm'
else:
definitions['HDF5_OPTION'] = 'NO_HDF5OUTPUT'
# -h5double argument (does nothing if no -hdf5)
if args['h5double']:
definitions['H5_DOUBLE_PRECISION_ENABLED'] = '1'
else:
definitions['H5_DOUBLE_PRECISION_ENABLED'] = '0'
# --cflag=[string] argument
if args['cflag'] is not None:
makefile_options['COMPILER_FLAGS'] += ' '+args['cflag']
# --include=[name] arguments
for include_path in args['include']:
makefile_options['COMPILER_FLAGS'] += ' -I'+include_path
# --lib_path=[name] arguments
for library_path in args['lib_path']:
makefile_options['LINKER_FLAGS'] += ' -L'+library_path
# --lib=[name] arguments
for library_name in args['lib']:
makefile_options['LIBRARY_FLAGS'] += ' -l'+library_name
# Assemble all flags of any sort given to compiler
definitions['COMPILER_FLAGS'] = ' '.join(
[makefile_options[opt+'_FLAGS'] for opt in
['PREPROCESSOR', 'COMPILER', 'LINKER', 'LIBRARY']])
# --- Step 4. Create new files, finish up --------------------------------
# Terminate all filenames with .cpp extension
makefile_options['PROBLEM_FILE'] += '.cpp'
makefile_options['COORDINATES_FILE'] += '.cpp'
makefile_options['EOS_FILE'] += '.cpp'
makefile_options['GENERAL_EOS_FILE'] += '.cpp'
makefile_options['RSOLVER_FILE'] += '.cpp'
# Read templates
with open(defsfile_input, 'r') as current_file:
defsfile_template = current_file.read()
with open(makefile_input, 'r') as current_file:
makefile_template = current_file.read()
# Make substitutions
for key, val in definitions.items():
defsfile_template = re.sub(r'@{0}@'.format(key), val, defsfile_template)
for key, val in makefile_options.items():
makefile_template = re.sub(r'@{0}@'.format(key), val, makefile_template)
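# (Each @KEY@ placeholder in the templates is replaced by its configured value;
# e.g. a template line containing @PROBLEM_FILE@ would become shock_tube.cpp for
# the default problem generator. The placeholder names are assumed to match the
# dictionary keys used in the re.sub pattern above.)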
# Write output files
with open(defsfile_output, 'w') as current_file:
current_file.write(defsfile_template)
with open(makefile_output, 'w') as current_file:
current_file.write(makefile_template)
# Finish with diagnostic output
# To match show_config.cpp output: use 2 space indent for option, value string starts on
# column 30
self_grav_string = 'OFF'
if args['grav'] == 'fft':
self_grav_string = 'FFT'
print('Your Athena++ distribution has now been configured with the following options:')
print(' Problem generator: ' + args['prob'])
print(' Coordinate system: ' + args['coord'])
print(' Equation of state: ' + args['eos'])
print(' Riemann solver: ' + args['flux'])
print(' Magnetic fields: ' + ('ON' if args['b'] else 'OFF'))
print(' Number of scalars: ' + args['nscalars'])
print(' Special relativity: ' + ('ON' if args['s'] else 'OFF'))
print(' General relativity: ' + ('ON' if args['g'] else 'OFF'))
print(' Frame transformations: ' + ('ON' if args['t'] else 'OFF'))
print(' Self-Gravity: ' + self_grav_string)
print(' Super-Time-Stepping: ' + ('ON' if args['sts'] else 'OFF'))
print(' Debug flags: ' + ('ON' if args['debug'] else 'OFF'))
print(' Code coverage flags: ' + ('ON' if args['coverage'] else 'OFF'))
print(' Linker flags: ' + makefile_options['LINKER_FLAGS'] + ' '
+ makefile_options['LIBRARY_FLAGS'])
print(' Floating-point precision: ' + ('single' if args['float'] else 'double'))
print(' Number of ghost cells: ' + args['nghost'])
print(' MPI parallelism: ' + ('ON' if args['mpi'] else 'OFF'))
print(' OpenMP parallelism: ' + ('ON' if args['omp'] else 'OFF'))
print(' FFT: ' + ('ON' if args['fft'] else 'OFF'))
print(' HDF5 output: ' + ('ON' if args['hdf5'] else 'OFF'))
if args['hdf5']:
print(' HDF5 precision: ' + ('double' if args['h5double'] else 'single'))
print(' Compiler: ' + args['cxx'])
print(' Compilation command: ' + makefile_options['COMPILER_COMMAND'] + ' '
+ makefile_options['PREPROCESSOR_FLAGS'] + ' ' + makefile_options['COMPILER_FLAGS'])
|
the-stack_0_12988 | import tensorflow as tf
from tensorflow.python.ops.array_ops import fake_quant_with_min_max_vars
a = tf.Variable([0.0, 0.1, 0.3, 0.49, 0.5, 0.8, 1.1, 1.23, 1.49, 1.5, 1.51, 2.0])
qa = fake_quant_with_min_max_vars(a, tf.reduce_min(a), tf.reduce_max(a), num_bits=3, narrow_range=False)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(a))
print(sess.run(qa))
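# Rough sanity check of the expected behaviour: with num_bits=3 and
# narrow_range=False there are 2**3 = 8 quantization levels spread over
# [min, max] = [0.0, 2.0], i.e. a step of 2/7 ~= 0.286, so each element of `a`
# is snapped to the nearest multiple of that step (TF's nudged-scale rounding
# may shift borderline values slightly). Note this script uses TF1-style graph
# mode (tf.Session), so it assumes TensorFlow 1.x or tf.compat.v1 behaviour.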
|
the-stack_0_12992 | from kapteyn import maputils
from matplotlib import pyplot as plt
from kapteyn import tabarray
import numpy
# Get a header and change some values
f = maputils.FITSimage("m101.fits")
header = f.hdr
header['CDELT1'] = 0.1
header['CDELT2'] = 0.1
header['CRVAL1'] = 285
header['CRVAL2'] = 20
# Use the changed header as external source for new object
f = maputils.FITSimage(externalheader=header)
fig = plt.figure()
frame = fig.add_subplot(1,1,1)
annim = f.Annotatedimage(frame)
grat = annim.Graticule()
fn = 'WDB/smallworld.txt'
# Note that in this file the latitudes are in the first column
# (column 0) and the longitudes in the second (column 1).
xp, yp = annim.positionsfromfile(fn, 's', cols=[1,0])
annim.Marker(x=xp, y=yp, mode='pixels', marker=',', color='b')
annim.plot()
frame.set_title("Markers in the Carribbean")
plt.show() |
the-stack_0_12993 | #Simple Linear Regression
#import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Read data
dataset = pd.read_csv('Salary_Data.csv')
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,1].values
#Splitting data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(x,y, test_size=1/3, random_state=0)
#Feature Scaling
'''from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)'''
#Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
#Predict the test set results
y_pred = regressor.predict(X_test)
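#Optional sanity check (not part of the original script): report fit quality on the test set
from sklearn.metrics import mean_absolute_error
print('R^2 on test set:', regressor.score(X_test, Y_test))
print('MAE on test set:', mean_absolute_error(Y_test, y_pred))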
#Visualising the training set
plt.scatter(X_train,Y_train,color='red')
plt.plot(X_train, regressor.predict(X_train),color='blue')
plt.title('Salary vs Experience(Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
#Visualising the Test set
plt.scatter(X_test,Y_test,color='red')
plt.plot(X_train, regressor.predict(X_train),color='blue')
plt.title('Salary vs Experience(Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show() |
the-stack_0_12996 | #!/usr/bin/env python
"""Strictly for loading agents to inspect. Based on `main.py`."""
import datetime
import os
import time
import argparse
import cv2
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from os.path import join
from ravens import Dataset, Environment, cameras, agents, tasks
from ravens import utils as U
# Of critical importance! See the top of main.py for details.
MAX_ORDER = 4
# See Task().
PIXEL_SIZE = 0.003125
CAMERA_CONFIG = cameras.RealSenseD415.CONFIG
BOUNDS = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])
def goal_similarity(obs, goal):
"""For goal-conditioning, measure how close current image is to goal.
Metrics: L2 and SSIM for now. The `obs` and `goal` should be of the same
format as in rollout(), where they have color/depth keys, with 3 camera
    viewpoints. However, `obs` will be a list and `goal` a np.array. For the
pose metrics, use the task reward.
"""
# Requires pip install scikit-image
from skimage.metrics import structural_similarity
colormap_o, _ = get_heightmap(obs=obs)
colormap_g, _ = get_heightmap(obs=goal)
L2 = np.linalg.norm(colormap_o - colormap_g) / np.prod(colormap_o.shape)
SSIM = structural_similarity(colormap_o, colormap_g, multichannel=True)
metrics = {}
metrics['L2'] = round(L2, 4)
metrics['SSIM'] = round(SSIM, 4)
return metrics
def get_heightmap(obs):
"""Reconstruct orthographic heightmaps with segmentation masks.
Here, `obs` could be current or goal, either will work.
See transporter.py, regression.py, task.py, dummy.py, and dataset.py.
We use this pattern quite a lot. Copy from transporter.py version.
"""
heightmaps, colormaps = U.reconstruct_heightmaps(
obs['color'], obs['depth'], CAMERA_CONFIG, BOUNDS, PIXEL_SIZE)
colormaps = np.float32(colormaps)
heightmaps = np.float32(heightmaps)
# Fuse maps from different views.
valid = np.sum(colormaps, axis=3) > 0
repeat = np.sum(valid, axis=0)
repeat[repeat == 0] = 1
colormap = np.sum(colormaps, axis=0) / repeat[..., None]
colormap = np.uint8(np.round(colormap))
heightmap = np.max(heightmaps, axis=0)
return colormap, heightmap
def load(path, iepisode, field):
"""Adapted from `dataset.py` so we can sample goal images. Just including
some logic to extract the episode automatically based on the index
`iepisode`, so we don't need to know the length in advance.
"""
field_path = os.path.join(path, field)
data_list = [os.path.join(field_path, x) for x in os.listdir(field_path)]
fname = [x for x in data_list if f'{iepisode:06d}' in x]
assert len(fname) == 1, fname
fname = fname[0]
return pickle.load(open(fname, 'rb'))
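# Example of the directory layout `load` assumes (illustrative names only):
# goals/<task>/last_color/ holds one pickle per test episode, with the
# zero-padded episode index in the file name, so load(path, 3, 'last_color')
# picks the single file whose name contains '000003'.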
def debug_time_step(t, epidx, obs, act, extras, goal=None):
"""Save images and other stuff from time `t` in episode `epidx`."""
pth = 'tmp'
tt = str(t).zfill(2)
# Convert from BGR to RGB to match what we see in the GUI.
def save(fname, c_img):
cv2.imwrite(fname, img=cv2.cvtColor(c_img, cv2.COLOR_BGR2RGB))
# Save current color images from camera angles and the fused version.
for img_idx, c_img in enumerate(obs['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}.png')
save(fname, c_img)
colormap_o, _ = get_heightmap(obs=obs)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused.png')
save(fname, colormap_o)
# (If applicable) save the goal color images.
if (goal is not None) and t == 1:
for img_idx, c_img in enumerate(goal['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}_goal.png')
save(fname, c_img)
colormap_g, _ = get_heightmap(obs=goal)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused_goal.png')
save(fname, colormap_g)
# Print the action.
pose0 = act['params']['pose0']
pose1 = act['params']['pose1']
print(f" pose0, pose1: {U.round_pose(pose0)}, {U.round_pose(pose1)}")
# Attention. (Well, attn_input.png is also input to Transport...)
fname1 = join(pth, f'ep_{epidx}_t{tt}_attn_input.png')
fname2 = join(pth, f'ep_{epidx}_t{tt}_attn_heat_bgr.png')
cv2.imwrite(fname1, extras['input_c'])
cv2.imwrite(fname2, extras['attn_heat_bgr'])
# Transport
for idx, tran_heat in enumerate(extras['tran_heat_bgr']):
idxstr = str(idx).zfill(2)
fname = join(pth, f'ep_{epidx}_t{tt}_tran_rot_{idxstr}.png')
if idx == extras['tran_rot_argmax']:
fname = fname.replace('.png', '_rot_chosen.png')
cv2.imwrite(fname, tran_heat)
def rollout(agent, env, task, goal_conditioned, args, num_finished, debug=False):
"""Standard gym environment rollout.
Adding more debugging options (enable with debug=True), such as printing
the pose and saving the images and heatmaps. We can also run `dataset.py`
and see goal images in the `goals_out` directory.
:goal_conditioned: a boolean to check if we have goal-conditioning.
:num_finished: to track how many episodes we have finished. Ignores any
episodes drawn and then discarded due to initial states that were
already done. Also used to sample the goal states for
goal-conditioned policies. We have a fixed number of testing episodes
(characterized by goal images), so `num_finished` is the identifier.
Returns `t` to track episode length. Update (21 Aug 2020): also returns
last_stuff=(obs,info), consistent with main.py and generate_goals.py.
(13 Oct 2020): fixing so that we will always append stuff in the episode
list for gt_state agents. The problem is that the first time step (start_t=1)
wasn't saving because len(obs) = 0, but in gt_state we actually want to save.
Otherwise, a length 1 episode will have len(episode)==0 later. It's not a huge
deal because we still save the final info correctly, so that we can report
correct stats, but it helps to have the initial info because that gives us the
deltas over the starting state.
"""
if debug:
if not os.path.exists('tmp/'):
os.makedirs('tmp/')
print('')
start_t = 0
if args.agent in ['gt_state', 'gt_state_2_step']:
start_t = 1
episode = []
total_reward = 0
# Before task.reset(), need goal info for goal episode at idx `num_finished`.
if goal_conditioned:
task.goal_cond_testing = True
path = os.path.join('goals', args.task)
goal = {}
goal['color'] = load(path, num_finished, 'last_color')
goal['depth'] = load(path, num_finished, 'last_depth')
goal['info'] = load(path, num_finished, 'last_info')
goal_imgs = goal if goal_conditioned else None
# Reset env and call task.reset(), len(obs)=0 but info will have stuff for gt_state.
if goal_conditioned:
obs = env.reset(task, last_info=goal['info'])
else:
obs = env.reset(task)
info = env.info
for t in range(start_t, task.max_steps):
if debug and t > 0:
act, extras = agent.act(obs, info, goal=goal_imgs, debug_imgs=True)
else:
act = agent.act(obs, info, goal=goal_imgs)
# Optional debugging to save images, etc. Do before we get new obs.
if debug and 'params' in act:
debug_time_step(t, num_finished, obs, act, extras, goal=goal_imgs)
# (13 Oct 2020) Ah, if gt_state, we won't save at start_t=1, so let's fix that!
if (len(obs) > 0 and act['primitive']) or (args.agent in ['gt_state', 'gt_state_2_step']):
episode.append((act, info)) # don't save obs
(obs, reward, done, info) = env.step(act)
# If goal-conditioning, additionally compute image-based metrics.
if goal_conditioned and ('color' in obs and 'depth' in obs):
info['image_metrics'] = goal_similarity(obs, goal_imgs)
else:
info['image_metrics'] = None
if debug:
print(' {}/{}, rew: {:0.3f}, len(epis): {}, act: {}, info: {}'.format(t,
task.max_steps, reward, len(episode), act['primitive'], info['extras']))
if goal_conditioned:
print(' goal-conditioning image metrics: {}'.format(info['image_metrics']))
total_reward += reward
last_obs_info = (obs, info)
if done:
break
return total_reward, episode, t, last_obs_info
def is_goal_conditioned(args):
"""
Be careful with checking this condition. See `generate_goals.py`. Here,
though, we check the task name and as an extra safety measure, check that
the agent is also named with 'goal'.
    Update: all right, let's modify this to incorporate gt_state w/out too much
extra work. :(
"""
goal_tasks = ['insertion-goal', 'cable-shape-notarget', 'cable-line-notarget',
'cloth-flat-notarget', 'bag-color-goal']
goal_task = (args.task in goal_tasks)
if goal_task:
assert 'goal' in args.agent or 'gt_state' in args.agent, \
'Agent should be a goal-based agent, or gt_state agent.'
return goal_task
def ignore_this_demo(args, reward, t, last_extras):
"""In some cases, we should filter out demonstrations.
Filter for if t == 0, which means the initial state was a success, and
also if we have exit_gracefully, which means for the bag-items tasks, it
may not have had visible item(s) at the start, for some reason.
"""
ignore = (t == 0)
if 'exit_gracefully' in last_extras:
assert last_extras['exit_gracefully']
return True
return ignore
if __name__ == '__main__':
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0')
parser.add_argument('--disp', action='store_true')
parser.add_argument('--task', default='hanoi')
parser.add_argument('--agent', default='transporter')
parser.add_argument('--num_demos', default=1000, type=int)
parser.add_argument('--train_run', default=0, type=int)
parser.add_argument('--num_test_eps', default=20, type=int)
parser.add_argument('--num_rots', default=24, type=int,
help='Transporter rotations used from the trained model, usually 24')
parser.add_argument('--num_rots_inf', default=24, type=int,
help='Transporter rotations we want FOR INFERENCE time; it can be 1')
parser.add_argument('--hz', default=240.0, type=float)
parser.add_argument('--crop_bef_q', default=0, type=int, help='CoRL paper used 1')
parser.add_argument('--gpu_mem_limit', default=None)
parser.add_argument('--subsamp_g', action='store_true')
args = parser.parse_args()
# Configure which GPU to use.
cfg = tf.config.experimental
gpus = cfg.list_physical_devices('GPU')
if len(gpus) == 0:
print('No GPUs detected. Running with CPU.')
else:
cfg.set_visible_devices(gpus[int(args.gpu)], 'GPU')
# Configure how much GPU to use.
if args.gpu_mem_limit is not None:
MEM_LIMIT = int(1024 * float(args.gpu_mem_limit))
print(args.gpu_mem_limit)
dev_cfg = [cfg.VirtualDeviceConfiguration(memory_limit=MEM_LIMIT)]
cfg.set_virtual_device_configuration(gpus[0], dev_cfg)
# Initialize task, set to 'test,' but I think this only matters for kitting.
task = tasks.names[args.task]()
task.mode = 'test'
# Evaluate on saved snapshots. Go backwards to get better results first.
snapshot_itrs = [i*2000 for i in range(1,10+1)] # Do 10 snapshots to save on compute.
snapshot_itrs = snapshot_itrs[::-1]
if not os.path.exists('test_results'):
os.makedirs('test_results')
# Make environment once, due to issues with deformables + multiple calls.
env = Environment(args.disp, hz=args.hz)
# Check if it's goal-conditioned.
goal_conditioned = is_goal_conditioned(args)
for snapshot_itr in snapshot_itrs:
# Set random seeds, so different snapshots test on same starting states.
tf.random.set_seed(args.train_run)
np.random.seed(args.train_run)
# Set the beginning of the agent name.
name = f'{args.task}-{args.agent}-{args.num_demos}-{args.train_run}'
# Initialize agent and load from snapshot. NOTE: main difference from
# main.py is to use num_rots_inf (not args.num_rots) for inference time.
# Also, `self.name` must match what's in main.py, to load correct weights.
if args.agent == 'transporter':
name = f'{name}-rots-{args.num_rots}-crop_bef_q-{args.crop_bef_q}'
agent = agents.names[args.agent](name,
args.task,
num_rotations=args.num_rots_inf,
crop_bef_q=(args.crop_bef_q == 1))
elif 'transporter-goal' in args.agent:
assert goal_conditioned
name = f'{name}-rots-{args.num_rots}'
if args.subsamp_g:
name += '-sub_g'
else:
name += '-fin_g'
agent = agents.names[args.agent](name,
args.task,
num_rotations=args.num_rots_inf)
elif 'gt_state' in args.agent:
agent = agents.names[args.agent](name,
args.task,
one_rot_inf=(args.num_rots_inf==1),
goal_conditioned=goal_conditioned)
else:
agent = agents.names[args.agent](name, args.task)
agent.load(snapshot_itr)
print(f'\nFinished loading snapshot: {snapshot_itr}, for: {name}.')
# Hacky. Works for transporter and gt-state(2step) agents.
agent.real_task = task
# Evaluate agent. Save as list of (iter, episode_list, results(dict)).
# List `episode_list` has all the `info`s BEFORE the last one (gives
# starting state material), and the last one is `results['final_info']`.
performance = []
episode = 0
finished = 0
while finished < args.num_test_eps:
seed = 10**MAX_ORDER + episode
np.random.seed(seed)
total_reward, episode_list, length, last_obs_info = rollout(
agent, env, task, goal_conditioned, args, num_finished=finished)
_, info = last_obs_info # ignore obs
last_extras = info['extras']
if ignore_this_demo(args, total_reward, t=length, last_extras=last_extras):
print(f' Ignoring demo, {last_extras}, not counting episode {episode}')
else:
result = {'reward': total_reward, 'length': length}
result['final_info'] = info['extras']
if goal_conditioned:
result['image_metrics'] = info['image_metrics']
print(f' Test (seed {seed}): {finished}. Results: {result}')
performance.append((agent.total_iter, episode_list, result))
finished += 1
episode += 1
# Save results.
ss = str(snapshot_itr).zfill(5)
rots_inf = str(args.num_rots_inf).zfill(2)
base1 = f'{name}-rotsinf-{rots_inf}'
base2 = f'snapshot-{ss}-eps-{args.num_test_eps}.pkl'
head = os.path.join('test_results', base1)
if not os.path.exists(head):
os.makedirs(head)
fpath = os.path.join(head, base2)
with open(fpath, 'wb') as fh:
pickle.dump(performance, fh)
|
the-stack_0_12997 | """Test zipfile compat.
"""
import inspect
import sys
import zipfile
import pytest
import rarfile
# dont fail on new python by default
_VERS = [(3, 6), (3, 7), (3, 8)]
_UNSUPPORTED = sys.version_info[:2] not in _VERS
_ignore = set([
"detach",
"peek",
"read1",
"readinto1",
"seek",
# no kwargs
"readinto",
"readline",
"truncate",
"write",
# random
"FileHeader",
"from_file",
"testzip",
"writestr",
])
def load_cls_names(maincls):
assert inspect.isclass(maincls)
res = {}
for cls in inspect.getmro(maincls):
for name, val in inspect.getmembers(cls):
if name not in res:
res[name] = val
return res
def cleansig(sig):
res = str(sig).replace(", /", "")
if "*" in res:
res = res.split(", *")[0] + ")"
return res
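# Worked example (illustrative): for a zipfile-style signature such as
# "(self, name, /, *, pwd=None)", cleansig first drops ", /" and then cuts
# everything from ", *" onwards, yielding "(self, name)".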
def compare(rmaincls, zmaincls):
znames = load_cls_names(zmaincls)
rnames = load_cls_names(rmaincls)
for name, zval in znames.items():
if not inspect.isroutine(zval) or name[0] == "_" or name in _ignore:
continue
assert name in rnames, "member not found: \"%s\"" % name
rval = rnames[name]
zsig = inspect.signature(zval)
rsig = inspect.signature(rval)
zsigstr = cleansig(zsig)
rsigstr = cleansig(rsig)
assert zsigstr == rsigstr, "sig differs: %s.%s%s != %s.%s%s" % (
rmaincls.__name__, name, rsigstr,
zmaincls.__name__, name, zsigstr)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipfile():
compare(rarfile.RarFile, zipfile.ZipFile)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipextfile():
compare(rarfile.RarExtFile, zipfile.ZipExtFile)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipinfo():
compare(rarfile.RarInfo, zipfile.ZipInfo)
|
the-stack_0_12998 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Original Author = Jacob Morris
# URL = blendingjacob.blogspot.com
# Note: scene properties are moved into __init__ together with the 3 update functions.
# For properties, search for the name patterns adv_obj and advanced_objects
bl_info = {
"name": "CubeSter",
"author": "Jacob Morris",
"version": (0, 7, 1),
"blender": (2, 78, 0),
"location": "View 3D > Toolbar > CubeSter",
"description": "Takes image, image sequence, or audio file and converts it "
"into a height map based on pixel color and alpha values",
"category": "Add Mesh"
}
import bpy
import bmesh
from bpy.types import (
Operator,
Panel,
)
import timeit
from random import uniform
from math import radians
from os import (
path,
listdir,
)
# create block at center position x, y with block width 2 * hx and 2 * hy and height of h
def create_block(x, y, hw, h, verts: list, faces: list):
if bpy.context.scene.advanced_objects.cubester_block_style == "size":
z = 0.0
else:
z = h
h = 2 * hw
p = len(verts)
verts += [(x - hw, y - hw, z), (x + hw, y - hw, z), (x + hw, y + hw, z), (x - hw, y + hw, z)]
verts += [(x - hw, y - hw, z + h), (x + hw, y - hw, z + h),
(x + hw, y + hw, z + h), (x - hw, y + hw, z + h)]
faces += [(p, p + 1, p + 5, p + 4), (p + 1, p + 2, p + 6, p + 5),
(p + 2, p + 3, p + 7, p + 6), (p, p + 4, p + 7, p + 3),
(p + 4, p + 5, p + 6, p + 7), (p, p + 3, p + 2, p + 1)]
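# Minimal usage sketch (illustrative only, outside the add-on's normal flow):
#   verts, faces = [], []
#   create_block(0.0, 0.0, 0.5, 1.0, verts, faces)
# appends the 8 corner vertices and 6 quad faces of a single 1x1 block whose
# height comes from either the pixel value or the block width, depending on
# cubester_block_style.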
# go through all frames in len(frames), adjusting values at frames[x][y]
def create_f_curves(mesh, frames, frame_step_size, style):
# use data to animate mesh
action = bpy.data.actions.new("CubeSterAnimation")
mesh.animation_data_create()
mesh.animation_data.action = action
data_path = "vertices[%d].co"
vert_index = 4 if style == "blocks" else 0 # index of first vertex
# loop for every face height value
for frame_start_vert in range(len(frames[0])):
# only go once if plane, otherwise do all four vertices that are in top plane if blocks
end_point = frame_start_vert + 4 if style == "blocks" else frame_start_vert + 1
# loop through to get the four vertices that compose the face
for frame_vert in range(frame_start_vert, end_point):
# fcurves for x, y, z
fcurves = [action.fcurves.new(data_path % vert_index, i) for i in range(3)]
frame_counter = 0 # go through each frame and add position
temp_v = mesh.vertices[vert_index].co
# loop through frames
for frame in frames:
# new x, y, z positions
vals = [temp_v[0], temp_v[1], frame[frame_start_vert]]
for i in range(3): # for each x, y, z set each corresponding fcurve
fcurves[i].keyframe_points.insert(frame_counter, vals[i], {'FAST'})
frame_counter += frame_step_size # skip frames for smoother animation
vert_index += 1
# only skip vertices if made of blocks
if style == "blocks":
vert_index += 4
# create material with given name, apply to object
def create_material(scene, ob, name):
mat = bpy.data.materials.new("CubeSter_" + name)
adv_obj = scene.advanced_objects
image = None
# image
if not adv_obj.cubester_use_image_color and adv_obj.cubester_color_image in bpy.data.images:
try:
image = bpy.data.images[adv_obj.cubester_color_image]
except:
pass
else:
try:
image = bpy.data.images[adv_obj.cubester_image]
except:
pass
if scene.render.engine == "CYCLES":
mat.use_nodes = True
nodes = mat.node_tree.nodes
att = nodes.new("ShaderNodeAttribute")
att.attribute_name = "Col"
att.location = (-200, 300)
att = nodes.new("ShaderNodeTexImage")
if image:
att.image = image
if adv_obj.cubester_load_type == "multiple":
att.image.source = "SEQUENCE"
att.location = (-200, 700)
att = nodes.new("ShaderNodeTexCoord")
att.location = (-450, 600)
if adv_obj.cubester_materials == "image":
mat.node_tree.links.new(
nodes["Image Texture"].outputs[0],
nodes["Diffuse BSDF"].inputs[0]
)
mat.node_tree.links.new(
nodes["Texture Coordinate"].outputs[2],
nodes["Image Texture"].inputs[0]
)
else:
mat.node_tree.links.new(
nodes["Attribute"].outputs[0],
nodes["Diffuse BSDF"].inputs[0]
)
else:
if adv_obj.cubester_materials == "image" or scene.render.engine != "BLENDER_RENDER":
tex = bpy.data.textures.new("CubeSter_" + name, "IMAGE")
if image:
tex.image = image
slot = mat.texture_slots.add()
slot.texture = tex
else:
mat.use_vertex_color_paint = True
ob.data.materials.append(mat)
# generate mesh from audio
def create_mesh_from_audio(self, scene, verts, faces):
adv_obj = scene.advanced_objects
audio_filepath = adv_obj.cubester_audio_path
width = adv_obj.cubester_audio_width_blocks
length = adv_obj.cubester_audio_length_blocks
size_per_hundred = adv_obj.cubester_size_per_hundred_pixels
size = size_per_hundred / 100
# create all blocks
y = -(width / 2) * size + (size / 2)
for r in range(width):
x = -(length / 2) * size + (size / 2)
for c in range(length):
create_block(x, y, size / 2, 1, verts, faces)
x += size
y += size
# create object
mesh = bpy.data.meshes.new("cubed")
mesh.from_pydata(verts, [], faces)
ob = bpy.data.objects.new("cubed", mesh)
bpy.context.scene.objects.link(ob)
bpy.context.scene.objects.active = ob
ob.select = True
    # initial vertex colors
if adv_obj.cubester_materials == "image" and adv_obj.cubester_color_image != "":
picture = bpy.data.images[adv_obj.cubester_color_image]
pixels = list(picture.pixels)
vert_colors = []
skip_y = int(picture.size[1] / width)
skip_x = int(picture.size[0] / length)
for row in range(0, picture.size[1], skip_y + 1):
# go through each column, step by appropriate amount
for column in range(0, picture.size[0] * 4, 4 + skip_x * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
vert_colors += [(r, g, b) for i in range(24)]
bpy.ops.mesh.vertex_color_add()
i = 0
vert_colors_size = len(vert_colors)
for c in ob.data.vertex_colors[0].data:
if i < vert_colors_size:
c.color = vert_colors[i]
i += 1
# image sequence handling
if adv_obj.cubester_load_type == "multiple":
images = find_sequence_images(self, bpy.context)
frames_vert_colors = []
max_images = adv_obj.cubester_max_images + 1 if \
len(images[0]) > adv_obj.cubester_max_images else len(images[0])
# goes through and for each image for each block finds new height
for image_index in range(0, max_images, adv_obj.cubester_skip_images):
filepath = images[0][image_index]
name = images[1][image_index]
picture = fetch_image(self, name, filepath)
pixels = list(picture.pixels)
frame_colors = []
for row in range(0, picture.size[1], skip_y + 1):
for column in range(0, picture.size[0] * 4, 4 + skip_x * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
frame_colors += [(r, g, b) for i in range(24)]
frames_vert_colors.append(frame_colors)
adv_obj.cubester_vertex_colors[ob.name] = \
{"type": "vertex", "frames": frames_vert_colors,
"frame_skip": adv_obj.cubester_frame_step,
"total_images": max_images}
# either add material or create
if ("CubeSter_" + "Vertex") in bpy.data.materials:
ob.data.materials.append(bpy.data.materials["CubeSter_" + "Vertex"])
else:
create_material(scene, ob, "Vertex")
# set keyframe for each object as initial point
frame = [1 for i in range(int(len(verts) / 8))]
frames = [frame]
area = bpy.context.area
old_type = area.type
area.type = "GRAPH_EDITOR"
scene.frame_current = 0
create_f_curves(mesh, frames, 1, "blocks")
# deselect all fcurves
fcurves = ob.data.animation_data.action.fcurves.data.fcurves
for i in fcurves:
i.select = False
    max_freq = adv_obj.cubester_audio_max_freq
    min_freq = adv_obj.cubester_audio_min_freq
    freq_frame = adv_obj.cubester_audio_offset_type
    freq_step = (max_freq - min_freq) / length
freq_sub_step = freq_step / width
frame_step = adv_obj.cubester_audio_frame_offset
# animate each block with a portion of the frequency
for c in range(length):
frame_off = 0
for r in range(width):
if freq_frame == "frame":
scene.frame_current = frame_off
l = c * freq_step
h = (c + 1) * freq_step
frame_off += frame_step
else:
l = c * freq_step + (r * freq_sub_step)
h = c * freq_step + ((r + 1) * freq_sub_step)
pos = c + (r * length) # block number
index = pos * 4 # first index for vertex
# select curves
for i in range(index, index + 4):
curve = i * 3 + 2 # fcurve location
fcurves[curve].select = True
try:
bpy.ops.graph.sound_bake(filepath=bpy.path.abspath(audio_filepath), low=l, high=h)
except:
pass
# deselect curves
for i in range(index, index + 4):
curve = i * 3 + 2 # fcurve location
fcurves[curve].select = False
area.type = old_type
# UV unwrap
create_uv_map(bpy.context, width, length)
# if radial apply needed modifiers
if adv_obj.cubester_audio_block_layout == "radial":
# add bezier curve of correct width
bpy.ops.curve.primitive_bezier_circle_add()
curve = bpy.context.object
# slope determined off of collected data
curve_size = (0.319 * (width * (size * 100)) - 0.0169) / 100
curve.dimensions = (curve_size, curve_size, 0.0)
# correct for z height
curve.scale = (curve.scale[0], curve.scale[0], curve.scale[0])
ob.select = True
curve.select = False
scene.objects.active = ob
# data was collected and then multi-variable regression was done in Excel
# influence of width and length
width_infl, length_infl, intercept = -0.159125, 0.49996, 0.007637
x_offset = ((width * (size * 100) * width_infl) +
(length * (size * 100) * length_infl) + intercept) / 100
ob.location = (ob.location[0] + x_offset, ob.location[1], ob.location[2])
ob.rotation_euler = (radians(-90), 0.0, 0.0)
bpy.ops.object.modifier_add(type="CURVE")
ob.modifiers["Curve"].object = curve
ob.modifiers["Curve"].deform_axis = "POS_Z"
# generate mesh from image(s)
def create_mesh_from_image(self, scene, verts, faces):
context = bpy.context
adv_obj = scene.advanced_objects
picture = bpy.data.images[adv_obj.cubester_image]
pixels = list(picture.pixels)
x_pixels = picture.size[0] / (adv_obj.cubester_skip_pixels + 1)
y_pixels = picture.size[1] / (adv_obj.cubester_skip_pixels + 1)
width = x_pixels / 100 * adv_obj.cubester_size_per_hundred_pixels
height = y_pixels / 100 * adv_obj.cubester_size_per_hundred_pixels
step = width / x_pixels
half_width = step / 2
y = -height / 2 + half_width
vert_colors = []
weights = [uniform(0.0, 1.0) for i in range(4)] # random weights
rows = 0
# go through each row of pixels stepping by adv_obj.cubester_skip_pixels + 1
for row in range(0, picture.size[1], adv_obj.cubester_skip_pixels + 1):
rows += 1
x = -width / 2 + half_width # reset to left edge of mesh
# go through each column, step by appropriate amount
for column in range(0, picture.size[0] * 4, 4 + adv_obj.cubester_skip_pixels * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
h = find_point_height(r, g, b, a, scene)
# if not transparent
if h != -1:
if adv_obj.cubester_mesh_style == "blocks":
create_block(x, y, half_width, h, verts, faces)
vert_colors += [(r, g, b) for i in range(24)]
else:
verts += [(x, y, h)]
vert_colors += [(r, g, b) for i in range(4)]
x += step
y += step
# if plane not blocks, then remove last 4 items from vertex_colors
# as the faces have already wrapped around
if adv_obj.cubester_mesh_style == "plane":
del vert_colors[len(vert_colors) - 4:len(vert_colors)]
# create faces if plane based and not block based
if adv_obj.cubester_mesh_style == "plane":
off = int(len(verts) / rows)
for r in range(rows - 1):
for c in range(off - 1):
faces += [(r * off + c, r * off + c + 1, (r + 1) * off + c + 1, (r + 1) * off + c)]
mesh = bpy.data.meshes.new("cubed")
mesh.from_pydata(verts, [], faces)
ob = bpy.data.objects.new("cubed", mesh)
context.scene.objects.link(ob)
context.scene.objects.active = ob
ob.select = True
# uv unwrap
if adv_obj.cubester_mesh_style == "blocks":
create_uv_map(context, rows, int(len(faces) / 6 / rows))
else:
create_uv_map(context, rows - 1, int(len(faces) / (rows - 1)))
# material
# determine name and if already created
if adv_obj.cubester_materials == "vertex": # vertex color
image_name = "Vertex"
elif not adv_obj.cubester_use_image_color and \
adv_obj.cubester_color_image in bpy.data.images and \
adv_obj.cubester_materials == "image": # replaced image
image_name = adv_obj.cubester_color_image
else: # normal image
image_name = adv_obj.cubester_image
# either add material or create
if ("CubeSter_" + image_name) in bpy.data.materials:
ob.data.materials.append(bpy.data.materials["CubeSter_" + image_name])
# create material
else:
create_material(scene, ob, image_name)
# vertex colors
bpy.ops.mesh.vertex_color_add()
i = 0
for c in ob.data.vertex_colors[0].data:
c.color = vert_colors[i]
i += 1
frames = []
# image sequence handling
if adv_obj.cubester_load_type == "multiple":
images = find_sequence_images(self, context)
frames_vert_colors = []
max_images = adv_obj.cubester_max_images + 1 if \
len(images[0]) > adv_obj.cubester_max_images else len(images[0])
# goes through and for each image for each block finds new height
for image_index in range(0, max_images, adv_obj.cubester_skip_images):
filepath = images[0][image_index]
name = images[1][image_index]
picture = fetch_image(self, name, filepath)
pixels = list(picture.pixels)
frame_heights = []
frame_colors = []
for row in range(0, picture.size[1], adv_obj.cubester_skip_pixels + 1):
for column in range(0, picture.size[0] * 4, 4 + adv_obj.cubester_skip_pixels * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
h = find_point_height(r, g, b, a, scene)
if h != -1:
frame_heights.append(h)
if adv_obj.cubester_mesh_style == "blocks":
frame_colors += [(r, g, b) for i in range(24)]
else:
frame_colors += [(r, g, b) for i in range(4)]
if adv_obj.cubester_mesh_style == "plane":
del vert_colors[len(vert_colors) - 4:len(vert_colors)]
frames.append(frame_heights)
frames_vert_colors.append(frame_colors)
# determine what data to use
        if adv_obj.cubester_materials == "vertex" or scene.render.engine == "BLENDER_RENDER":
adv_obj.cubester_vertex_colors[ob.name] = {
"type": "vertex", "frames": frames_vert_colors,
"frame_skip": adv_obj.cubester_frame_step,
"total_images": max_images
}
else:
adv_obj.cubester_vertex_colors[ob.name] = {
"type": "image", "frame_skip": scene.cubester_frame_step,
"total_images": max_images
}
att = get_image_node(ob.data.materials[0])
att.image_user.frame_duration = len(frames) * adv_obj.cubester_frame_step
# animate mesh
create_f_curves(
mesh, frames,
adv_obj.cubester_frame_step,
adv_obj.cubester_mesh_style
)
# generate uv map for object
def create_uv_map(context, rows, columns):
adv_obj = context.scene.advanced_objects
mesh = context.object.data
mesh.uv_textures.new("cubester")
bm = bmesh.new()
bm.from_mesh(mesh)
uv_layer = bm.loops.layers.uv[0]
bm.faces.ensure_lookup_table()
x_scale = 1 / columns
y_scale = 1 / rows
y_pos = 0.0
x_pos = 0.0
count = columns - 1 # hold current count to compare to if need to go to next row
# if blocks
if adv_obj.cubester_mesh_style == "blocks":
for fa in range(int(len(bm.faces) / 6)):
for i in range(6):
pos = (fa * 6) + i
bm.faces[pos].loops[0][uv_layer].uv = (x_pos, y_pos)
bm.faces[pos].loops[1][uv_layer].uv = (x_pos + x_scale, y_pos)
bm.faces[pos].loops[2][uv_layer].uv = (x_pos + x_scale, y_pos + y_scale)
bm.faces[pos].loops[3][uv_layer].uv = (x_pos, y_pos + y_scale)
x_pos += x_scale
if fa >= count:
y_pos += y_scale
x_pos = 0.0
count += columns
# if planes
else:
for fa in range(len(bm.faces)):
bm.faces[fa].loops[0][uv_layer].uv = (x_pos, y_pos)
bm.faces[fa].loops[1][uv_layer].uv = (x_pos + x_scale, y_pos)
bm.faces[fa].loops[2][uv_layer].uv = (x_pos + x_scale, y_pos + y_scale)
bm.faces[fa].loops[3][uv_layer].uv = (x_pos, y_pos + y_scale)
x_pos += x_scale
if fa >= count:
y_pos += y_scale
x_pos = 0.0
count += columns
bm.to_mesh(mesh)
# if already loaded return image, else load and return
def fetch_image(self, name, load_path):
if name in bpy.data.images:
return bpy.data.images[name]
else:
try:
image = bpy.data.images.load(load_path)
return image
except RuntimeError:
self.report({"ERROR"}, "CubeSter: '{}' could not be loaded".format(load_path))
return None
# find height for point
def find_point_height(r, g, b, a, scene):
adv_obj = scene.advanced_objects
if a: # if not completely transparent
normalize = 1
# channel weighting
if not adv_obj.cubester_advanced:
composed = 0.25 * r + 0.25 * g + 0.25 * b + 0.25 * a
else:
# user defined weighting
if not adv_obj.cubester_random_weights:
composed = adv_obj.cubester_weight_r * r + adv_obj.cubester_weight_g * g + \
adv_obj.cubester_weight_b * b + adv_obj.cubester_weight_a * a
total = adv_obj.cubester_weight_r + adv_obj.cubester_weight_g + adv_obj.cubester_weight_b + \
adv_obj.cubester_weight_a
normalize = 1 / total
# random weighting
else:
weights = [uniform(0.0, 1.0) for i in range(4)]
composed = weights[0] * r + weights[1] * g + weights[2] * b + weights[3] * a
total = weights[0] + weights[1] + weights[2] + weights[3]
normalize = 1 / total
if adv_obj.cubester_invert:
h = (1 - composed) * adv_obj.cubester_height_scale * normalize
else:
h = composed * adv_obj.cubester_height_scale * normalize
return h
else:
return -1
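# Worked example (illustrative): with the default equal weighting, a pixel of
# r=g=b=0.5, a=1.0 gives composed = 0.25*(0.5+0.5+0.5+1.0) = 0.625, so the
# block height is 0.625 * cubester_height_scale, or (1 - 0.625) * scale when
# cubester_invert is enabled.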
# find all images that would belong to sequence
def find_sequence_images(self, context):
scene = context.scene
images = [[], []]
if scene.advanced_objects.cubester_image in bpy.data.images:
image = bpy.data.images[scene.advanced_objects.cubester_image]
main = image.name.split(".")[0]
# first part of name to check against other files
length = len(main)
keep_going = True
for i in range(length - 1, -1, -1):
if main[i].isdigit() and keep_going:
length -= 1
else:
keep_going = not keep_going
name = main[0:length]
dir_name = path.dirname(bpy.path.abspath(image.filepath))
try:
for file in listdir(dir_name):
if path.isfile(path.join(dir_name, file)) and file.startswith(name):
images[0].append(path.join(dir_name, file))
images[1].append(file)
except FileNotFoundError:
self.report({"ERROR"}, "CubeSter: '{}' directory not found".format(dir_name))
return images
# get image node
def get_image_node(mat):
nodes = mat.node_tree.nodes
att = nodes["Image Texture"]
return att
# get the RGBA values from pixel
def get_pixel_values(picture, pixels, row, column):
# determine i position to start at based on row and column position
i = (row * picture.size[0] * 4) + column
pixs = pixels[i: i + 4]
r = pixs[0]
g = pixs[1]
b = pixs[2]
a = pixs[3]
return r, g, b, a
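# Worked example (illustrative): for a 4-pixel-wide image, row=1 and column=8
# give i = 1*4*4 + 8 = 24, i.e. the four floats of the third pixel in the
# second row (pixels are stored row-major, four floats per pixel).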
# frame change handler for materials
def material_frame_handler(scene):
frame = scene.frame_current
adv_obj = scene.advanced_objects
keys = list(adv_obj.cubester_vertex_colors.keys())
# get keys and see if object is still in scene
for i in keys:
# if object is in scene then update information
if i in bpy.data.objects:
ob = bpy.data.objects[i]
            data = adv_obj.cubester_vertex_colors[ob.name]
skip_frames = data["frame_skip"]
# update materials using vertex colors
if data['type'] == "vertex":
colors = data["frames"]
if frame % skip_frames == 0 and 0 <= frame < (data['total_images'] - 1) * skip_frames:
use_frame = int(frame / skip_frames)
color = colors[use_frame]
i = 0
for c in ob.data.vertex_colors[0].data:
c.color = color[i]
i += 1
else:
att = get_image_node(ob.data.materials[0])
offset = frame - int(frame / skip_frames)
att.image_user.frame_offset = -offset
# if the object is no longer in the scene then delete then entry
else:
            del adv_obj.cubester_vertex_colors[i]
class CubeSterPanel(Panel):
bl_idname = "OBJECT_PT.cubester"
bl_label = "CubeSter"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Create"
bl_options = {"DEFAULT_CLOSED"}
bl_context = "objectmode"
def draw(self, context):
layout = self.layout.box()
scene = bpy.context.scene
adv_obj = scene.advanced_objects
images_found = 0
rows = 0
columns = 0
layout.prop(adv_obj, "cubester_audio_image")
if adv_obj.cubester_audio_image == "image":
box = layout.box()
box.prop(adv_obj, "cubester_load_type")
box.label("Image To Convert:")
box.prop_search(adv_obj, "cubester_image", bpy.data, "images")
box.prop(adv_obj, "cubester_load_image")
            # find number of appropriate images if sequence
if adv_obj.cubester_load_type == "multiple":
box = layout.box()
# display number of images found there
images = find_sequence_images(self, context)
images_found = len(images[0]) if len(images[0]) <= adv_obj.cubester_max_images \
else adv_obj.cubester_max_images
if len(images[0]):
box.label(str(len(images[0])) + " Images Found", icon="PACKAGE")
box.prop(adv_obj, "cubester_max_images")
box.prop(adv_obj, "cubester_skip_images")
box.prop(adv_obj, "cubester_frame_step")
box = layout.box()
col = box.column(align=True)
col.prop(adv_obj, "cubester_skip_pixels")
col.prop(adv_obj, "cubester_size_per_hundred_pixels")
col.prop(adv_obj, "cubester_height_scale")
box.prop(adv_obj, "cubester_invert", icon="FILE_REFRESH")
box = layout.box()
box.prop(adv_obj, "cubester_mesh_style", icon="MESH_GRID")
if adv_obj.cubester_mesh_style == "blocks":
box.prop(adv_obj, "cubester_block_style")
else:
# audio file
layout.prop(adv_obj, "cubester_audio_path")
box = layout.box()
col = box.column(align=True)
col.prop(adv_obj, "cubester_audio_min_freq")
col.prop(adv_obj, "cubester_audio_max_freq")
box.separator()
box.prop(adv_obj, "cubester_audio_offset_type")
if adv_obj.cubester_audio_offset_type == "frame":
box.prop(adv_obj, "cubester_audio_frame_offset")
box.prop(adv_obj, "cubester_audio_block_layout")
box.separator()
col = box.column(align=True)
col.prop(adv_obj, "cubester_audio_width_blocks")
col.prop(adv_obj, "cubester_audio_length_blocks")
rows = adv_obj.cubester_audio_width_blocks
columns = adv_obj.cubester_audio_length_blocks
col.prop(adv_obj, "cubester_size_per_hundred_pixels")
# materials
box = layout.box()
box.prop(adv_obj, "cubester_materials", icon="MATERIAL")
if adv_obj.cubester_materials == "image":
box.prop(adv_obj, "cubester_load_type")
            # find number of appropriate images if sequence
if adv_obj.cubester_load_type == "multiple":
# display number of images found there
images = find_sequence_images(self, context)
images_found = len(images[0]) if len(images[0]) <= adv_obj.cubester_max_images \
else adv_obj.cubester_max_images
if len(images[0]):
box.label(str(len(images[0])) + " Images Found", icon="PACKAGE")
box.prop(adv_obj, "cubester_max_images")
box.prop(adv_obj, "cubester_skip_images")
box.prop(adv_obj, "cubester_frame_step")
box.separator()
if adv_obj.cubester_audio_image == "image":
box.prop(adv_obj, "cubester_use_image_color", icon="COLOR")
if not adv_obj.cubester_use_image_color or adv_obj.cubester_audio_image == "audio":
box.label("Image To Use For Colors:")
box.prop_search(adv_obj, "cubester_color_image", bpy.data, "images")
box.prop(adv_obj, "cubester_load_color_image")
if adv_obj.cubester_image in bpy.data.images:
rows = int(bpy.data.images[adv_obj.cubester_image].size[1] /
(adv_obj.cubester_skip_pixels + 1))
columns = int(bpy.data.images[adv_obj.cubester_image].size[0] /
(adv_obj.cubester_skip_pixels + 1))
box = layout.box()
if adv_obj.cubester_mesh_style == "blocks":
box.label("Approximate Cube Count: " + str(rows * columns))
box.label("Expected Verts/Faces: " + str(rows * columns * 8) + " / " + str(rows * columns * 6))
else:
box.label("Approximate Point Count: " + str(rows * columns))
box.label("Expected Verts/Faces: " + str(rows * columns) + " / " + str(rows * (columns - 1)))
# blocks and plane generation time values
if adv_obj.cubester_mesh_style == "blocks":
slope = 0.0000876958
intercept = 0.02501
block_infl, frame_infl, intercept2 = 0.0025934, 0.38507, -0.5840189
else:
slope = 0.000017753
intercept = 0.04201
block_infl, frame_infl, intercept2 = 0.000619, 0.344636, -0.272759
# if creating image based mesh
points = rows * columns
if adv_obj.cubester_audio_image == "image":
if adv_obj.cubester_load_type == "single":
time = rows * columns * slope + intercept # approximate time count for mesh
else:
time = (points * slope) + intercept + (points * block_infl) + \
(images_found / adv_obj.cubester_skip_images * frame_infl) + intercept2
box.label("Images To Be Used: " + str(int(images_found / adv_obj.cubester_skip_images)))
else:
# audio based mesh
box.label("Audio Track Length: " + str(adv_obj.cubester_audio_file_length) + " frames")
block_infl, frame_infl, intercept = 0.0948, 0.0687566, -25.85985
time = (points * block_infl) + (adv_obj.cubester_audio_file_length * frame_infl) + intercept
if time < 0.0: # usually no audio loaded
time = 0.0
time_mod = "s"
if time > 60: # convert to minutes if needed
time /= 60
time_mod = "min"
time = round(time, 3)
box.label("Expected Time: " + str(time) + " " + time_mod)
# advanced
if adv_obj.cubester_audio_image == "image":
icon_1 = "TRIA_DOWN" if adv_obj.cubester_advanced else "TRIA_RIGHT"
# layout.separator()
box = layout.box()
box.prop(adv_obj, "cubester_advanced", icon=icon_1)
if adv_obj.cubester_advanced:
box.prop(adv_obj, "cubester_random_weights", icon="RNDCURVE")
if not adv_obj.cubester_random_weights:
box.label("RGBA Channel Weights", icon="COLOR")
col = box.column(align=True)
col.prop(adv_obj, "cubester_weight_r")
col.prop(adv_obj, "cubester_weight_g")
col.prop(adv_obj, "cubester_weight_b")
col.prop(adv_obj, "cubester_weight_a")
# generate mesh
layout.operator("mesh.cubester", icon="OBJECT_DATA")
class CubeSter(Operator):
bl_idname = "mesh.cubester"
bl_label = "Generate Mesh"
bl_description = "Generate a mesh from an Image or Sound File"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
verts, faces = [], []
start = timeit.default_timer()
scene = bpy.context.scene
adv_obj = scene.advanced_objects
if adv_obj.cubester_audio_image == "image":
if adv_obj.cubester_image != "":
create_mesh_from_image(self, scene, verts, faces)
frames = find_sequence_images(self, context)
created = len(frames[0])
else:
self.report({'WARNING'},
"Please add an Image for Object generation. Operation Cancelled")
return {"CANCELLED"}
else:
if (adv_obj.cubester_audio_path != "" and
path.isfile(adv_obj.cubester_audio_path) and adv_obj.cubester_check_audio is True):
create_mesh_from_audio(self, scene, verts, faces)
created = adv_obj.cubester_audio_file_length
else:
self.report({'WARNING'},
"Please add an Sound File for Object generation. Operation Cancelled")
return {"CANCELLED"}
stop = timeit.default_timer()
if adv_obj.cubester_mesh_style == "blocks" or adv_obj.cubester_audio_image == "audio":
self.report({"INFO"},
"CubeSter: {} blocks and {} frame(s) "
"in {}s".format(str(int(len(verts) / 8)),
str(created),
str(round(stop - start, 4)))
)
else:
self.report({"INFO"},
"CubeSter: {} points and {} frame(s) "
"in {}s" .format(str(len(verts)),
str(created),
str(round(stop - start, 4)))
)
return {"FINISHED"}
def register():
bpy.utils.register_module(__name__)
bpy.app.handlers.frame_change_pre.append(material_frame_handler)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.app.handlers.frame_change_pre.remove(material_frame_handler)
if __name__ == "__main__":
register()
|
the-stack_0_12999 | """Redis cache backend."""
import random
import re
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.serializers.base import PickleSerializer
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class RedisSerializer(PickleSerializer):
"""
    Similar to PickleSerializer, except integers are serialized as native Redis
integers for better incr() and decr() atomicity.
"""
def dumps(self, obj):
        # Only skip pickling for integers; an int subclass such as bool should
        # still be pickled.
if type(obj) is int:
return obj
return super().dumps(obj)
def loads(self, data):
try:
return int(data)
except ValueError:
return super().loads(data)
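# Illustrative round-trips (a sketch, not part of the public API surface):
#   RedisSerializer().dumps(3)     -> 3 (stored as a native Redis integer)
#   RedisSerializer().dumps(True)  -> pickled bytes (bool stays pickled)
#   RedisSerializer().loads(b"3")  -> 3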
class RedisCacheClient:
def __init__(
self,
servers,
serializer=None,
db=None,
pool_class=None,
parser_class=None,
):
import redis
self._lib = redis
self._servers = servers
self._pools = {}
self._client = self._lib.Redis
if isinstance(pool_class, str):
pool_class = import_string(pool_class)
self._pool_class = pool_class or self._lib.ConnectionPool
if isinstance(serializer, str):
serializer = import_string(serializer)
if callable(serializer):
serializer = serializer()
self._serializer = serializer or RedisSerializer()
if isinstance(parser_class, str):
parser_class = import_string(parser_class)
parser_class = parser_class or self._lib.connection.DefaultParser
self._pool_options = {"parser_class": parser_class, "db": db}
def _get_connection_pool_index(self, write):
# Write to the first server. Read from other servers if there are more,
# otherwise read from the first server.
if write or len(self._servers) == 1:
return 0
return random.randint(1, len(self._servers) - 1)
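    # Illustrative split (a sketch): with servers ["redis://a", "redis://b",
    # "redis://c"], every write goes to "redis://a" while reads are spread at
    # random over "redis://b" and "redis://c".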
def _get_connection_pool(self, write):
index = self._get_connection_pool_index(write)
if index not in self._pools:
self._pools[index] = self._pool_class.from_url(
self._servers[index],
**self._pool_options,
)
return self._pools[index]
def get_client(self, key=None, *, write=False):
        # key is used so that the method signature remains the same and a custom
        # cache client can be implemented which might require the key to select
        # the server, e.g. sharding.
pool = self._get_connection_pool(write)
return self._client(connection_pool=pool)
def add(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
if ret := bool(client.set(key, value, nx=True)):
client.delete(key)
return ret
else:
return bool(client.set(key, value, ex=timeout, nx=True))
def get(self, key, default):
client = self.get_client(key)
value = client.get(key)
return default if value is None else self._serializer.loads(value)
def set(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
client.delete(key)
else:
client.set(key, value, ex=timeout)
def touch(self, key, timeout):
client = self.get_client(key, write=True)
if timeout is None:
return bool(client.persist(key))
else:
return bool(client.expire(key, timeout))
def delete(self, key):
client = self.get_client(key, write=True)
return bool(client.delete(key))
def get_many(self, keys):
client = self.get_client(None)
ret = client.mget(keys)
return {
k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None
}
def has_key(self, key):
client = self.get_client(key)
return bool(client.exists(key))
def incr(self, key, delta):
client = self.get_client(key)
if not client.exists(key):
raise ValueError("Key '%s' not found." % key)
return client.incr(key, delta)
def set_many(self, data, timeout):
client = self.get_client(None, write=True)
pipeline = client.pipeline()
pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()})
if timeout is not None:
# Setting timeout for each key as redis does not support timeout
# with mset().
for key in data:
pipeline.expire(key, timeout)
pipeline.execute()
def delete_many(self, keys):
client = self.get_client(None, write=True)
client.delete(*keys)
def clear(self):
client = self.get_client(None, write=True)
return bool(client.flushdb())
class RedisCache(BaseCache):
def __init__(self, server, params):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split("[;,]", server)
else:
self._servers = server
self._class = RedisCacheClient
self._options = params.get("OPTIONS", {})
@cached_property
def _cache(self):
return self._class(self._servers, **self._options)
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
# The key will be made persistent if None used as a timeout.
# Non-positive values will cause the key to be deleted.
return None if timeout is None else max(0, int(timeout))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
self._cache.set(key, value, self.get_backend_timeout(timeout))
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def delete(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.delete(key)
def get_many(self, keys, version=None):
key_map = {
self.make_and_validate_key(key, version=version): key for key in keys
}
ret = self._cache.get_many(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def has_key(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.has_key(key)
def incr(self, key, delta=1, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.incr(key, delta)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_and_validate_key(key, version=version)
safe_data[key] = value
self._cache.set_many(safe_data, self.get_backend_timeout(timeout))
return []
def delete_many(self, keys, version=None):
safe_keys = []
for key in keys:
key = self.make_and_validate_key(key, version=version)
safe_keys.append(key)
self._cache.delete_many(safe_keys)
def clear(self):
return self._cache.clear()
|
the-stack_0_13003 | import datetime
from functools import partial
import numpy as np
import regex as re
import toolz
from multipledispatch import Dispatcher
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.lineage as lin
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.sql.compiler as comp
from ibis.bigquery.datatypes import ibis_type_to_bigquery_type
from ibis.impala import compiler as impala_compiler
from ibis.impala.compiler import (
ImpalaSelect,
ImpalaTableSetFormatter,
_reduction,
fixed_arity,
unary,
)
class BigQueryUDFNode(ops.ValueOp):
pass
class BigQuerySelectBuilder(comp.SelectBuilder):
@property
def _select_class(self):
return BigQuerySelect
class BigQueryUDFDefinition(comp.DDL):
def __init__(self, expr, context):
self.expr = expr
self.context = context
def compile(self):
return self.expr.op().js
class BigQueryUnion(comp.Union):
@staticmethod
def keyword(distinct):
return 'UNION DISTINCT' if distinct else 'UNION ALL'
def find_bigquery_udf(expr):
if isinstance(expr.op(), BigQueryUDFNode):
result = expr
else:
result = None
return lin.proceed, result
class BigQueryQueryBuilder(comp.QueryBuilder):
select_builder = BigQuerySelectBuilder
union_class = BigQueryUnion
def generate_setup_queries(self):
queries = map(
partial(BigQueryUDFDefinition, context=self.context),
lin.traverse(find_bigquery_udf, self.expr),
)
# UDFs are uniquely identified by the name of the Node subclass we
# generate.
return list(
toolz.unique(queries, key=lambda x: type(x.expr.op()).__name__)
)
def build_ast(expr, context):
builder = BigQueryQueryBuilder(expr, context=context)
return builder.get_result()
def to_sql(expr, context):
query_ast = build_ast(expr, context)
compiled = query_ast.compile()
return compiled
class BigQueryContext(comp.QueryContext):
def _to_sql(self, expr, ctx):
return to_sql(expr, context=ctx)
def _extract_field(sql_attr):
def extract_field_formatter(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
return 'EXTRACT({} from {})'.format(sql_attr, arg)
return extract_field_formatter
bigquery_cast = Dispatcher('bigquery_cast')
@bigquery_cast.register(str, dt.Timestamp, dt.Integer)
def bigquery_cast_timestamp_to_integer(compiled_arg, from_, to):
return 'UNIX_MICROS({})'.format(compiled_arg)
@bigquery_cast.register(str, dt.DataType, dt.DataType)
def bigquery_cast_generate(compiled_arg, from_, to):
sql_type = ibis_type_to_bigquery_type(to)
return 'CAST({} AS {})'.format(compiled_arg, sql_type)
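# Illustrative dispatch (a sketch): casting a timestamp column to an integer
# renders as UNIX_MICROS(<col>), while any other pair of types falls through
# to the generic form, e.g. CAST(<col> AS FLOAT64) for a float64 target.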
def _cast(translator, expr):
op = expr.op()
arg, target_type = op.args
arg_formatted = translator.translate(arg)
return bigquery_cast(arg_formatted, arg.type(), target_type)
def _struct_field(translator, expr):
arg, field = expr.op().args
arg_formatted = translator.translate(arg)
return '{}.`{}`'.format(arg_formatted, field)
def _array_concat(translator, expr):
return 'ARRAY_CONCAT({})'.format(
', '.join(map(translator.translate, expr.op().args))
)
def _array_index(translator, expr):
# SAFE_OFFSET returns NULL if out of bounds
return '{}[SAFE_OFFSET({})]'.format(
*map(translator.translate, expr.op().args)
)
def _string_find(translator, expr):
haystack, needle, start, end = expr.op().args
if start is not None:
raise NotImplementedError('start not implemented for string find')
if end is not None:
raise NotImplementedError('end not implemented for string find')
return 'STRPOS({}, {}) - 1'.format(
translator.translate(haystack), translator.translate(needle)
)
def _translate_pattern(translator, pattern):
# add 'r' to string literals to indicate to BigQuery this is a raw string
return 'r' * isinstance(pattern.op(), ops.Literal) + translator.translate(
pattern
)
def _regex_search(translator, expr):
arg, pattern = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_CONTAINS({}, {})'.format(translator.translate(arg), regex)
return result
def _regex_extract(translator, expr):
arg, pattern, index = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_EXTRACT_ALL({}, {})[SAFE_OFFSET({})]'.format(
translator.translate(arg), regex, translator.translate(index)
)
return result
def _regex_replace(translator, expr):
arg, pattern, replacement = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_REPLACE({}, {}, {})'.format(
translator.translate(arg), regex, translator.translate(replacement)
)
return result
def _string_concat(translator, expr):
return 'CONCAT({})'.format(
', '.join(map(translator.translate, expr.op().arg))
)
def _string_join(translator, expr):
sep, args = expr.op().args
return 'ARRAY_TO_STRING([{}], {})'.format(
', '.join(map(translator.translate, args)), translator.translate(sep)
)
def _string_ascii(translator, expr):
(arg,) = expr.op().args
return 'TO_CODE_POINTS({})[SAFE_OFFSET(0)]'.format(
translator.translate(arg)
)
def _string_right(translator, expr):
arg, nchars = map(translator.translate, expr.op().args)
return 'SUBSTR({arg}, -LEAST(LENGTH({arg}), {nchars}))'.format(
arg=arg, nchars=nchars
)
def _array_literal_format(expr):
return str(list(expr.op().value))
def _log(translator, expr):
op = expr.op()
arg, base = op.args
arg_formatted = translator.translate(arg)
if base is None:
return 'ln({})'.format(arg_formatted)
base_formatted = translator.translate(base)
return 'log({}, {})'.format(arg_formatted, base_formatted)
def _literal(translator, expr):
if isinstance(expr, ir.NumericValue):
value = expr.op().value
if not np.isfinite(value):
return 'CAST({!r} AS FLOAT64)'.format(str(value))
# special case literal timestamp, date, and time scalars
if isinstance(expr.op(), ops.Literal):
value = expr.op().value
if isinstance(expr, ir.DateScalar):
if isinstance(value, datetime.datetime):
raw_value = value.date()
else:
raw_value = value
return "DATE '{}'".format(raw_value)
elif isinstance(expr, ir.TimestampScalar):
return "TIMESTAMP '{}'".format(value)
elif isinstance(expr, ir.TimeScalar):
# TODO: define extractors on TimeValue expressions
return "TIME '{}'".format(value)
try:
return impala_compiler._literal(translator, expr)
except NotImplementedError:
if isinstance(expr, ir.ArrayValue):
return _array_literal_format(expr)
raise NotImplementedError(type(expr).__name__)
def _arbitrary(translator, expr):
arg, how, where = expr.op().args
if where is not None:
arg = where.ifelse(arg, ibis.NA)
if how not in (None, 'first'):
raise com.UnsupportedOperationError(
'{!r} value not supported for arbitrary in BigQuery'.format(how)
)
return 'ANY_VALUE({})'.format(translator.translate(arg))
_date_units = {
'Y': 'YEAR',
'Q': 'QUARTER',
'W': 'WEEK',
'M': 'MONTH',
'D': 'DAY',
}
_timestamp_units = {
'us': 'MICROSECOND',
'ms': 'MILLISECOND',
's': 'SECOND',
'm': 'MINUTE',
'h': 'HOUR',
}
_time_units = _timestamp_units.copy()
_timestamp_units.update(_date_units)
def _truncate(kind, units):
def truncator(translator, expr):
arg, unit = expr.op().args
trans_arg = translator.translate(arg)
valid_unit = units.get(unit)
if valid_unit is None:
raise com.UnsupportedOperationError(
'BigQuery does not support truncating {} values to unit '
'{!r}'.format(arg.type(), unit)
)
return '{}_TRUNC({}, {})'.format(kind, trans_arg, valid_unit)
return truncator
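# Illustrative output (a sketch): _truncate('DATE', _date_units) turns a
# truncation to unit 'M' into DATE_TRUNC(<col>, MONTH), and raises
# UnsupportedOperationError for units missing from the mapping.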
def _timestamp_op(func, units):
def _formatter(translator, expr):
op = expr.op()
arg, offset = op.args
unit = offset.type().unit
if unit not in units:
raise com.UnsupportedOperationError(
'BigQuery does not allow binary operation '
'{} with INTERVAL offset {}'.format(func, unit)
)
formatted_arg = translator.translate(arg)
formatted_offset = translator.translate(offset)
result = '{}({}, {})'.format(func, formatted_arg, formatted_offset)
return result
return _formatter
STRFTIME_FORMAT_FUNCTIONS = {
dt.Date: 'DATE',
dt.Time: 'TIME',
dt.Timestamp: 'TIMESTAMP',
}
_operation_registry = impala_compiler._operation_registry.copy()
_operation_registry.update(
{
ops.ExtractYear: _extract_field('year'),
ops.ExtractMonth: _extract_field('month'),
ops.ExtractDay: _extract_field('day'),
ops.ExtractHour: _extract_field('hour'),
ops.ExtractMinute: _extract_field('minute'),
ops.ExtractSecond: _extract_field('second'),
ops.ExtractMillisecond: _extract_field('millisecond'),
ops.StringReplace: fixed_arity('REPLACE', 3),
ops.StringSplit: fixed_arity('SPLIT', 2),
ops.StringConcat: _string_concat,
ops.StringJoin: _string_join,
ops.StringAscii: _string_ascii,
ops.StringFind: _string_find,
ops.StrRight: _string_right,
ops.Repeat: fixed_arity('REPEAT', 2),
ops.RegexSearch: _regex_search,
ops.RegexExtract: _regex_extract,
ops.RegexReplace: _regex_replace,
ops.GroupConcat: _reduction('STRING_AGG'),
ops.IfNull: fixed_arity('IFNULL', 2),
ops.Cast: _cast,
ops.StructField: _struct_field,
ops.ArrayCollect: unary('ARRAY_AGG'),
ops.ArrayConcat: _array_concat,
ops.ArrayIndex: _array_index,
ops.ArrayLength: unary('ARRAY_LENGTH'),
ops.HLLCardinality: _reduction('APPROX_COUNT_DISTINCT'),
ops.Log: _log,
ops.Sign: unary('SIGN'),
ops.Modulus: fixed_arity('MOD', 2),
ops.Date: unary('DATE'),
# BigQuery doesn't have these operations built in.
# ops.ArrayRepeat: _array_repeat,
# ops.ArraySlice: _array_slice,
ops.Literal: _literal,
ops.Arbitrary: _arbitrary,
ops.TimestampTruncate: _truncate('TIMESTAMP', _timestamp_units),
ops.DateTruncate: _truncate('DATE', _date_units),
ops.TimeTruncate: _truncate('TIME', _timestamp_units),
ops.Time: unary('TIME'),
ops.TimestampAdd: _timestamp_op(
'TIMESTAMP_ADD', {'h', 'm', 's', 'ms', 'us'}
),
ops.TimestampSub: _timestamp_op(
            'TIMESTAMP_SUB', {'h', 'm', 's', 'ms', 'us'}
),
ops.DateAdd: _timestamp_op('DATE_ADD', {'D', 'W', 'M', 'Q', 'Y'}),
ops.DateSub: _timestamp_op('DATE_SUB', {'D', 'W', 'M', 'Q', 'Y'}),
ops.TimestampNow: fixed_arity('CURRENT_TIMESTAMP', 0),
}
)
_invalid_operations = {
ops.Translate,
ops.FindInSet,
ops.Capitalize,
ops.DateDiff,
ops.TimestampDiff,
}
_operation_registry = {
k: v
for k, v in _operation_registry.items()
if k not in _invalid_operations
}
class BigQueryExprTranslator(impala_compiler.ImpalaExprTranslator):
_registry = _operation_registry
_rewrites = impala_compiler.ImpalaExprTranslator._rewrites.copy()
context_class = BigQueryContext
def _trans_param(self, expr):
op = expr.op()
if op not in self.context.params:
raise KeyError(op)
return '@{}'.format(expr.get_name())
compiles = BigQueryExprTranslator.compiles
rewrites = BigQueryExprTranslator.rewrites
@compiles(ops.DayOfWeekIndex)
def bigquery_day_of_week_index(t, e):
arg = e.op().args[0]
arg_formatted = t.translate(arg)
return 'MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7)'.format(arg_formatted)
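# Worked example of the MOD arithmetic above (descriptive note, not new behaviour):
# BigQuery's EXTRACT(DAYOFWEEK ...) returns 1 for Sunday through 7 for Saturday,
# while ibis' DayOfWeekIndex follows the pandas convention 0 = Monday ... 6 = Sunday.
# MOD(2 + 5, 7) = 0 maps Monday to 0, and MOD(1 + 5, 7) = 6 maps Sunday to 6.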
@rewrites(ops.DayOfWeekName)
def bigquery_day_of_week_name(e):
arg = e.op().args[0]
return arg.strftime('%A')
@compiles(ops.Divide)
def bigquery_compiles_divide(t, e):
return 'IEEE_DIVIDE({}, {})'.format(*map(t.translate, e.op().args))
@compiles(ops.Strftime)
def compiles_strftime(translator, expr):
arg, format_string = expr.op().args
arg_type = arg.type()
strftime_format_func_name = STRFTIME_FORMAT_FUNCTIONS[type(arg_type)]
fmt_string = translator.translate(format_string)
arg_formatted = translator.translate(arg)
if isinstance(arg_type, dt.Timestamp):
return 'FORMAT_{}({}, {}, {!r})'.format(
strftime_format_func_name,
fmt_string,
arg_formatted,
arg_type.timezone if arg_type.timezone is not None else 'UTC',
)
return 'FORMAT_{}({}, {})'.format(
strftime_format_func_name, fmt_string, arg_formatted
)
@compiles(ops.StringToTimestamp)
def compiles_string_to_timestamp(translator, expr):
arg, format_string, timezone_arg = expr.op().args
fmt_string = translator.translate(format_string)
arg_formatted = translator.translate(arg)
if timezone_arg is not None:
timezone_str = translator.translate(timezone_arg)
return 'PARSE_TIMESTAMP({}, {}, {})'.format(
fmt_string, arg_formatted, timezone_str
)
return 'PARSE_TIMESTAMP({}, {})'.format(fmt_string, arg_formatted)
class BigQueryTableSetFormatter(ImpalaTableSetFormatter):
def _quote_identifier(self, name):
if re.match(r'^[A-Za-z][A-Za-z_0-9]*$', name):
return name
return '`{}`'.format(name)
class BigQuerySelect(ImpalaSelect):
translator = BigQueryExprTranslator
@property
def table_set_formatter(self):
return BigQueryTableSetFormatter
@rewrites(ops.IdenticalTo)
def identical_to(expr):
left, right = expr.op().args
return (left.isnull() & right.isnull()) | (left == right)
@rewrites(ops.Log2)
def log2(expr):
(arg,) = expr.op().args
return arg.log(2)
@rewrites(ops.Sum)
def bq_sum(expr):
arg = expr.op().args[0]
where = expr.op().args[1]
if isinstance(arg, ir.BooleanColumn):
return arg.cast('int64').sum(where=where)
else:
return expr
@rewrites(ops.Mean)
def bq_mean(expr):
arg = expr.op().args[0]
where = expr.op().args[1]
if isinstance(arg, ir.BooleanColumn):
return arg.cast('int64').mean(where=where)
else:
return expr
UNIT_FUNCS = {'s': 'SECONDS', 'ms': 'MILLIS', 'us': 'MICROS'}
@compiles(ops.TimestampFromUNIX)
def compiles_timestamp_from_unix(t, e):
value, unit = e.op().args
return 'TIMESTAMP_{}({})'.format(UNIT_FUNCS[unit], t.translate(value))
@compiles(ops.Floor)
def compiles_floor(t, e):
bigquery_type = ibis_type_to_bigquery_type(e.type())
arg = e.op().arg
return 'CAST(FLOOR({}) AS {})'.format(t.translate(arg), bigquery_type)
@compiles(ops.CMSMedian)
def compiles_approx(translator, expr):
expr = expr.op()
arg = expr.arg
where = expr.where
if where is not None:
arg = where.ifelse(arg, ibis.NA)
return 'APPROX_QUANTILES({}, 2)[OFFSET(1)]'.format(
translator.translate(arg)
)
@compiles(ops.Covariance)
def compiles_covar(translator, expr):
expr = expr.op()
left = expr.left
right = expr.right
where = expr.where
if expr.how == 'sample':
how = 'SAMP'
elif expr.how == 'pop':
how = 'POP'
    else:
        raise ValueError(
            "Covariance with how={!r} is not supported.".format(expr.how)
        )
    if where is not None:
        left = where.ifelse(left, ibis.NA)
        right = where.ifelse(right, ibis.NA)
    return "COVAR_{}({}, {})".format(
        how, translator.translate(left), translator.translate(right)
    )
@rewrites(ops.Any)
@rewrites(ops.All)
@rewrites(ops.NotAny)
@rewrites(ops.NotAll)
def bigquery_any_all_no_op(expr):
return expr
@compiles(ops.Any)
def bigquery_compile_any(translator, expr):
return "LOGICAL_OR({})".format(*map(translator.translate, expr.op().args))
@compiles(ops.NotAny)
def bigquery_compile_notany(translator, expr):
return "LOGICAL_AND(NOT ({}))".format(
*map(translator.translate, expr.op().args)
)
@compiles(ops.All)
def bigquery_compile_all(translator, expr):
return "LOGICAL_AND({})".format(*map(translator.translate, expr.op().args))
@compiles(ops.NotAll)
def bigquery_compile_notall(translator, expr):
return "LOGICAL_OR(NOT ({}))".format(
*map(translator.translate, expr.op().args)
)
class BigQueryDialect(impala_compiler.ImpalaDialect):
translator = BigQueryExprTranslator
dialect = BigQueryDialect
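# Illustrative sketch (not part of the original module): with the rules above
# registered, an ibis expression such as `t.a / t.b` is rendered by
# bigquery_compiles_divide as IEEE_DIVIDE(...), and `t.ts.truncate('D')` by the
# TIMESTAMP_TRUNC rule as TIMESTAMP_TRUNC(..., DAY); the exact column references
# depend on the surrounding SELECT built by BigQuerySelect.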
|
the-stack_0_13007 | import re
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import and use built-in open()
from io import open as io_open
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
def readall(*args):
with io_open(path.join(here, *args), encoding="utf-8") as fp:
return fp.read()
metadata = dict(
re.findall(r"""__([a-z]+)__ = "([^"]+)""",
readall("websocket_commands", "__init__.py"))
)
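# Illustrative sketch (hypothetical file contents, not taken from the package):
# if websocket_commands/__init__.py contained
#
#     __version__ = "0.2.0"
#     __author__ = "raphael"
#
# the regex above would produce metadata == {"version": "0.2.0", "author": "raphael"}.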
setup(
name='websocket-commands',
version=metadata['version'],
packages=['websocket_commands'],
url='http://github.com/en-lofty/websocket-commands.git',
license='',
author='raphael',
author_email='[email protected]',
description='A library that makes communicating between frontend and '
'backend websockets simple.',
install_requires=['deprecation', ]
)
|
the-stack_0_13010 | """Support for OASA Telematics from telematics.oasa.gr."""
from datetime import timedelta
import logging
from operator import itemgetter
import oasatelematics
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, DEVICE_CLASS_TIMESTAMP
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_STOP_ID = "stop_id"
ATTR_STOP_NAME = "stop_name"
ATTR_ROUTE_ID = "route_id"
ATTR_ROUTE_NAME = "route_name"
ATTR_NEXT_ARRIVAL = "next_arrival"
ATTR_SECOND_NEXT_ARRIVAL = "second_next_arrival"
ATTR_NEXT_DEPARTURE = "next_departure"
ATTRIBUTION = "Data retrieved from telematics.oasa.gr"
CONF_STOP_ID = "stop_id"
CONF_ROUTE_ID = "route_id"
DEFAULT_NAME = "OASA Telematics"
ICON = "mdi:bus"
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STOP_ID): cv.string,
vol.Required(CONF_ROUTE_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
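# Illustrative configuration.yaml entry (sketch; the stop_id/route_id values below
# are placeholders, and the platform key assumes the integration is registered
# as oasa_telematics):
#
#     sensor:
#       - platform: oasa_telematics
#         name: OASA Telematics
#         stop_id: "400051"
#         route_id: "2045"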
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the OASA Telematics sensor."""
name = config[CONF_NAME]
stop_id = config[CONF_STOP_ID]
route_id = config.get(CONF_ROUTE_ID)
data = OASATelematicsData(stop_id, route_id)
add_entities([OASATelematicsSensor(data, stop_id, route_id, name)], True)
class OASATelematicsSensor(SensorEntity):
"""Implementation of the OASA Telematics sensor."""
def __init__(self, data, stop_id, route_id, name):
"""Initialize the sensor."""
self.data = data
self._name = name
self._stop_id = stop_id
self._route_id = route_id
self._name_data = self._times = self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
params = {}
if self._times is not None:
next_arrival_data = self._times[0]
if ATTR_NEXT_ARRIVAL in next_arrival_data:
next_arrival = next_arrival_data[ATTR_NEXT_ARRIVAL]
params.update({ATTR_NEXT_ARRIVAL: next_arrival.isoformat()})
if len(self._times) > 1:
second_next_arrival_time = self._times[1][ATTR_NEXT_ARRIVAL]
if second_next_arrival_time is not None:
second_arrival = second_next_arrival_time
params.update(
{ATTR_SECOND_NEXT_ARRIVAL: second_arrival.isoformat()}
)
params.update(
{
ATTR_ROUTE_ID: self._times[0][ATTR_ROUTE_ID],
ATTR_STOP_ID: self._stop_id,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
)
params.update(
{
ATTR_ROUTE_NAME: self._name_data[ATTR_ROUTE_NAME],
ATTR_STOP_NAME: self._name_data[ATTR_STOP_NAME],
}
)
return {k: v for k, v in params.items() if v}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data from OASA API and update the states."""
self.data.update()
self._times = self.data.info
self._name_data = self.data.name_data
next_arrival_data = self._times[0]
if ATTR_NEXT_ARRIVAL in next_arrival_data:
self._state = next_arrival_data[ATTR_NEXT_ARRIVAL].isoformat()
class OASATelematicsData:
"""The class for handling data retrieval."""
def __init__(self, stop_id, route_id):
"""Initialize the data object."""
self.stop_id = stop_id
self.route_id = route_id
self.info = self.empty_result()
self.oasa_api = oasatelematics
self.name_data = {
ATTR_ROUTE_NAME: self.get_route_name(),
ATTR_STOP_NAME: self.get_stop_name(),
}
def empty_result(self):
"""Object returned when no arrivals are found."""
return [{ATTR_ROUTE_ID: self.route_id}]
def get_route_name(self):
"""Get the route name from the API."""
try:
route = self.oasa_api.getRouteName(self.route_id)
if route:
return route[0].get("route_departure_eng")
except TypeError:
_LOGGER.error("Cannot get route name from OASA API")
return None
def get_stop_name(self):
"""Get the stop name from the API."""
try:
name_data = self.oasa_api.getStopNameAndXY(self.stop_id)
if name_data:
return name_data[0].get("stop_descr_matrix_eng")
except TypeError:
_LOGGER.error("Cannot get stop name from OASA API")
return None
def update(self):
"""Get the latest arrival data from telematics.oasa.gr API."""
self.info = []
results = self.oasa_api.getStopArrivals(self.stop_id)
if not results:
self.info = self.empty_result()
return
# Parse results
results = [r for r in results if r.get("route_code") in self.route_id]
current_time = dt_util.utcnow()
for result in results:
if (btime2 := result.get("btime2")) is not None:
arrival_min = int(btime2)
timestamp = current_time + timedelta(minutes=arrival_min)
arrival_data = {
ATTR_NEXT_ARRIVAL: timestamp,
ATTR_ROUTE_ID: self.route_id,
}
self.info.append(arrival_data)
if not self.info:
_LOGGER.debug("No arrivals with given parameters")
self.info = self.empty_result()
return
# Sort the data by time
sort = sorted(self.info, key=itemgetter(ATTR_NEXT_ARRIVAL))
self.info = sort
|
the-stack_0_13012 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import logging
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import apex # pylint: disable=import-error
from apex.parallel import DistributedDataParallel # pylint: disable=import-error
from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator # pylint: disable=wrong-import-order
from nni.nas.pytorch.utils import AverageMeterGroup # pylint: disable=wrong-import-order
from .utils import CyclicIterator, TorchTensorEncoder, accuracy, reduce_metrics
PHASE_SMALL = "small"
PHASE_LARGE = "large"
class InteractiveKLLoss(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
# self.kl_loss = nn.KLDivLoss(reduction = 'batchmean')
self.kl_loss = nn.KLDivLoss()
def forward(self, student, teacher):
return self.kl_loss(F.log_softmax(student / self.temperature, dim=1),
F.softmax(teacher / self.temperature, dim=1))
class CdartsTrainer(object):
"""
CDARTS trainer.
Parameters
----------
model_small : nn.Module
PyTorch model to be trained. This is the search network of CDARTS.
model_large : nn.Module
PyTorch model to be trained. This is the evaluation network of CDARTS.
criterion : callable
Receives logits and ground truth label, return a loss tensor, e.g., ``nn.CrossEntropyLoss()``.
loaders : list of torch.utils.data.DataLoader
List of train data and valid data loaders, for training weights and architecture weights respectively.
samplers : list of torch.utils.data.Sampler
List of train data and valid data samplers. This can be PyTorch standard samplers if not distributed.
In distributed mode, sampler needs to have ``set_epoch`` method. Refer to data utils in CDARTS example for details.
logger : logging.Logger
The logger for logging. Will use nni logger by default (if logger is ``None``).
regular_coeff : float
The coefficient of regular loss.
regular_ratio : float
The ratio of regular loss.
warmup_epochs : int
The epochs to warmup the search network
fix_head : bool
        ``True`` to fix the parameters of the auxiliary heads, ``False`` to leave them trainable.
epochs : int
Number of epochs planned for training.
steps_per_epoch : int
Steps of one epoch.
    loss_alpha : float
        The coefficient balancing the classification loss against the interactive (distillation) loss.
    loss_T : float
        The temperature used by the interactive (distillation) loss.
distributed : bool
``True`` if using distributed training, else non-distributed training.
log_frequency : int
Step count per logging.
grad_clip : float
Gradient clipping for weights.
interactive_type : string
``kl`` or ``smoothl1``.
output_path : string
Log storage path.
w_lr : float
Learning rate of the search network parameters.
w_momentum : float
Momentum of the search and the evaluation network.
w_weight_decay : float
        The weight decay for the search and the evaluation network parameters.
alpha_lr : float
Learning rate of the architecture parameters.
alpha_weight_decay : float
        The weight decay for the architecture parameters.
nasnet_lr : float
Learning rate of the evaluation network parameters.
local_rank : int
        The local rank of the current process in distributed training.
share_module : bool
``True`` if sharing the stem and auxiliary heads, else not sharing these modules.
"""
def __init__(self, model_small, model_large, criterion, loaders, samplers, logger=None,
regular_coeff=5, regular_ratio=0.2, warmup_epochs=2, fix_head=True,
epochs=32, steps_per_epoch=None, loss_alpha=2, loss_T=2, distributed=True,
log_frequency=10, grad_clip=5.0, interactive_type='kl', output_path='./outputs',
w_lr=0.2, w_momentum=0.9, w_weight_decay=3e-4, alpha_lr=0.2, alpha_weight_decay=1e-4,
nasnet_lr=0.2, local_rank=0, share_module=True):
if logger is None:
logger = logging.getLogger(__name__)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
self.train_loader = CyclicIterator(train_loader, train_sampler, distributed)
self.valid_loader = CyclicIterator(valid_loader, valid_sampler, distributed)
self.regular_coeff = regular_coeff
self.regular_ratio = regular_ratio
self.warmup_epochs = warmup_epochs
self.fix_head = fix_head
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
if self.steps_per_epoch is None:
self.steps_per_epoch = min(len(self.train_loader), len(self.valid_loader))
self.loss_alpha = loss_alpha
self.grad_clip = grad_clip
if interactive_type == "kl":
self.interactive_loss = InteractiveKLLoss(loss_T)
elif interactive_type == "smoothl1":
self.interactive_loss = nn.SmoothL1Loss()
self.loss_T = loss_T
self.distributed = distributed
self.log_frequency = log_frequency
self.main_proc = not distributed or local_rank == 0
self.logger = logger
self.checkpoint_dir = output_path
if self.main_proc:
os.makedirs(self.checkpoint_dir, exist_ok=True)
if distributed:
torch.distributed.barrier()
self.model_small = model_small
self.model_large = model_large
if self.fix_head:
for param in self.model_small.aux_head.parameters():
param.requires_grad = False
for param in self.model_large.aux_head.parameters():
param.requires_grad = False
self.mutator_small = RegularizedDartsMutator(self.model_small).cuda()
self.mutator_large = DartsDiscreteMutator(self.model_large, self.mutator_small).cuda()
self.criterion = criterion
self.optimizer_small = torch.optim.SGD(self.model_small.parameters(), w_lr,
momentum=w_momentum, weight_decay=w_weight_decay)
self.optimizer_large = torch.optim.SGD(self.model_large.parameters(), nasnet_lr,
momentum=w_momentum, weight_decay=w_weight_decay)
self.optimizer_alpha = torch.optim.Adam(self.mutator_small.parameters(), alpha_lr,
betas=(0.5, 0.999), weight_decay=alpha_weight_decay)
if distributed:
apex.parallel.convert_syncbn_model(self.model_small)
apex.parallel.convert_syncbn_model(self.model_large)
self.model_small = DistributedDataParallel(self.model_small, delay_allreduce=True)
self.model_large = DistributedDataParallel(self.model_large, delay_allreduce=True)
self.mutator_small = RegularizedMutatorParallel(self.mutator_small, delay_allreduce=True)
if share_module:
self.model_small.callback_queued = True
self.model_large.callback_queued = True
# mutator large never gets optimized, so do not need parallelized
def _warmup(self, phase, epoch):
assert phase in [PHASE_SMALL, PHASE_LARGE]
if phase == PHASE_SMALL:
model, optimizer = self.model_small, self.optimizer_small
elif phase == PHASE_LARGE:
model, optimizer = self.model_large, self.optimizer_large
model.train()
meters = AverageMeterGroup()
for step in range(self.steps_per_epoch):
x, y = next(self.train_loader)
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
logits_main, _ = model(x)
loss = self.criterion(logits_main, y)
loss.backward()
self._clip_grad_norm(model)
optimizer.step()
prec1, prec5 = accuracy(logits_main, y, topk=(1, 5))
metrics = {"prec1": prec1, "prec5": prec5, "loss": loss}
metrics = reduce_metrics(metrics, self.distributed)
meters.update(metrics)
if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch):
self.logger.info("Epoch [%d/%d] Step [%d/%d] (%s) %s", epoch + 1, self.epochs,
step + 1, self.steps_per_epoch, phase, meters)
def _clip_grad_norm(self, model):
if isinstance(model, DistributedDataParallel):
nn.utils.clip_grad_norm_(model.module.parameters(), self.grad_clip)
else:
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
def _reset_nan(self, parameters):
with torch.no_grad():
for param in parameters:
for i, p in enumerate(param):
if p != p: # equivalent to `isnan(p)`
param[i] = float("-inf")
def _joint_train(self, epoch):
self.model_large.train()
self.model_small.train()
meters = AverageMeterGroup()
for step in range(self.steps_per_epoch):
trn_x, trn_y = next(self.train_loader)
val_x, val_y = next(self.valid_loader)
trn_x, trn_y = trn_x.cuda(), trn_y.cuda()
val_x, val_y = val_x.cuda(), val_y.cuda()
# step 1. optimize architecture
self.optimizer_alpha.zero_grad()
self.optimizer_large.zero_grad()
reg_decay = max(self.regular_coeff * (1 - float(epoch - self.warmup_epochs) / (
(self.epochs - self.warmup_epochs) * self.regular_ratio)), 0)
loss_regular = self.mutator_small.reset_with_loss()
if loss_regular:
loss_regular *= reg_decay
logits_search, emsemble_logits_search = self.model_small(val_x)
logits_main, emsemble_logits_main = self.model_large(val_x)
loss_cls = (self.criterion(logits_search, val_y) + self.criterion(logits_main, val_y)) / self.loss_alpha
loss_interactive = self.interactive_loss(emsemble_logits_search, emsemble_logits_main) * (self.loss_T ** 2) * self.loss_alpha
loss = loss_cls + loss_interactive + loss_regular
loss.backward()
self._clip_grad_norm(self.model_large)
self.optimizer_large.step()
self.optimizer_alpha.step()
# NOTE: need to call here `self._reset_nan(self.mutator_small.parameters())` if `cut_choices`
# step 2. optimize op weights
self.optimizer_small.zero_grad()
with torch.no_grad():
# resample architecture since parameters have been changed
self.mutator_small.reset_with_loss()
logits_search_train, _ = self.model_small(trn_x)
loss_weight = self.criterion(logits_search_train, trn_y)
loss_weight.backward()
self._clip_grad_norm(self.model_small)
self.optimizer_small.step()
metrics = {"loss_cls": loss_cls, "loss_interactive": loss_interactive,
"loss_regular": loss_regular, "loss_weight": loss_weight}
metrics = reduce_metrics(metrics, self.distributed)
meters.update(metrics)
if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch):
self.logger.info("Epoch [%d/%d] Step [%d/%d] (joint) %s", epoch + 1, self.epochs,
step + 1, self.steps_per_epoch, meters)
def train(self):
for epoch in range(self.epochs):
if epoch < self.warmup_epochs:
with torch.no_grad(): # otherwise grads will be retained on the architecture params
self.mutator_small.reset_with_loss()
self._warmup(PHASE_SMALL, epoch)
else:
with torch.no_grad():
self.mutator_large.reset()
self._warmup(PHASE_LARGE, epoch)
self._joint_train(epoch)
self.export(os.path.join(self.checkpoint_dir, "epoch_{:02d}.json".format(epoch)),
os.path.join(self.checkpoint_dir, "epoch_{:02d}.genotypes".format(epoch)))
def export(self, file, genotype_file):
if self.main_proc:
mutator_export, genotypes = self.mutator_small.export(self.logger)
with open(file, "w") as f:
json.dump(mutator_export, f, indent=2, sort_keys=True, cls=TorchTensorEncoder)
with open(genotype_file, "w") as f:
f.write(str(genotypes))
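# Illustrative usage sketch (not taken from NNI's examples; the models, loaders and
# samplers below are placeholders the caller must construct):
#
#     trainer = CdartsTrainer(model_small, model_large, nn.CrossEntropyLoss(),
#                             loaders=(train_loader, valid_loader),
#                             samplers=(train_sampler, valid_sampler),
#                             epochs=32, warmup_epochs=2, distributed=False)
#     trainer.train()
#     trainer.export("final_arch.json", "final_arch.genotypes")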
|
the-stack_0_13013 | import tensorflow as tf
from tensorflow.keras import Model
import tensorflow_addons as tfa
from tensorflow.keras.layers import Dense, Dropout, LayerNormalization, Layer
def create_padding_mask(input):
"""
    Creates a padding mask for the Transformer input: a time step is masked when the mean of its feature vector equals 0
:param input: input sequence
:return: mask
"""
input = tf.pad(input, paddings=[[0, 0], [1, 0], [0, 0]], constant_values=1)
input = tf.cast(tf.math.equal(tf.keras.backend.mean(input, axis=-1), 0), tf.float32)
# add extra dimensions to add the padding to the attention logits.
return input[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
class MultiHeadAttention(Layer):
"""
This is the standard multi-head attention layer
"""
def __init__(self, d_model, num_heads=8):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
if d_model % num_heads != 0:
raise ValueError(
f'embedding dimension = {d_model} should be divisible by number of heads = {num_heads}'
)
self.depth = d_model // num_heads
self.wq = Dense(d_model)
self.wk = Dense(d_model)
self.wv = Dense(d_model)
self.dense = Dense(d_model)
def split_heads(self, x, batch_size):
x = tf.reshape(
x, (batch_size, -1, self.num_heads, self.depth)
)
return tf.transpose(x, perm=[0, 2, 1, 3])
def scaled_dot_product_attention(self, query, key, value, mask):
matmul_qk = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = matmul_qk / tf.math.sqrt(dim_key)
if mask is not None:
scaled_score += (mask * -1e9)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
def call(self, inputs, mask):
batch_size = tf.shape(inputs)[0]
query = self.wq(inputs)
key = self.wk(inputs)
value = self.wv(inputs)
query = self.split_heads(query, batch_size)
key = self.split_heads(key, batch_size)
value = self.split_heads(value, batch_size)
attention, weights = self.scaled_dot_product_attention(query, key, value, mask)
attention = tf.transpose(attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(
attention, (batch_size, -1, self.d_model)
)
output = self.dense(concat_attention)
return output, weights
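# Illustrative shape check (sketch, not part of the model): with d_model=8 and
# num_heads=2, a batch of 4 sequences of length 10 keeps its leading dimensions
# and only the feature axis is projected back to d_model.
#
#     mha = MultiHeadAttention(d_model=8, num_heads=2)
#     out, weights = mha(tf.zeros([4, 10, 8]), mask=None)
#     # out.shape == (4, 10, 8); weights.shape == (4, 2, 10, 10)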
class TransformerBlock(Layer):
"""
This is the standard Transformer block
"""
def __init__(self, d_model, num_heads, dff, dropout=0.1):
super(TransformerBlock, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = tf.keras.Sequential(
[Dense(dff, activation="relu"),
Dense(d_model),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def call(self, x, training, mask):
attn_output, attention_weigths = self.mha(x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class VideoQualityTransformer(Model):
"""
    Transformer for video quality assessment built from standard Transformer blocks;
    maximum_position_encoding should cover the maximum number of clips in the databases
"""
def __init__(
self,
num_layers,
d_model,
num_heads,
mlp_dim,
dropout=0.1,
maximum_position_encoding=6000
):
super(VideoQualityTransformer, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
# positional embedding is predefined with a sufficient length
self.pos_emb = self.add_weight('pos_emb', shape=(1, maximum_position_encoding, d_model))
# add video quality token
self.quality_emb = self.add_weight('quality_emb', shape=(1, 1, d_model))
# normal Transformer architecture
self.feature_proj = Dense(d_model)
self.dropout = Dropout(dropout)
self.enc_layers = [
TransformerBlock(d_model, num_heads, mlp_dim, dropout)
for _ in range(num_layers)
]
# MLP head
self.mlp_head = tf.keras.Sequential(
[
Dense(mlp_dim, activation=tfa.activations.gelu),
Dropout(dropout),
Dense(1),
]
)
def call(self, x, training):
batch_size = tf.shape(x)[0]
mask = create_padding_mask(x)
frame_length = tf.shape(x)[1]
x = self.feature_proj(x)
quality_emb = tf.broadcast_to(self.quality_emb, [batch_size, 1, self.d_model])
x = tf.concat([quality_emb, x], axis=1)
# truncate the positional embedding for shorter videos
x = x + self.pos_emb[:, : frame_length + 1, :]
x = self.dropout(x, training=training)
for layer in self.enc_layers:
x = layer(x, training, mask)
# First (CLS) is used for VQA
x = self.mlp_head(x[:, 0])
return x |
the-stack_0_13016 | import simplejson
import string
import time
import traceback
import logging
import requests
ID="api" #this is our command identifier, so with conventional commands, this is the command name
permission=0 #Min permission required to run the command (needs to be 0 as our lowest command is 0)
import collections
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
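# Illustrative example (sketch): update() merges nested mappings recursively, so
# update({'A': {'f1': 1}}, {'A': {'f2': 2}}) yields {'A': {'f1': 1, 'f2': 2}},
# whereas dict.update() would replace the whole 'A' entry with {'f2': 2}.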
class ModDotaAPI:
def __init__(self):
self.requests_session = requests.Session()
self.requests_session.headers = {
'User-agent': 'ModDota_API/1.X (+http://github.com/SinZ163/ModDotaFAQ)'
}
self.ReadDump()
def fetch_page(self, url, timeout=10, decode_json=True):
request = self.requests_session.get(url, timeout=timeout)
if decode_json:
return request.json()
else:
return request.text
def ReadDump(self):
serverInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/lua_server.json")
#serverInfo = self.fetch_page("https://raw.githubusercontent.com/SinZ163/TestTracking/master/lua_server.json")
communityInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/override_lua_server.json")
self.lua_server = serverInfo.copy()
self.lua_server = update(self.lua_server, communityInfo)
#TODO: add community db here and inject into lua_server
MDAPI_logger = logging.getLogger("MDAPI_Reborn")
modDotaAPI = ModDotaAPI()
#called when the bot has loaded everything and is connected
def __initialize__(self, Startup):
pass
#the command entry point from '=api" or something
def execute(self, name, params, channel, userdata, rank):
msg = " ".join(params)
functions = []
output = channel
#TODO: add logic to figure out which dump we want
for Class, ClassInfo in modDotaAPI.lua_server.iteritems():
for FunctionName, FunctionInfo in ClassInfo["functions"].iteritems():
#print(FunctionName)
if msg.lower() in FunctionName.lower():
MDAPI_logger.info("Found a method, "+FunctionName)
functions.append((Class, FunctionName))
if len(functions) == 0:
self.sendMessage(channel, "No results found.")
if len(functions) > 5:
#pm it
if name == "DB" or len(functions) > 20:
self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). Please refine your search.")
return
else:
output = name
self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). replying privately.")
colBold = chr(2)
colItalics = chr(29)
colGreen = chr(3)+"03"
colBlue = chr(3)+"02"
colBrown = chr(3)+"07"
colEnd = chr(3)
for function in functions:
className = function[0]
functionName = function[1]
functionInfo = modDotaAPI.lua_server[className]["functions"][functionName]
argInfo = ""
description = ""
if "args" in functionInfo:
if len(functionInfo["args"]) > 0:
#We have argument info
for index, arg in enumerate(functionInfo["args"]):
if index > 0:
argInfo = argInfo + ", "
if "arg_names" in functionInfo:
if len(functionInfo["arg_names"]) > 0:
#we have argument info with named variables
argInfo = argInfo + u"{nullable}{colBrown}{argType}{colBrown}{nullable} {colBlue}{argName}{colEnd}".format(
colBrown = colBrown,
colBlue = colBlue,
colEnd = colEnd,
argType = arg,
argName = functionInfo["arg_names"][index],
nullable = colItalics if "?" in arg else ""
)
continue
argInfo = argInfo + u"{nullable}{colBrown}{argType}{colEnd}{nullable}".format(
colBrown = colBrown,
colEnd = colEnd,
argType = arg,
nullable = colItalics if "?" in arg else ""
)
if argInfo != "":
argInfo = " " + argInfo + " "
if "description" in functionInfo:
description = "{colGreen} -- {description}{colEnd}".format(
description = functionInfo["description"],
colGreen = colGreen,
colEnd = colEnd
)
#self.sendMessage(output, "["+method[0]+"] "+modDotaAPI.db[method[0]]["methods"][method[1]]["return"] + " " + method[1] + colBold+"(" + colBold + msg + colBold+")" + colBold + comment)
self.sendMessage(output, "[{colBlue}{className}{colEnd}] {colBrown}{returnType}{colEnd} {name}{bold}({bold}{argInfo}{bold}){bold} {description}".format(
bold = colBold,
italic = colItalics,
colBlue = colBlue,
colBrown = colBrown,
colEnd = colEnd,
className = className,
name = functionName,
returnType = functionInfo["return"],
argInfo = argInfo,
description = description
))
|
the-stack_0_13018 | # -*- coding: utf-8 -*-
#!/usr/bin/python
import os
import sys
import json
import argparse
import re
import requests
import codecs
from configparser import ConfigParser
from distutils.version import LooseVersion
# Hackety Hack. I can keep prestapyt as a submodule and look for the lib inside it.
# git submodule add https://github.com/prestapyt/prestapyt.git
# The package available on pip is not new enough for Prestashop 1.7
sys.path.insert(1, 'prestapyt/')
from prestapyt import PrestaShopWebServiceDict
def get_fb_catalog(ps, f, c):
plist = ps.get('products',options={'filter[active]': '1'})
lang_id = c.get('ps','lang_id')
base_url = c.get('ps', 'base_url')
print("PROCESSING: {}".format(len(plist['products']['product'])))
# field header
f.write(u'id\ttitle\tdescription\tlink\timage_link\tavailability\tprice\tcurrency\tgoogle_product_category\tbrand\tage_group\tgender\tcondition\n')
for product in plist['products']['product']:
prod = ps.get('products/'+product['attrs']['id'])
if prod['product']['active'] == '0':
print("Product not active: "+product['attrs']['id'])
continue
# id - prod['product']['reference']
id = prod['product']['reference']
# title - for name in prod['product']['name']['language']: name['value'] if lang == 'ES' else next
if isinstance(prod['product']['name']['language'], list):
for name in prod['product']['name']['language']:
if name['attrs']['id'] == lang_id:
title = name['value']
else:
title = prod['product']['name']['language']['value']
# description - for desc prod['product']['description_short']['language']: desc['value'] if lang == 'ES' else next
if isinstance(prod['product']['description_short']['language'], list):
for name in prod['product']['description_short']['language']:
if name['attrs']['id'] == lang_id:
description = re.sub('<[^>]+?>', '', name['value'])
else:
description = re.sub('<[^>]+?>', '', prod['product']['description_short']['language']['value'])
# link -
if isinstance(prod['product']['link_rewrite']['language'], list):
for ln in prod['product']['link_rewrite']['language']:
if ln['attrs']['id'] == lang_id:
link = "{0}/{1}-{2}.html".format(base_url, product['attrs']['id'], ln['value'])
else:
link = "{0}/{1}-{2}.html".format(base_url, product['attrs']['id'], prod['product']['link_rewrite']['language']['value'])
# image_link
r = requests.get("{0}/get-image.php?imageid={1}".format(base_url, prod['product']['id_default_image']['value']))
image_link = r.text
# availability -
        # TODO: handle stocks available properly when there is more than one combination
        # if stock_available is a list it means there is more than one combination;
        # for the moment we assume stock = len of the list
        stock_association = prod['product']['associations']['stock_availables']['stock_available']
        if isinstance(stock_association, list):
            stocks_avail = {'stock_available': {'quantity': str(len(stock_association))}}
        else:
            stocks_avail = ps.get('stock_availables/' + stock_association['id'])
print("ID: "+id+" Quantity: "+stocks_avail['stock_available']['quantity'])
if int(stocks_avail['stock_available']['quantity']) > 0:
print("in stock")
#if lang_id == '1':
avail = 'in stock'
#else:
# avail = 'disponible'
else:
print("out of stock")
#if lang_id == '1':
avail = 'out of stock'
#else:
# avail = 'agotado'
# price
price = "{:.2f}".format(float(prod['product']['price'])*1.21)
currency = "EUR"
# google_product_category
catemap = dict(c.items('catemap'))
try:
gpc = catemap[ prod['product']['id_category_default'] ]
except KeyError:
print("Key ERROR - Product ID: {0} Category ID: {1}".format(prod['product']['id'], prod['product']['id_category_default']))
quit()
# brand - from config
brand = c.get('general', 'brand')
# age_group - adult
age_group = 'adult'
# TODO: color
#color = ''
# gender - female
gender = 'female'
# TODO: shipping
# condition - new
condition = 'new'
# with shipping info
#print("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\t{13}".format(id, title, description, link, image_link, avail, price, gpc, brand, age_group, color, gender, shipping, condition))
# without shipping info, color
f.write(u'{0}\t"{1}"\t"{2}"\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\n'.format(id, title, description, link, image_link, avail, price, currency, gpc, brand, age_group, gender, condition))
return
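# Illustrative config.ini sketch (all values are placeholders, not a real shop):
#
#     [ps]
#     api_url = https://example-shop.com/api
#     token = XXXXXXXXXXXXXXXX
#     base_url = https://example-shop.com
#     lang_id = 1
#
#     [general]
#     brand = Example Brand
#
#     [report]
#     folder_name = ./feeds
#     file_name = fb-catalog
#     lang = es
#
#     [catemap]
#     ; Prestashop default-category id -> Google product category
#     12 = 2271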
if __name__ == '__main__':
try:
basedir = sys.argv[1]
except IndexError:
basedir = '.'
config = ConfigParser()
config.read(basedir+'/config.ini')
file = codecs.open("{0}/{1}-{2}.tsv".format(config.get('report','folder_name'), config.get('report','file_name'), config.get('report','lang')), "w", "utf-8-sig")
ps = PrestaShopWebServiceDict(config.get('ps', 'api_url'), config.get('ps', 'token'))
get_fb_catalog(ps, file, config)
file.close() |
the-stack_0_13019 | import requests
import urllib.request
import time
import urllib
import re
import csv
import sys
from bs4 import BeautifulSoup
def uni_montreal():
url = "https://diro.umontreal.ca/english/departement-directory/professors/"
r = requests.get(url) # request to url
# getting the soup by parsing the html parsel to text to request r
soup = BeautifulSoup(r.text, "html5lib")
# print(soup.prettify)
# file initialization to write
file_name = sys.argv[0]
# file_name = file_name[4:]
txt_file = file_name.replace(".py", ".txt")
f = open(txt_file, "w")
csv_file = file_name.replace(".py", ".csv")
f2 = open(csv_file, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "University of Montreal"
country = "Canada"
grabage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country, grabage_emails]
# d gives the array of all profs on the dept homepage
dd = soup.find('div', {'class':'list_individus'})
d = dd.find_all('div', {'class':'individu with-affiliations with-expertises'})
#iterating for every prof
for i in d:
h4 = i.find('h4', {'class':"nom-prenom"})
a = h4.find('a')
        if a is None:
continue
link = "https://diro.umontreal.ca"+a.get('href')
name = (a.get_text()).strip()
name = " ".join(name.split())
# print(name, link)
        # check if link is valid or not
try:
prof_resp = requests.get(link)
except:
continue
div_mail = i.find('div', {'class':'courriel'})
a_mail = div_mail.find('a')
        if a_mail is not None:
email = a_mail.get_text()
else:
email = "Not Found"
print(name, link)
filterandgetEmail(var, grabage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, grabage_emails, name, link, email, prof_resp):
f = var[0]
csvwriter = var[1]
csvwriter2 = var[2]
u_name = var[3]
country = var[4]
keyword_list = ['Computer Architecture','hardware and system architecture', 'hardware and architecture', 'embedded system', 'computer organization','VLSI Design', 'Computer and System',
'multiprocessor architecture']
flag = 1
prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
research_text = prof_soup.text
for pattern in keyword_list:
if re.search(pattern, research_text, re.IGNORECASE):
flag = 0
if email != 'Not Found':
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
for eemail in grabage_emails:
if eemail in new_emails:
new_emails.remove(eemail)
if len(new_emails) == 0:
email = "Email Not Found"
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
# f.write(link + '\n' + name)
for email in new_emails:
f.write(link + '\n' + name + '\t\t' + email + '\n')
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
# f.write("\n")
f.write(pattern)
f.write('\n\n')
break
if __name__ == '__main__':
uni_montreal()
|
the-stack_0_13020 | # Friends again #
# March 15, 2019
# By Robin Nash
import sys
def getCircle(friend, pairs, circle):
circle.append(pairs[circle[-1]])
last = circle[-1]
if last == circle[0]:
return circle[:-1]
if last in circle[:-1]:
return circle[circle.index(last):-1]
return getCircle(friend, pairs, circle)
def getDistance(a,b,circle):
ai = circle.index(a)
bi = circle.index(b)
d1 = bi-ai-1
d2 = len(circle) - bi + ai -1
return min([d1, d2])
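# Worked example (sketch): for circle = ['1', '2', '3', '4'], getDistance('1', '3', circle)
# gives d1 = 2 - 0 - 1 = 1 (friends between them one way) and d2 = 4 - 2 + 0 - 1 = 1
# (the other way), so the result is min(1, 1) = 1.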
data = ['9', '2 3', '1 2', '3 1', '10 11', '100 10', '11 100', '12 100', '13 14','14 100', '1 100', '2 3', '12 14']
##data = sys.stdin.read().strip().split('\n')[:-1]
friendsNum = int(data.pop(0))
pairs = {f:ff for f,ff in [pair.split() for pair in data[:friendsNum]]}
checkPairs = [pair.split() for pair in data[friendsNum:]]
data.clear()
circles = []
append = circles.append
for f,ff in pairs.items():
if circles == [] or True not in [f in c and ff in c for c in circles]:
circle = getCircle(f,pairs,[f])
append(circle)
##sample = [(1,2,[2,3,4,1]), (1,2,[4,5,1,0,0,0,2])]
##for a,b,circle in sample:
## a,b = sorted([circle.index(a),circle.index(b)])
## a,b = circle[a],circle[b]
## print(getDistance(a,b,circle))
for a,b in checkPairs:
try:
        circle = [c for c in circles if a in c and b in c][0]
distance = getDistance(a,b,circle)
print('Yes',distance)
except ValueError:
print('No')
except IndexError:
print("No")
print(circles)
#1552666028.0 |
the-stack_0_13021 | from rest_framework.permissions import BasePermission
from environments.models import Environment
from environments.permissions.constants import UPDATE_FEATURE_STATE
from projects.models import Project
ACTION_PERMISSIONS_MAP = {
"retrieve": "VIEW_PROJECT",
"destroy": "DELETE_FEATURE",
"list": "VIEW_PROJECT",
"create": "CREATE_FEATURE",
"add_owners": "CREATE_FEATURE",
"remove_owners": "CREATE_FEATURE",
"update": "CREATE_FEATURE",
"partial_update": "CREATE_FEATURE",
}
class FeaturePermissions(BasePermission):
def has_permission(self, request, view):
try:
project_id = view.kwargs.get("project_pk") or request.data.get("project")
project = Project.objects.get(id=project_id)
if view.action in ACTION_PERMISSIONS_MAP:
return request.user.has_project_permission(
ACTION_PERMISSIONS_MAP.get(view.action), project
)
# move on to object specific permissions
return view.detail
except Project.DoesNotExist:
return False
def has_object_permission(self, request, view, obj):
# map of actions and their required permission
if view.action in ACTION_PERMISSIONS_MAP:
return request.user.has_project_permission(
ACTION_PERMISSIONS_MAP[view.action], obj.project
)
if view.action == "segments":
return request.user.is_project_admin(obj.project)
return False
class FeatureStatePermissions(BasePermission):
def has_permission(self, request, view):
try:
if view.action == "create" and request.data.get("environment"):
environment = Environment.objects.get(id=request.data["environment"])
return request.user.has_environment_permission(
UPDATE_FEATURE_STATE, environment
)
# - detail view means we can just defer to object permissions
# - list view means we just need to filter the objects based on permissions
return view.detail or view.action == "list"
except Environment.DoesNotExist:
return False
def has_object_permission(self, request, view, obj):
return request.user.has_environment_permission(
UPDATE_FEATURE_STATE, environment=obj.environment
)
class EnvironmentFeatureStatePermissions(BasePermission):
def has_permission(self, request, view):
if view.action == "create":
environment_api_key = view.kwargs.get("environment_api_key")
if not environment_api_key:
return False
environment = Environment.objects.get(api_key=environment_api_key)
return request.user.has_environment_permission(
permission=UPDATE_FEATURE_STATE, environment=environment
)
if view.action == "list":
return True
# move on to object specific permissions
return view.detail
def has_object_permission(self, request, view, obj):
return request.user.has_environment_permission(
permission=UPDATE_FEATURE_STATE, environment=obj.environment
)
class IdentityFeatureStatePermissions(EnvironmentFeatureStatePermissions):
pass
|
the-stack_0_13023 | import unittest
import sys
sys.path.append("../src/")
from merge_sort_without_sentinel import merge_sort
class TestMergeSortWithoutSentinel(unittest.TestCase):
def test_merge_sort_already_sorted(self):
A = [1, 2, 3, 4, 5, 6]
merge_sort(A)
self.assertEqual(A, [1, 2, 3, 4, 5, 6])
def test_merge_sort(self):
A = [5, 2, 4, 6, 1, 3]
merge_sort(A)
self.assertEqual(A, [1, 2, 3, 4, 5, 6])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13024 | # -*- coding: utf-8 -*-
import sys
import warnings
from pathlib import Path
PROJECT_DIR = Path(__file__).resolve().parent
if str(PROJECT_DIR.parent) not in sys.path:
sys.path.insert(0, str(PROJECT_DIR.parent))
warnings.filterwarnings(
"ignore", category=FutureWarning, module="sklearn.utils.deprecation"
)
from common import *
warnings.filterwarnings(
"always", category=FutureWarning, module="sklearn.utils.deprecation"
)
figure_saver = PaperFigureSaver(
directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name,
debug=True,
)
map_figure_saver = figure_saver(**map_figure_saver_kwargs)
for fig_saver in (figure_saver, map_figure_saver):
fig_saver.experiment = PROJECT_DIR.name
memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100)
CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name
data_split_cache = SimpleCache("data_split", cache_dir=CACHE_DIR)
save_ale_2d_and_get_importance = partial(
save_ale_2d_and_get_importance, figure_saver=figure_saver
)
save_pdp_plot_2d = partial(save_pdp_plot_2d, figure_saver=figure_saver)
save_ale_plot_1d_with_ptp = partial(
save_ale_plot_1d_with_ptp, figure_saver=figure_saver
)
save_pdp_plot_1d = partial(
save_pdp_plot_1d, CACHE_DIR=CACHE_DIR, figure_saver=figure_saver
)
multi_ale_plot_1d = partial(multi_ale_plot_1d, figure_saver=figure_saver)
# Number of SHAP jobs.
try:
X_train, X_test, y_train, y_test = data_split_cache.load()
# Maximum job array index (inclusive).
shap_params["max_index"] = math.floor(X_train.shape[0] / shap_params["job_samples"])
# Upper bound only.
shap_params["total_samples"] = (shap_params["max_index"] + 1) * shap_params[
"job_samples"
]
except NoCachedDataError:
warnings.warn(
"Processed data not found, not calculating 'max_index' or 'total_samples'."
)
# Upper bound only.
shap_interact_params["total_samples"] = (
shap_interact_params["max_index"] + 1
) * shap_interact_params["job_samples"]
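# Worked example of the bounds above (illustrative numbers): with 1000 training rows
# and job_samples = 50, max_index = floor(1000 / 50) = 20 and
# total_samples = (20 + 1) * 50 = 1050, an upper bound on the rows actually processed.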
# SHAP cache.
shap_cache = SimpleCache("shap_cache", cache_dir=CACHE_DIR / Path("shap"))
shap_interact_cache = SimpleCache(
"shap_interact_cache", cache_dir=CACHE_DIR / Path("shap_interaction")
)
interact_data_cache = SimpleCache("SHAP_interact_data", cache_dir=CACHE_DIR)
# Redefine the common functionality for our use-case - no shifted variables.
_common_get_data = get_data
_common_get_offset_data = get_offset_data
selected_features = (
"Dry Day Period",
"FAPAR 50P 4k",
"Max Temp",
"VOD Ku-band 50P 4k -3 Month",
"LAI 50P 4k -1 Month",
"Dry Day Period -1 Month",
"Dry Day Period -3 Month",
"SIF 50P 4k",
"LAI 50P 4k -3 Month",
"VOD Ku-band 50P 4k -1 Month",
"VOD Ku-band 50P 4k",
"FAPAR 50P 4k -1 Month",
"pftCrop",
"SIF 50P 4k -9 Month",
"popd",
)
@wraps(_common_get_data)
def get_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
@wraps(_common_get_offset_data)
def get_offset_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_offset_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
def get_model(X_train=None, y_train=None):
return common_get_model(cache_dir=CACHE_DIR, X_train=X_train, y_train=y_train)
model_score_cache = SimpleCache("model_scores", cache_dir=CACHE_DIR)
@model_score_cache
def get_model_scores(rf=None, X_test=None, X_train=None, y_test=None, y_train=None):
return common_get_model_scores(rf, X_test, X_train, y_test, y_train)
|
the-stack_0_13025 | import sys
import pytest
from dagster import file_relative_path, lambda_solid, pipeline, repository
from dagster.core.definitions.repository_definition import RepositoryData
from dagster.core.test_utils import instance_for_test
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.core.workspace import WorkspaceProcessContext
from dagster.core.workspace.load_target import GrpcServerTarget
from dagster.grpc.server import GrpcServerProcess
def define_do_something(num_calls):
@lambda_solid(name="do_something_" + str(num_calls))
def do_something():
return num_calls
return do_something
@lambda_solid
def do_input(x):
return x
def define_foo_pipeline(num_calls):
do_something = define_do_something(num_calls)
@pipeline(name="foo_" + str(num_calls))
def foo_pipeline():
do_input(do_something())
return foo_pipeline
class TestDynamicRepositoryData(RepositoryData):
def __init__(self):
self._num_calls = 0
    # List of pipelines changes every time get_all_pipelines is called
def get_all_pipelines(self):
self._num_calls = self._num_calls + 1
return [define_foo_pipeline(self._num_calls)]
@repository
def bar_repo():
return TestDynamicRepositoryData()
@pytest.fixture(name="instance")
def instance_fixture():
with instance_for_test() as instance:
yield instance
@pytest.fixture(name="workspace_process_context")
def workspace_process_context_fixture(instance):
loadable_target_origin = LoadableTargetOrigin(
executable_path=sys.executable,
python_file=file_relative_path(__file__, "test_custom_repository_data.py"),
)
server_process = GrpcServerProcess(loadable_target_origin=loadable_target_origin)
try:
with server_process.create_ephemeral_client(): # shuts down when leaves this context
with WorkspaceProcessContext(
instance,
GrpcServerTarget(
host="localhost",
socket=server_process.socket,
port=server_process.port,
location_name="test",
),
) as workspace_process_context:
yield workspace_process_context
finally:
server_process.wait()
def test_repository_data_can_reload_without_restarting(workspace_process_context):
request_context = workspace_process_context.create_request_context()
repo_location = request_context.get_repository_location("test")
repo = repo_location.get_repository("bar_repo")
# get_all_pipelines called on server init twice, then on repository load, so starts at 3
# this is a janky test
assert repo.has_pipeline("foo_3")
assert not repo.has_pipeline("foo_1")
assert not repo.has_pipeline("foo_2")
external_pipeline = repo.get_full_external_pipeline("foo_3")
assert external_pipeline.has_solid_invocation("do_something_3")
# Reloading the location changes the pipeline without needing
# to restart the server process
workspace_process_context.reload_repository_location("test")
request_context = workspace_process_context.create_request_context()
repo_location = request_context.get_repository_location("test")
repo = repo_location.get_repository("bar_repo")
assert repo.has_pipeline("foo_4")
assert not repo.has_pipeline("foo_3")
external_pipeline = repo.get_full_external_pipeline("foo_4")
assert external_pipeline.has_solid_invocation("do_something_4")
def test_custom_repo_select_only_job():
assert not bar_repo.get_all_jobs()
|
the-stack_0_13026 | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.q_rnn_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.networks import expand_dims_layer
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
class QRnnNetworkTest(tf.test.TestCase):
def test_network_builds(self):
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
rnn_network = q_rnn_network.QRnnNetwork(tf_env.observation_spec(),
tf_env.action_spec())
first_time_step = tf_env.current_time_step()
q_values, state = rnn_network(
first_time_step.observation, first_time_step.step_type,
network_state=rnn_network.get_initial_state(batch_size=1)
)
self.assertEqual((1, 2), q_values.shape)
self.assertEqual((1, 40), state[0].shape)
self.assertEqual((1, 40), state[1].shape)
def test_network_can_preprocess_and_combine(self):
batch_size = 3
frames = 5
num_actions = 2
lstm_size = 6
states = (tf.random.uniform([batch_size, frames, 1]),
tf.random.uniform([batch_size, frames]))
preprocessing_layers = (
tf.keras.layers.Dense(4),
tf.keras.Sequential([
expand_dims_layer.ExpandDims(-1), # Convert to vec size (1,).
tf.keras.layers.Dense(4)]))
network = q_rnn_network.QRnnNetwork(
input_tensor_spec=(
tensor_spec.TensorSpec([1], tf.float32),
tensor_spec.TensorSpec([], tf.float32)),
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=tf.keras.layers.Add(),
lstm_size=(lstm_size,),
action_spec=tensor_spec.BoundedTensorSpec(
[1], tf.int32, 0, num_actions - 1))
empty_step_type = tf.constant(
[[time_step.StepType.FIRST] * frames] * batch_size)
q_values, _ = network(states, empty_step_type,
network_state=network.get_initial_state(batch_size))
self.assertAllEqual(
q_values.shape.as_list(), [batch_size, frames, num_actions])
# At least 2 variables each for the preprocessing layers.
self.assertGreater(len(network.trainable_variables), 4)
def test_network_can_preprocess_and_combine_no_time_dim(self):
batch_size = 3
num_actions = 2
lstm_size = 5
states = (tf.random.uniform([batch_size, 1]),
tf.random.uniform([batch_size]))
preprocessing_layers = (
tf.keras.layers.Dense(4),
tf.keras.Sequential([
expand_dims_layer.ExpandDims(-1), # Convert to vec size (1,).
tf.keras.layers.Dense(4)]))
network = q_rnn_network.QRnnNetwork(
input_tensor_spec=(
tensor_spec.TensorSpec([1], tf.float32),
tensor_spec.TensorSpec([], tf.float32)),
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=tf.keras.layers.Add(),
lstm_size=(lstm_size,),
action_spec=tensor_spec.BoundedTensorSpec(
[1], tf.int32, 0, num_actions - 1))
empty_step_type = tf.constant([time_step.StepType.FIRST] * batch_size)
q_values, _ = network(
states, empty_step_type,
network_state=network.get_initial_state(batch_size=batch_size))
# Processed 1 time step and the time axis was squeezed back.
self.assertAllEqual(
q_values.shape.as_list(), [batch_size, num_actions])
# At least 2 variables each for the preprocessing layers.
self.assertGreater(len(network.trainable_variables), 4)
def test_network_builds_stacked_cells(self):
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
rnn_network = q_rnn_network.QRnnNetwork(
tf_env.observation_spec(), tf_env.action_spec(), lstm_size=(10, 5))
first_time_step = tf_env.current_time_step()
q_values, state = rnn_network(
first_time_step.observation, first_time_step.step_type,
network_state=rnn_network.get_initial_state(batch_size=1)
)
tf.nest.assert_same_structure(rnn_network.state_spec, state)
self.assertEqual(2, len(state))
self.assertEqual((1, 2), q_values.shape)
self.assertEqual((1, 10), state[0][0].shape)
self.assertEqual((1, 10), state[0][1].shape)
self.assertEqual((1, 5), state[1][0].shape)
self.assertEqual((1, 5), state[1][1].shape)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_13028 | """
Simple iOS tests, showing accessing elements and getting/setting text from them.
"""
import unittest
import os
from random import randint
from appium import webdriver
from time import sleep
class SimpleIOSTests(unittest.TestCase):
def setUp(self):
# set up appium
app = os.path.abspath('../../apps/TestApp/build/release-iphonesimulator/TestApp-iphonesimulator.app')
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'app': app,
'platformName': 'iOS',
'platformVersion': '11.1',
'deviceName': 'iPhone 6'
})
def tearDown(self):
self.driver.quit()
def _populate(self):
# populate text fields with two random numbers
els = [self.driver.find_element_by_accessibility_id('TextField1'),
self.driver.find_element_by_accessibility_id('TextField2')]
self._sum = 0
for i in range(2):
rnd = randint(0, 10)
els[i].send_keys(rnd)
self._sum += rnd
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
self.driver.find_element_by_accessibility_id('ComputeSumButton').click()
# is sum equal ?
# sauce does not handle class name, so get fourth element
sum = self.driver.find_element_by_accessibility_id('Answer').text
self.assertEqual(int(sum), self._sum)
def test_scroll(self):
els = self.driver.find_elements_by_class_name('XCUIElementTypeButton')
els[5].click()
sleep(1)
try:
el = self.driver.find_element_by_accessibility_id('Allow')
el.click()
sleep(1)
except:
pass
el = self.driver.find_element_by_xpath('//XCUIElementTypeMap[1]')
location = el.location
self.driver.swipe(start_x=location['x'], start_y=location['y'], end_x=0.5, end_y=location['y'], duration=800)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SimpleIOSTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
the-stack_0_13029 | # Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import constants
from neutron.db import ipam_backend_mixin
from neutron.tests import base
class TestIpamBackendMixin(base.BaseTestCase):
def setUp(self):
super(TestIpamBackendMixin, self).setUp()
self.mixin = ipam_backend_mixin.IpamBackendMixin()
self.ctx = mock.Mock()
self.default_new_ips = (('id-1', '192.168.1.1'),
('id-2', '192.168.1.2'))
self.default_original_ips = (('id-1', '192.168.1.1'),
('id-5', '172.20.16.5'))
self.owner_non_router = constants.DEVICE_OWNER_DHCP
self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF
def _prepare_ips(self, ips):
return [{'ip_address': ip[1],
'subnet_id': ip[0]} for ip in ips]
def _mock_slaac_subnet_on(self):
slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC,
'ipv6_ra_mode': constants.IPV6_SLAAC}
self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet)
def _mock_slaac_subnet_off(self):
non_slaac_subnet = {'ipv6_address_mode': None,
'ipv6_ra_mode': None}
self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet)
def _test_get_changed_ips_for_port(self, expected_change, original_ips,
new_ips, owner):
change = self.mixin._get_changed_ips_for_port(self.ctx,
original_ips,
new_ips,
owner)
self.assertEqual(expected_change, change)
def test__get_changed_ips_for_port(self):
new_ips = self._prepare_ips(self.default_new_ips)
original_ips = self._prepare_ips(self.default_original_ips)
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=[original_ips[0]],
remove=[original_ips[1]])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_router)
def test__get_changed_ips_for_port_autoaddress(self):
new_ips = self._prepare_ips(self.default_new_ips)
original = (('id-1', '192.168.1.1'),
('id-5', '2000:1234:5678::12FF:FE34:5678'))
original_ips = self._prepare_ips(original)
self._mock_slaac_subnet_on()
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=original_ips,
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def _test_get_changed_ips_for_port_no_ip_address(self):
# IP address should be added if only subnet_id is provided,
# independently from auto_address status for subnet
new_ips = [{'subnet_id': 'id-3'}]
original_ips = []
expected_change = self.mixin.Changes(add=[new_ips[0]],
original=[],
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def test__get_changed_ips_for_port_no_ip_address_no_slaac(self):
self._mock_slaac_subnet_off()
self._test_get_changed_ips_for_port_no_ip_address()
def test__get_changed_ips_for_port_no_ip_address_slaac(self):
self._mock_slaac_subnet_on()
self._test_get_changed_ips_for_port_no_ip_address()
def test__is_ip_required_by_subnet_for_router_port(self):
# Owner -> router:
# _get_subnet should not be called,
# expected True
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_router)
self.assertTrue(result)
self.assertFalse(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port(self):
# Owner -> not router:
# _get_subnet should be called,
# expected True, because subnet is not slaac
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertTrue(result)
self.assertTrue(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self):
# Owner -> not router:
# _get_subnet should be called,
# expected False, because subnet is slaac
self._mock_slaac_subnet_on()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertFalse(result)
self.assertTrue(self.mixin._get_subnet.called)
|
the-stack_0_13032 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a line item creative association for a creative
set.
To create creative sets, run create_creative_set.py. To create creatives, run
create_creatives.py. To determine which LICAs exist, run get_all_licas.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: LineItemCreativeAssociationService.createLineItemCreativeAssociations
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CREATIVE_SET_ID = 'INSERT_CREATIVE_SET_ID_HERE'
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, creative_set_id, line_item_id):
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v201408')
# Create LICA for a creative set.
lica = {'creativeSetId': creative_set_id, 'lineItemId': line_item_id}
# Add LICA.
lica = lica_service.createLineItemCreativeAssociations([lica])
# Display results.
print (('LICA with line item ID \'%s\' and creative set ID \'%s\' was '
'created.') % (lica['lineItemId'], lica['creativeSetId']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CREATIVE_SET_ID, LINE_ITEM_ID)
|
the-stack_0_13034 | import os
import sys
import argparse
import logging
from tqdm.notebook import tqdm
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import shutil
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import transformers
from config.configs import set_random_fixed, get_path_info
from data.dataloader import get_Finetune_dataloader_Atype, get_Finetune_dataloader_Btype
from data.tokenizer import Tokenizer
from util.utils import (load_metricfn, load_optimizer, load_scheduler, load_lossfn,
save_checkpoint, load_checkpoint,
time_measurement, count_parameters, initialize_weights)
from util.optim_scheduler import ScheduledOptim
from models.model import build_classification_model, build_regression_model
import wandb
class Finetune_Trainer():
def __init__(self, parser, task):
# set parser
self.args = parser.parse_args()
#initialize wandb
#wandb.init(name=task)
# save loss history to plot later on
self.training_history = []
self.validation_history = []
# set variables needed for training
self.n_epoch = self.args.epoch
self.train_batch_size = self.args.train_batch_size
self.display_step = self.args.display_step # training
self.val_batch_size = self.args.val_batch_size
self.test_batch_size = self.args.test_batch_size
self.display_examples = self.args.display_examples # testing
self.lr = self.args.init_lr
#self.eps = self.args.adam_eps
self.weight_decay = self.args.weight_decay
self.beta1 = self.args.adam_beta1
self.beta2 = self.args.adam_beta2
self.warmup_steps = self.args.warm_up
#self.factor = self.args.factor
#self.patience = self.args.patience
#self.clip = self.args.clip
self.language = self.args.language
self.max_len = self.args.max_len
self.vocab_size = self.args.vocab_size
self.device = self.args.device
self.pretrain_weightpath = os.path.join(os.getcwd(),'weights')
if os.path.isdir('finetune_weights'):
shutil.rmtree("finetune_weights")
self.weightpath = os.path.join(os.getcwd(),'finetune_weights')
self.final_weightpath = os.path.join(os.getcwd(),'final_finetune_weights')
self.best_pretrain_epoch = self.args.best_pretrain_epoch
# build dataloader
self.task = task
task_Atype = ['cola','sst2']
task_Btype = ['stsb','rte','mrpc','qqp','mnli']
self.task_Btype = ['stsb','rte','mrpc','qqp','mnli']
task_Btype_sentence = ['stsb','rte','mrpc']
task_Btype_question = ['qqp']
task_Btype_hypothesis = ['mnli']
if task in task_Atype:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Atype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
'glue', task, 'sentence', 'label',
None
)
elif task in task_Btype_sentence:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'sentence1', 'sentence2', 'label',
None
)
elif task in task_Btype_question:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'question1', 'question2', 'label',
None
)
elif task in task_Btype_hypothesis:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'premise', 'hypothesis', 'label',
None
)
else:
assert "The task you typed in is not supported!"
self.train_batch_num = len(self.train_dataloader)
self.val_batch_num = len(self.val_dataloader)
self.test_batch_num = len(self.test_dataloader)
self.num_training_steps = (self.train_batch_num) * (self.n_epoch)
self.t_total = self.train_batch_num * self.n_epoch
# load metric
if task == 'mnli':
self.metric = load_metricfn('matthews_corrcoef')
elif task == 'stsb':
self.metric = load_metricfn('pearson')
else:
self.metric = load_metricfn('accuracy_score')
# build model
if task in task_Atype:
self.model= build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'one')
elif task == 'stsb':
self.model = build_regression_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device)
else:
self.model = build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'two')
load_checkpoint(self.model, os.path.join(self.pretrain_weightpath,str(self.best_pretrain_epoch)+".pth"))
# build optimizer
self.optimizer = load_optimizer(self.model, self.lr, self.weight_decay,
self.beta1, self.beta2)
# build scheduler
self.optim_scheduler = ScheduledOptim(self.optimizer, self.args.model_dim, self.warmup_steps)
# build lossfn
if task=='stsb':
self.lossfn = load_lossfn('MSELoss',self.args.pad_idx) # Regression
else:
self.lossfn = load_lossfn('CrossEntropyLoss',self.args.pad_idx) # Classification
def train_test(self):
best_model_epoch, training_history, validation_history = self.finetune()
self.test(best_model_epoch)
self.plot(training_history, validation_history)
def finetune(self):
# set logging
logging.basicConfig(level=logging.WARNING)
# logging message
sys.stdout.write('#################################################\n')
sys.stdout.write('You have started training the model.\n')
print('Your model size is : ')
count_parameters(self.model)
sys.stdout.write('#################################################\n')
# set randomness of training procedure fixed
self.set_random(516)
# build directory to save to model's weights
self.build_directory()
# set initial variables for training, validation
train_batch_num = len(self.train_dataloader)
validation_batch_num = len(self.val_dataloader)
# set initial variables for model selection
best_model_epoch=0
best_model_score=0
best_model_loss =float('inf')
# save information of the procedure of training
training_history=[]
validation_history=[]
# predict when training will end based on average time
total_time_spent = 0
# start of looping through training data
for epoch_idx in range(self.n_epoch):
# measure time when epoch start
start_time = time.time()
sys.stdout.write('#################################################\n')
sys.stdout.write(f"Epoch : {epoch_idx+1} / {self.n_epoch}")
sys.stdout.write('\n')
sys.stdout.write('#################################################\n')
########################
#### Training Phase ####
########################
# switch model to train mode
self.model.train()
# set initial variables for training (inside epoch)
training_loss_per_epoch=0.0
training_acc_per_epoch = 0
# train model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.train_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'].to(torch.float) #[bs, 1]
# reshape input_ids and token_type_ids
if self.task in self.task_Btype:
reshaped_input_ids = input_ids.to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().cuda(reshaped_input_ids.device)
else:
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
# reshape input_ids and token_type_ids
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids).squeeze() # [bs, 2] in classification, [bs, 1] in regression
train_pred = torch.tensor([1 if n >0 else 0 for n in model_output]).to(self.device)
training_acc_per_epoch += self.metric(train_pred.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())
# print(model_output.float().type())
# print(model_output)
# print(reshaped_labels.type())
# print(reshaped_labels)
if batch_idx == 0:
print("##### train pred #####")
print(model_output)
print(reshaped_labels)
print("#"*len("##### train pred #####"))
# compute loss using model output and labels(reshaped ver)
loss = self.lossfn(model_output, reshaped_labels)
# clear gradients, and compute gradient with current batch
self.optimizer.zero_grad()
loss.backward()
# clip gradients
#torch.nn.utils.clip_grad_norm_(self.model.parameters(),self.clip)
# update gradients
self.optim_scheduler.step_and_update_lr()
# add loss to training_loss
training_loss_per_iteration = loss.item()
training_loss_per_epoch += training_loss_per_iteration
# Display summaries of training procedure with period of display_step
if ((batch_idx+1) % self.display_step==0) and (batch_idx>0):
sys.stdout.write(f"Training Phase | Epoch: {epoch_idx+1} | Step: {batch_idx+1} / {train_batch_num} | loss : {training_loss_per_iteration}")
sys.stdout.write('\n')
# save training loss of each epoch, in other words, the average of every batch in the current epoch
training_mean_loss_per_epoch = training_loss_per_epoch / train_batch_num
training_history.append(training_mean_loss_per_epoch)
training_acc_per_epoch = (training_acc_per_epoch/train_batch_num)*100
##########################
#### Validation Phase ####
##########################
# switch model to eval mode
self.model.eval()
# set initial variables for validation (inside epoch)
validation_loss_per_epoch=0.0
validation_score_per_epoch=0.0
# validate model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.val_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'].to(torch.float) #[bs, 1]
# reshape input_ids and token_type_ids
if self.task in self.task_Btype:
reshaped_input_ids = input_ids.to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().cuda(reshaped_input_ids.device)
else:
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
with torch.no_grad():
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids).squeeze() # [bs, 2] in classification, [bs, 1] in regression
if batch_idx == 0:
print(model_output)
print(reshaped_labels)
# compute loss using model output and labels(reshaped ver)
loss = self.lossfn(model_output, reshaped_labels)
# add loss to training_loss
validation_loss_per_iteration = loss.item()
validation_loss_per_epoch += validation_loss_per_iteration
# reshape model output
reshaped_model_output = torch.tensor([1 if n >0 else 0 for n in model_output.squeeze()]).to(self.device)
# compute bleu score using model output and labels(reshaped ver)
validation_score_per_iteration = self.metric(reshaped_model_output.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())*100
validation_score_per_epoch += validation_score_per_iteration
# save validation loss of each epoch, in other words, the average of every batch in the current epoch
validation_mean_loss_per_epoch = validation_loss_per_epoch / validation_batch_num
validation_history.append(validation_mean_loss_per_epoch)
# save validation score of each epoch, in other words, the average of every batch in the current epoch
validation_mean_score_per_epoch = validation_score_per_epoch / validation_batch_num
# Display summaries of validation result after all validation is done
sys.stdout.write(f"Validation Phase | Epoch: {epoch_idx+1} | loss : {validation_mean_loss_per_epoch} | score : {validation_mean_score_per_epoch}")
sys.stdout.write('\n')
# Model Selection Process using validation_mean_score_per_epoch
if (validation_mean_loss_per_epoch < best_model_loss):
best_model_epoch = epoch_idx+1
best_model_loss = validation_mean_loss_per_epoch
best_model_score = validation_mean_score_per_epoch
save_checkpoint(self.model, self.optimizer, epoch_idx,
os.path.join(self.weightpath,str(epoch_idx+1)+".pth"))
#wandb log
train_log_dict = {
"train/step": epoch_idx, # grows exponentially with internal wandb step
"train/loss": training_mean_loss_per_epoch, # x-axis is train/step
"train/accuracy": training_acc_per_epoch} # x-axis is train/step
val_log_dict ={
"val/loss": validation_mean_loss_per_epoch, # x-axis is internal wandb step
"val/accuracy":validation_mean_score_per_epoch
}
# wandb.log(train_log_dict)
# wandb.log(val_log_dict)
# measure time when epoch end
end_time = time.time()
# measure the amount of time spent in this epoch
epoch_mins, epoch_secs = time_measurement(start_time, end_time)
            sys.stdout.write(f"Time spent in epoch {epoch_idx+1} is {epoch_mins} minutes and {epoch_secs} seconds\n")
# measure the total amount of time spent until now
total_time_spent += (end_time - start_time)
total_time_spent_mins = int(total_time_spent/60)
total_time_spent_secs = int(total_time_spent - (total_time_spent_mins*60))
            sys.stdout.write(f"Total amount of time spent until epoch {epoch_idx+1} is {total_time_spent_mins} minutes and {total_time_spent_secs} seconds\n")
# calculate how more time is estimated to be used for training
#avg_time_spent_secs = total_time_spent_secs / (epoch_idx+1)
#left_epochs = self.n_epoch - (epoch_idx+1)
#estimated_left_time = avg_time_spent_secs * left_epochs
#estimated_left_time_mins = int(estimated_left_time/60)
#estimated_left_time_secs = int(estimated_left_time - (estimated_left_time_mins*60))
            #sys.stdout.write(f"Estimated amount of time until epoch {self.n_epoch} is {estimated_left_time_mins} minutes and {estimated_left_time_secs} seconds\n")
# summary of whole procedure
sys.stdout.write('#################################################\n')
        sys.stdout.write(f"Training and Validation have ended.\n")
        sys.stdout.write(f"Your best model was the model from epoch {best_model_epoch} and scored {self.args.metric} score : {best_model_score} | loss : {best_model_loss}\n")
sys.stdout.write('#################################################\n')
return best_model_epoch, training_history, validation_history
def test(self, best_model_epoch):
# logging message
sys.stdout.write('#################################################\n')
sys.stdout.write('You have started testing the model.\n')
sys.stdout.write('#################################################\n')
# set randomness of training procedure fixed
self.set_random(516)
# build directory to save to model's weights
self.build_final_directory()
# loading the best_model from checkpoint
task_Atype = ['cola','sst2']
if self.task in task_Atype:
best_model= build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'one')
elif self.task == 'stsb':
best_model = build_regression_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device)
else:
best_model = build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'two')
load_checkpoint(best_model,
os.path.join(self.weightpath,str(best_model_epoch)+".pth"))
# set initial variables for test
test_batch_num = len(self.test_dataloader)
##########################
###### Test Phase ######
##########################
# switch model to eval mode
best_model.eval()
# set initial variables for validation (inside epoch)
test_score_per_epoch=0.0
test_score_tmp_list=[]
# validate model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.test_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'] #[bs, 1]
# reshape input_ids and token_type_ids
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
with torch.no_grad():
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids) # [bs, 2] in classification, [bs, 1] in regression
# reshape model output
reshaped_model_output = model_output.argmax(dim=1)
# compute bleu score using model output and labels(reshaped ver)
test_score_per_iteration = self.metric(reshaped_model_output.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())*100
test_score_tmp_list.append(test_score_per_iteration)
test_score_per_epoch += test_score_per_iteration
# calculate test score
test_score_per_epoch = test_score_per_epoch / test_batch_num
# Evaluate summaries with period of display_steps
sys.stdout.write(f"Test Phase | Best Epoch: {best_model_epoch} | score : {test_score_per_epoch}\n")
save_checkpoint(self.model, self.optimizer, 1,
os.path.join(self.final_weightpath,"final_"+self.task+".pth"))
def plot(self, training_history, validation_history):
step = np.linspace(0,self.n_epoch,self.n_epoch)
plt.plot(step,np.array(training_history),label='Training')
plt.plot(step,np.array(validation_history),label='Validation')
plt.xlabel('number of epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
cur_path = os.getcwd()
plt.savefig(cur_path)
sys.stdout.write('Image of train, validation history saved as plot png!\n')
def build_directory(self):
# Making directory to store model pth
curpath = os.getcwd()
weightpath = os.path.join(curpath,'finetune_weights')
os.mkdir(weightpath)
def build_final_directory(self):
curpath = os.getcwd()
final_weightpath = os.path.join(curpath,'final_finetune_weights')
os.mkdir(final_weightpath)
def set_random(self, seed_num):
set_random_fixed(seed_num)
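# --- Hedged usage sketch (not part of the original module) -------------------
# Finetune_Trainer is driven by an argparse parser whose attributes match the
# hyper-parameters read in __init__ (epoch, train/val/test batch sizes, init_lr,
# model_dim, hidden_dim, num_layers, vocab_size, max_len, device, ...). Assuming
# the project's entry point builds such a parser, the intended call pattern is:
#
#     trainer = Finetune_Trainer(parser, task='cola')   # task in {'cola','sst2','stsb','rte','mrpc','qqp','mnli'}
#     trainer.train_test()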
|
the-stack_0_13037 | from purbeurre.models import Product
from django.db.models import Q
from collections import Counter
class DatabaseSearch:
    """Finds the categories matched by the user's search and returns the
    best products (in terms of nutri-score) for each of those categories."""
    def get_substitutes_per_category(self, search):
        """Gets the list of matching categories from get_categories_from_search()
        and, if it is not empty, builds a list of dictionaries where each one has
        the category's name as key and a list of its 6 best products as value."""
cat_subs_list = []
categories = self.get_categories_from_search(search)
if categories is None:
return None
else:
for i, cat in enumerate(categories.keys()):
cat_subs_list.append({cat: []})
rq = Product.objects.filter(
category__name=cat
).order_by('nutriscore')[:6]
for r in rq:
if r.nutriscore < 'd':
cat_subs_list[i][cat].append(r)
return cat_subs_list
def get_categories_from_search(self, search):
products = Product.objects.filter(Q(name__startswith=search.lower())
| Q(name__contains=search.lower()))
return self.keep_only_real_categories(products)
    def keep_only_real_categories(self, products):
        """A user's search can match products that have nothing to do with the
        intended search, so this method drops the categories in which matching
        products appear too rarely compared to the most represented category.
        A category is dropped when its count is below 10% of that maximum."""
categories_list = []
# For each product, adds its category name to a list
for product in products:
categories_list.append(product.category.name)
if len(categories_list) == 0:
return None
# Gets the category the most present
greatest = max(Counter(categories_list).values())
keys_to_del = []
# Builds and sorts a dict from the category the most present
# to the least
dict = Counter(categories_list)
the_dict = {k: v for k, v in sorted(dict.items(),
key=lambda item: item[1],
reverse=True)}
# Checks which categories are too few
for k, v in the_dict.items():
if v < greatest*0.1:
keys_to_del.append(k)
# Removes them
for key in keys_to_del:
del(the_dict[key])
return the_dict
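# --- Worked example for the 10% rule above (illustrative counts only) --------
# If the matched products count as {'pizzas': 40, 'desserts': 7, 'sauces': 3},
# the most represented category is 'pizzas' (40), so the threshold is 40 * 0.1 = 4:
# 'sauces' (3 < 4) is dropped while 'pizzas' and 'desserts' are kept, and the
# returned dict is ordered from the most to the least represented category.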
|
the-stack_0_13038 | import datetime
from typing import Union, Optional
import discord
from discord.ext import commands
async def trigger_role(member: discord.Member, role: Union[discord.Role, int, str], guild: Optional[discord.Guild] = None) -> bool:
"""
Triggers a role on a member.
If member already has `role` then role is removed, if the member does not yet have the `role`, then it will be applied.
If role is a discord.Role then nothing is pulled from cache
If role is an integer then a discord.Role object is pulled from cache
if role is a string, then a discord.Role object is pulled from the `guild.roles` cache.
If `guild` is None, and `role` is int or str, then TypeError is raised
Throws:
TypeError, see above
ValueError if the `role` cannot be retrieved from cache
Whatever discord.Member.add_roles can throw
returns False if role was removed, True if it was added
"""
    # Check for a missing guild before touching guild.roles, otherwise an
    # int/str role with guild=None would raise AttributeError instead of the
    # documented TypeError.
    if guild is None and isinstance(role, (int, str)):
        raise TypeError(
            "Expected a guild since role was str or int, but got None")
    if isinstance(role, int):
        role = discord.utils.get(guild.roles, id=role)
    elif isinstance(role, str):
        role = discord.utils.get(guild.roles, name=role)
    elif not isinstance(role, discord.Role):
        raise TypeError(f"Expected discord.Role, got {type(role)}")
    if role is None:
        raise ValueError("Role could not be retrieved from cache")
def has_role(member: discord.Member, role: discord.Role) -> bool:
"""Returns True if the member has the role, false if not"""
return role in member.roles
if has_role(member, role):
await member.remove_roles(role)
return False
await member.add_roles(role)
return True
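# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates how trigger_role could back a self-assignable role command. The
# command name and the role name "Member" are placeholders, and registering the
# command on an actual commands.Bot or Cog is left out.
@commands.command(name="togglerole")
async def toggle_member_role(ctx: commands.Context):
    added = await trigger_role(ctx.author, "Member", guild=ctx.guild)
    await simple_embed(ctx, "Role added" if added else "Role removed")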
async def simple_embed(ctx, text):
await ctx.send(
embed=discord.Embed(title=text,
colour=discord.Colour(0x00FF00), timestamp=datetime.datetime.utcnow())
.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
.set_footer(text="Success!"))
async def send_error(ctx, error):
await ctx.send(
embed=discord.Embed(title=str(error),
colour=discord.Colour(0xFF0000), timestamp=datetime.datetime.utcnow())
.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
.set_footer(text="Guild only!"))
|
the-stack_0_13039 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""Server of Rock Paper Scissor game (2 players)."""
from socketserver import BaseRequestHandler, TCPServer
__author__ = 'fyabc'
ADDRESS = 'localhost', 20000
MSG_SIZE = 8192
class RpsHandler(BaseRequestHandler):
def handle(self):
print('Get connection from', self.client_address)
while True:
msg = self.request.recv(MSG_SIZE)
if not msg:
break
print('Message from {}: {}'.format(self.client_address, msg))
self.request.sendall(msg)
def main():
server = TCPServer(ADDRESS, RpsHandler)
server.serve_forever()
if __name__ == '__main__':
main()
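# --- Hedged companion sketch (not part of the original module) ---------------
# A minimal client for manual testing against the echo-style handler above; the
# message payload is illustrative only. Run the server first, then e.g.:
#
#     import socket
#     with socket.create_connection(ADDRESS) as sock:
#         sock.sendall(b'rock')
#         print(sock.recv(MSG_SIZE))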
|
the-stack_0_13040 | #!/usr/bin/env python3
# coding=utf-8
# Import the required standard-library and third-party packages
import requests
import base64
import zipfile
import configparser
import socket
import ping3
import re
import os
from prettytable import PrettyTable
from colorama import init, Fore, Back, Style
class DrawTable(object):
    '''Utility class for printing results as a formatted table.'''
def __init__(self):
self.table = []
header = [
"id",
"name",
"ping(ms)",
"port_status",
"server",
"port",
"method"
]
self.x = PrettyTable(header)
self.x.reversesort = True
def append(self,*args,**kwargs):
if(kwargs):
content=[
kwargs['id'],
kwargs['name'],
kwargs['ping'],
kwargs['port_status'],
kwargs['server'],
kwargs['port'],
kwargs['method'],
]
self.x.add_row(content)
def str(self):
return str(self.x)
init (autoreset=False)
class colored(object):
    '''Utility class for printing text in different colors.'''
def red(self,s):
return Fore.LIGHTRED_EX + s + Fore.RESET
def green(self,s):
return Fore.LIGHTGREEN_EX + s + Fore.RESET
def yellow(self,s):
return Fore.LIGHTYELLOW_EX + s + Fore.RESET
def white(self,s):
return Fore.LIGHTWHITE_EX + s + Fore.RESET
def blue(self,s):
return Fore.LIGHTBLUE_EX + s + Fore.RESET
# Decode base64-encoded text, fixing padding and URL-safe characters first
def base64decode(text):
i = len(text) % 4
if i == 1:
text = text + '==='
elif i == 2:
text = text + '=='
elif i == 3:
text = text + '='
text = re.sub(r'_', '/', text)
text = re.sub(r'-', '+', text)
return base64.urlsafe_b64decode(text).decode()
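# Worked example (standard base64 behaviour, not project-specific):
# base64decode('aGVsbG8') pads the input to 'aGVsbG8=' and returns 'hello';
# URL-safe '-' and '_' characters are first mapped back to '+' and '/'.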
# Fetch the list of ssr urls from a subscription link
def get_ssr_list(url):
color = colored()
url_colored = color.blue(url)
print('Being parsed the ssr url:', url_colored)
print('It will take a moment,Please be patient~~')
result = requests.get(url, headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3742.400 QQBrowser/10.5.3864.400'})
try:
ssr_result = base64decode(result.text)
except:
        print(color.red("Failed to parse the ssr subscribe url, please check it~~"))
return None
else:
ssr_list = ssr_result.split('\n')
ssr_real_list = list()
for ssr in ssr_list:
if ssr:
ssr_real_list.append(ssr[6:])
return ssr_real_list
# Parse a single ssr url into a dict of node settings
def analysis_ssr_url(ssr_url):
try:
ssr_url = base64decode(ssr_url)
except:
pass
else:
ssr_dict = dict()
param_list = ssr_url.split(':')
if len(param_list) == 6:
server = param_list[0]
port = param_list[1]
protocol = param_list[2]
method = param_list[3]
obfs = param_list[4]
second_encryption_param_list = param_list[-1].split('/?')
password = base64decode(second_encryption_param_list[0])
encryption_param_list = second_encryption_param_list[-1].split('&')
for params in encryption_param_list:
key = params.split('=')[0]
value = params.split('=')[1]
if key == 'obfsparam':
key = 'obfs_param'
if key == 'protoparam':
key = 'protocol_param'
ssr_dict[key] = base64decode(value)
ssr_dict['server'] = server
ssr_dict['server_port'] = int(port)
ssr_dict['method'] = method
ssr_dict['obfs'] = obfs
ssr_dict['password'] = password
ssr_dict['ping'] = get_ping_speed(server, ssr_dict['remarks'])
ssr_dict['port_status'] = get_port_status(server, int(port))
ssr_dict['protocol'] = protocol
return ssr_dict
else:
color = colored()
            print(color.yellow("Sorry, IPv6 nodes are not supported~~"))
return None
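# For reference (inferred from the parsing above, not from an external spec): a
# successfully decoded ssr url is expected to look like
#   server:port:protocol:method:obfs:base64(password)/?obfsparam=...&protoparam=...&remarks=...
# where every value after "/?" is itself base64-encoded.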
# Build the list of ssr info dictionaries
def generate_ssr_info_dict_list(ssr_url_list):
ssr_info_dict_list = list()
for ssr_url in ssr_url_list:
ssr_info_dict = analysis_ssr_url(ssr_url)
if ssr_info_dict:
ssr_info_dict_list.append(ssr_info_dict)
return ssr_info_dict_list
# Build the table used for display
def generate_ssr_display_table(ssr_info_dict_list):
table = DrawTable()
id = 1
for ssr_info_dict in ssr_info_dict_list:
color = colored()
if ssr_info_dict['ping'] == '∞':
ping = color.red(ssr_info_dict['ping'])
else:
ping = color.green(str(ssr_info_dict['ping']))
if ssr_info_dict['port_status'] == "×":
port_status = color.red(ssr_info_dict['port_status'])
else:
port_status = color.green(ssr_info_dict['port_status'])
table.append(
id = id,
name=ssr_info_dict['remarks'],
ping=ping,
port_status=port_status,
server=ssr_info_dict['server'],
port=ssr_info_dict['server_port'],
method=ssr_info_dict['method']
)
id = id + 1
return table.str()
# Measure the ping latency of an ssr node
def get_ping_speed(server, remarks):
color = colored()
if check_ip_addr(server):
ping_speed = ping3.ping(server, timeout=5, unit='ms')
if ping_speed:
flag = color.green('√')
ping_speed = format(ping_speed, '.3f')
else:
flag = color.red('×')
ping_speed = '∞'
else:
flag = color.red('×')
ping_speed = '∞'
print("Testing ping:", remarks, server, flag)
return ping_speed
def check_ip_addr(server):
ipRe = "^(?=^.{3,255}$)[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+$"
if re.search(ipRe, server):
return True
else:
return False
# Get the user's home directory
def get_home_dir():
cmd = 'echo ${HOME}'
home_dir = os.popen(cmd).read().strip()
return home_dir
# Get the configuration directory and file paths
def get_config_dir():
home_dir = get_home_dir()
config_dir = os.path.join(home_dir, '.ssr-command-client')
config_file_dir = os.path.join(config_dir, 'config.ini')
lock_file_dir = os.path.join(config_dir, '.config.lock')
return config_dir, config_file_dir, lock_file_dir
# Create the configuration directory and config file if they do not exist
def create_config_dir():
config_dir, config_file_dir, lock_file_dir = get_config_dir()
if os.path.exists(config_dir):
pass
else:
os.mkdir(config_dir)
if os.path.exists(config_file_dir):
pass
else:
with open(config_file_dir, 'w') as file:
file.write('')
# Download and unpack the shadowsocksr client source code
def download_ssr_source():
url = 'https://github.com/TyrantLucifer/shadowsocksr/archive/3.2.2.zip'
result = requests.get(url, headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3742.400 QQBrowser/10.5.3864.400'})
config_dir, config_file_dir, lock_file_dir = get_config_dir()
shadowsocksr_zip_file_path = os.path.join(config_dir, 'shadowsocksr.zip')
with open(shadowsocksr_zip_file_path, "wb") as file:
file.write(result.content)
zipFile = zipfile.ZipFile(shadowsocksr_zip_file_path)
zipFile.extractall(config_dir)
os.chdir(config_dir)
os.rename(zipFile.namelist()[0], 'shadowsocksr')
# Initialize the configuration file with default values
def init_config_file():
config_dir, config_file_dir, lock_file_dir = get_config_dir()
server_json_file_path = os.path.join(config_dir, 'ssr-list.json')
config_json_file_path = os.path.join(config_dir, 'config.json')
shadowsocksr_client_path = os.path.join(config_dir, 'shadowsocksr/shadowsocks/local.py')
shadowsocksr_pid_file_path = os.path.join(config_dir, 'shadowsocksr.pid')
shadowsocksr_log_file_path = os.path.join(config_dir, 'shadowsocksr.log')
cf = configparser.ConfigParser()
cf.add_section('default')
cf.set('default', 'subscribe_url', 'https://raw.githubusercontent.com/satrom/V2SSR/master/SSR/Day.txt')
cf.set('default', 'server_json_file_path', server_json_file_path)
cf.set('default', 'config_json_file_path', config_json_file_path)
cf.set('default', 'local_address', '127.0.0.1')
cf.set('default', 'timeout', '300')
cf.set('default', 'workers', '1')
cf.set('default', 'shadowsocksr_client_path', shadowsocksr_client_path)
cf.set('default', 'shadowsocksr_pid_file_path', shadowsocksr_pid_file_path)
cf.set('default', 'shadowsocksr_log_file_path', shadowsocksr_log_file_path)
with open(config_file_dir, 'w+') as file:
cf.write(file)
with open(lock_file_dir, 'w') as lock_file:
lock_file.write('')
# Read a value from the configuration file
def get_config_value(key):
config_dir, config_file_dir, lock_file_dir = get_config_dir()
cf = configparser.ConfigParser()
cf.read(config_file_dir)
return cf.get('default', key)
# Write a value to the configuration file
def set_config_value(key, value):
config_dir, config_file_dir, lock_file_dir = get_config_dir()
cf = configparser.ConfigParser()
cf.read(config_file_dir)
cf.set('default', key, str(value))
with open(config_file_dir, 'w+') as file:
cf.write(file)
# Test whether the server port is reachable
def get_port_status(server, port):
server_addr = (server, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(server_addr)
except:
flag = "×"
else:
flag = "√"
s.close()
return flag
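# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how the helpers above could be chained from a command-line entry point.
# The lock-file check is an assumption about how first-run initialisation is
# detected; fetching the subscription will hit the network.
if __name__ == '__main__':
    create_config_dir()
    _, _, _lock_file = get_config_dir()
    if not os.path.exists(_lock_file):
        init_config_file()
    _urls = get_ssr_list(get_config_value('subscribe_url'))
    if _urls:
        print(generate_ssr_display_table(generate_ssr_info_dict_list(_urls)))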
|
the-stack_0_13041 | #!/usr/bin/env python3
#
# aiohttp documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 5 12:35:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
from pathlib import Path
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
_docs_path = os.path.dirname(__file__)
_version_path = os.path.abspath(
os.path.join(_docs_path, "..", "aiohttp", "__init__.py")
)
with open(_version_path, encoding="latin1") as fp:
try:
_version_info = re.search(
r'^__version__ = "'
r"(?P<major>\d+)"
r"\.(?P<minor>\d+)"
r"\.(?P<patch>\d+)"
r'(?P<tag>.*)?"$',
fp.read(),
re.M,
).groupdict()
except IndexError:
raise RuntimeError("Unable to determine version.")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
# Third-party extensions:
"sphinxcontrib.asyncio",
"sphinxcontrib.blockdiag",
"sphinxcontrib.towncrier", # provides `towncrier-draft-entries` directive
]
try:
import sphinxcontrib.spelling # noqa
extensions.append("sphinxcontrib.spelling")
except ImportError:
pass
intersphinx_mapping = {
"python": ("http://docs.python.org/3", None),
"multidict": ("https://multidict.readthedocs.io/en/stable/", None),
"yarl": ("https://yarl.readthedocs.io/en/stable/", None),
"aiosignal": ("https://aiosignal.readthedocs.io/en/stable/", None),
"aiohttpjinja2": ("https://aiohttp-jinja2.readthedocs.io/en/stable/", None),
"aiohttpremotes": ("https://aiohttp-remotes.readthedocs.io/en/stable/", None),
"aiohttpsession": ("https://aiohttp-session.readthedocs.io/en/stable/", None),
"aiohttpdemos": ("https://aiohttp-demos.readthedocs.io/en/latest/", None),
"asynctest": ("https://asynctest.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# -- Project information -----------------------------------------------------
github_url = "https://github.com"
github_repo_org = "aio-libs"
github_repo_name = "aiohttp"
github_repo_slug = f"{github_repo_org}/{github_repo_name}"
github_repo_url = f"{github_url}/{github_repo_slug}"
github_sponsors_url = f"{github_url}/sponsors"
project = github_repo_name
copyright = f"{project} contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "{major}.{minor}".format(**_version_info)
# The full version, including alpha/beta/rc tags.
release = "{major}.{minor}.{patch}{tag}".format(**_version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = "python3"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Extension configuration -------------------------------------------------
# -- Options for extlinks extension ---------------------------------------
extlinks = {
"issue": (f"{github_repo_url}/issues/%s", "#"),
"pr": (f"{github_repo_url}/pull/%s", "PR #"),
"commit": (f"{github_repo_url}/commit/%s", ""),
"gh": (f"{github_url}/%s", "GitHub: "),
"user": (f"{github_sponsors_url}/%s", "@"),
}
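# Example of how the roles above are used in the rst sources (illustrative):
#   :issue:`1234`    renders as "#1234" and links to that issue on GitHub
#   :user:`someone`  renders as "@someone" and links to their GitHub sponsors page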
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "aiohttp_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Async HTTP client/server for asyncio and Python",
"canonical_url": "http://docs.aiohttp.org/en/stable/",
"github_user": github_repo_org,
"github_repo": github_repo_name,
"github_button": True,
"github_type": "star",
"github_banner": True,
"badges": [
{
"image": f"{github_repo_url}/workflows/CI/badge.svg",
"target": f"{github_repo_url}/actions?query=workflow%3ACI",
"height": "20",
"alt": "Azure Pipelines CI status",
},
{
"image": f"https://codecov.io/github/{github_repo_slug}/coverage.svg?branch=master",
"target": f"https://codecov.io/github/{github_repo_slug}",
"height": "20",
"alt": "Code coverage status",
},
{
"image": f"https://badge.fury.io/py/{project}.svg",
"target": f"https://badge.fury.io/py/{project}",
"height": "20",
"alt": "Latest PyPI package version",
},
{
"image": f"https://img.shields.io/discourse/status?server=https%3A%2F%2F{github_repo_org}.discourse.group",
"target": f"https://{github_repo_org}.discourse.group",
"height": "20",
"alt": "Discourse status",
},
{
"image": "https://badges.gitter.im/Join%20Chat.svg",
"target": f"https://gitter.im/{github_repo_org}/Lobby",
"height": "20",
"alt": "Chat on Gitter",
},
],
}
html_css_files = [
"css/logo-adjustments.css",
]
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "aiohttp-plain.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"searchbox.html",
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = f"{project}doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
f"{project}.tex",
f"{project} Documentation",
f"{project} contributors",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project, f"{project} Documentation", [project], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
project,
f"{project} Documentation",
"Aiohttp contributors",
project,
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -------------------------------------------------------------------------
nitpicky = True
nitpick_ignore = [
("py:mod", "aiohttp"), # undocumented, no `.. currentmodule:: aiohttp` in docs
("py:class", "aiohttp.SimpleCookie"), # undocumented
("py:class", "aiohttp.web.RequestHandler"), # undocumented
("py:class", "aiohttp.NamedPipeConnector"), # undocumented
("py:meth", "aiohttp.ClientSession.request"), # undocumented
("py:class", "aiohttp.protocol.HttpVersion"), # undocumented
("py:class", "aiohttp.ClientRequest"), # undocumented
("py:class", "aiohttp.payload.Payload"), # undocumented
("py:class", "aiohttp.abc.AbstractResolver"), # undocumented
("py:func", "aiohttp.ws_connect"), # undocumented
("py:meth", "start"), # undocumented
("py:exc", "aiohttp.ClientHttpProxyError"), # undocumented
("py:class", "asyncio.AbstractServer"), # undocumented
("py:mod", "aiohttp.test_tools"), # undocumented
("py:class", "list of pairs"), # undocumented
("py:class", "aiohttp.protocol.HttpVersion"), # undocumented
("py:meth", "aiohttp.ClientSession.request"), # undocumented
("py:class", "aiohttp.StreamWriter"), # undocumented
("py:attr", "aiohttp.StreamResponse.body"), # undocumented
("py:class", "aiohttp.payload.StringPayload"), # undocumented
("py:meth", "aiohttp.web.Application.copy"), # undocumented
("py:meth", "asyncio.AbstractEventLoop.create_server"), # undocumented
("py:data", "aiohttp.log.server_logger"), # undocumented
("py:data", "aiohttp.log.access_logger"), # undocumented
("py:data", "aiohttp.helpers.AccessLogger"), # undocumented
("py:attr", "helpers.AccessLogger.LOG_FORMAT"), # undocumented
("py:meth", "aiohttp.web.AbstractRoute.url"), # undocumented
("py:class", "aiohttp.web.MatchedSubAppResource"), # undocumented
("py:attr", "body"), # undocumented
("py:class", "socket.socket"), # undocumented
("py:class", "socket.AddressFamily"), # undocumented
("py:obj", "logging.DEBUG"), # undocumented
("py:class", "aiohttp.abc.AbstractAsyncAccessLogger"), # undocumented
("py:meth", "aiohttp.web.Response.write_eof"), # undocumented
("py:meth", "aiohttp.payload.Payload.set_content_disposition"), # undocumented
("py:class", "cgi.FieldStorage"), # undocumented
("py:meth", "aiohttp.web.UrlDispatcher.register_resource"), # undocumented
("py:func", "aiohttp_debugtoolbar.setup"), # undocumented
]
# -- Options for towncrier_draft extension -----------------------------------
towncrier_draft_autoversion_mode = "draft" # or: 'sphinx-version', 'sphinx-release'
towncrier_draft_include_empty = True
towncrier_draft_working_directory = PROJECT_ROOT_DIR
# Not yet supported: towncrier_draft_config_path = 'pyproject.toml' # relative to cwd
|
the-stack_0_13042 | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
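# Example invocation (file paths are placeholders, not files shipped here):
#   python <this script> demo.jpg <config.py> <checkpoint.pth> --device cuda:0 --score-thr 0.3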
|
the-stack_0_13044 | # -*- coding: utf-8 -*-
from model.group import Group
import allure
# def test_add_group(app, db, json_groups, check_ui):
# group = json_groups
# old_groups = db.get_group_list()
# app.group_helper.creation(group)
# new_groups = db.get_group_list()
# old_groups.append(group)
# if check_ui:
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_group(app, db, json_groups, check_ui):
group = json_groups
with allure.step('Given a group list'):
old_groups = db.get_group_list()
with allure.step("When I add a group %s to the list" % group):
app.group_helper.creation(group)
with allure.step('Then the new group list equal to the old list with the added group'):
new_groups = db.get_group_list()
old_groups.append(group)
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#testdata= [Group(name=name, header=header, footer=footer)
# for name in ["", random_string("name", 10)]
# for header in ["", random_string("header", 20)]
# for footer in ["", random_string("footer", 20)]]
#
# def test_add_empty_group(app, db, check_ui):
# old_groups = db.get_group_list()
# group = Group(name="", header="", footer="")
# app.group_helper.creation(group)
# new_groups = db.get_group_list()
# assert len(old_groups) + 1 == len(new_groups)
# old_groups.append(group)
# if check_ui:
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
the-stack_0_13045 | class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
class AVL_Tree(object):
def insert(self, root, key):
if not root:
return TreeNode(key)
elif key < root.val:
root.left = self.insert(root.left, key)
else:
root.right = self.insert(root.right, key)
root.height = 1 + max(self.getHeight(root.left), self.getHeight(root.right))
balance = self.getBalance(root)
if balance > 1 and key < root.left.val:
return self.rightRotate(root)
if balance < -1 and key > root.right.val:
return self.leftRotate(root)
if balance > 1 and key > root.left.val:
root.left = self.leftRotate(root.left)
return self.rightRotate(root)
if balance < -1 and key < root.right.val:
root.right = self.rightRotate(root.right)
return self.leftRotate(root)
return root
def leftRotate(self, z):
y = z.right
T2 = y.left
y.left = z
z.right = T2
z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right))
y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right))
return y
def rightRotate(self, z):
y = z.left
T3 = y.right
y.right = z
z.left = T3
z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right))
y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right))
return y
def getHeight(self, root):
if not root:
return 0
return root.height
def getBalance(self, root):
if not root:
return 0
return self.getHeight(root.left) - self.getHeight(root.right)
def preOrder(self, root):
if not root:
return
print("{0} ".format(root.val), end="")
self.preOrder(root.left)
self.preOrder(root.right)
if __name__ == "__main__":
"""
from timeit import timeit
myTree = AVL_Tree()
root = None
root = myTree.insert(root, 10)
root = myTree.insert(root, 20)
root = myTree.insert(root, 30)
root = myTree.insert(root, 40)
root = myTree.insert(root, 50)
root = myTree.insert(root, 25)
print(timeit(lambda: myTree.preOrder(root), number=10000)) # 0.1360708509964752
"""
|
the-stack_0_13046 |
import sys
sys.path.append('../../..')
from fastNLP import cache_results
from reproduction.sequence_labelling.cws.data.cws_shift_pipe import CWSShiftRelayPipe
from reproduction.sequence_labelling.cws.model.bilstm_shift_relay import ShiftRelayCWSModel
from fastNLP import Trainer
from torch.optim import Adam
from fastNLP import BucketSampler
from fastNLP import GradientClipCallback
from reproduction.sequence_labelling.cws.model.metric import RelayMetric
from fastNLP.embeddings import StaticEmbedding
from fastNLP import EvaluateCallback
#########hyper
L = 4
hidden_size = 200
num_layers = 1
drop_p = 0.2
lr = 0.008
data_name = 'pku'
#########hyper
device = 0
cache_fp = 'caches/{}.pkl'.format(data_name)
@cache_results(_cache_fp=cache_fp, _refresh=True)  # cache the result to cache_fp so later runs can read it directly instead of recomputing
def prepare_data():
data_bundle = CWSShiftRelayPipe(dataset_name=data_name, L=L).process_from_file()
    # pretrained character embeddings and bigram embeddings
char_embed = StaticEmbedding(data_bundle.get_vocab('chars'), dropout=0.5, word_dropout=0.01,
model_dir_or_name='~/exps/CWS/pretrain/vectors/1grams_t3_m50_corpus.txt')
bigram_embed = StaticEmbedding(data_bundle.get_vocab('bigrams'), dropout=0.5, min_freq=3, word_dropout=0.01,
model_dir_or_name='~/exps/CWS/pretrain/vectors/2grams_t3_m50_corpus.txt')
return data_bundle, char_embed, bigram_embed
data, char_embed, bigram_embed = prepare_data()
model = ShiftRelayCWSModel(char_embed=char_embed, bigram_embed=bigram_embed,
hidden_size=hidden_size, num_layers=num_layers, drop_p=drop_p, L=L)
sampler = BucketSampler()
optimizer = Adam(model.parameters(), lr=lr)
clipper = GradientClipCallback(clip_value=5, clip_type='value')  # clip gradients whose values are too large
evaluator = EvaluateCallback(data.get_dataset('test'))  # additionally evaluate performance on the test set
callbacks = [clipper, evaluator]
trainer = Trainer(data.get_dataset('train'), model, optimizer=optimizer, loss=None, batch_size=128, sampler=sampler,
update_every=1, n_epochs=10, print_every=5, dev_data=data.get_dataset('dev'), metrics=RelayMetric(),
metric_key='f', validate_every=-1, save_path=None, use_tqdm=True, device=device, callbacks=callbacks,
check_code_level=0, num_workers=1)
trainer.train() |
the-stack_0_13047 | from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
from itertools import islice
from datetime import datetime
import pandas as pd
import streamlit as st
import logging
import os
files = os.listdir('./data')
workbooks = [item for item in files if '.xlsx' in item]
logging.basicConfig(filename='log.log', filemode='w', format='%(asctime)s - %(levelname)s %(message)s', datefmt='%H:%M:%S', encoding='utf-8', level=logging.DEBUG, force=True)
months = {'January': '01', 'February': '02', 'March': '03', 'April': '04', 'May': '05', 'June': '06', 'July': '07',
'August': '08', 'September': '09', 'October': '10', 'November': '11', 'December': '12', 'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07',
          'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
years = ['2010','2011','2012','2013','2014','2015','2016','2017','2018','2019','2020','2021']
def check_file(file):
    """
    Checks if a workbook exists under ./data and logs the result.
    """
    path = f'data/{file}'
    if os.path.exists(path):
        logging.info(f"{path} exists")
    else:
        print("FileNotFound: not a valid file.")
        logging.warning(f"FileNotFound: {path} is not a valid file.")
def get_summary(ws, month_year_format):
"""
Grabs relevant data from Summary MoM sheet.
"""
row = None
for item in ws['A']:
if month_year_format in str(item.value):
row = item.row
st.write(f'(Row: {row})')
values = [ro for ro in ws.iter_rows(min_row=row, max_row=row, values_only=True)]
new_values = [item for item in values[0][1:] if item != None]
# create dictionary from row data
row_data = {}
row_data['30s_abandonment'] = f'Abandon after 30s: {round(new_values[1]*100,2)}%'
    row_data['fcr'] = f'FCR : {round(new_values[2]*100, 2)}%'
    row_data['dsat'] = f'DSAT : {round(new_values[3]*100, 2)}%'
    row_data['csat'] = f'CSAT : {round(new_values[4]*100, 2)}%'
logging.info('get_summary succesful')
return row_data
def nps_check(type, number):
"""
Check size of group and return 'GOOD' or 'BAD'.
"""
if type == 'promoters':
if number >= 200:
return 'GOOD'
else:
return 'BAD'
if type == 'passives':
if number >= 100:
return 'GOOD'
else:
return 'BAD'
if type == 'detractors':
if number < 100:
return 'GOOD'
else:
return 'BAD'
def get_voc(ws, month_year_format):
"""
Grabs relevant data from VOC MoM sheet.
"""
col = None
for item in ws[1]:
if month_year_format in str(item.value):
col = item.column
st.write(f'(Column: {col})')
values = [co for co in ws.iter_cols(min_col=col, max_col=col, values_only=True)]
new_values = [item for item in values[0][1:] if item != None and isinstance(item, int)]
# create dictionary from column data
col_data = {}
col_data['base'] = f'Base Size: {new_values[0]}'
col_data['promoters'] = [f'Promoters: {new_values[1]}', nps_check('promoters', new_values[1])]
col_data['passives'] = [f'Passives: {new_values[2]}', nps_check('passives', new_values[2])]
col_data['detractors'] = [f'Detractors: {new_values[3]}', nps_check('detractors', new_values[3])]
logging.info('get_voc succesful')
return col_data
def get_current():
"""
    Grabs the current month (zero-padded number and full name) and the year.
"""
# format month year for datetime comparison
month = datetime.now().strftime('%m')
month_word = datetime.now().strftime('%B')
year = datetime.now().year
logging.info(f'Current: {month_word}, {month}-{year}')
return month, month_word, year
def log_summary(row_data):
"""
Log Summary data.
"""
print(row_data)
for item in row_data:
logging.info(row_data[item])
def log_voc(col_data):
"""
Log VOC data.
"""
for item in col_data:
if 'base' in item:
logging.info(col_data[item])
else:
logging.info(f'{col_data[item][0]} - {col_data[item][1]}')
def show_summary(row_data):
"""
Display Summary data in streamlit app.
"""
for item in row_data:
st.write(row_data[item])
logging.info(f'Displayed summary in app')
def show_voc(col_data):
"""
Display VOC data in streamlit app.
"""
for item in col_data:
if 'base' in item:
st.write(col_data[item])
else:
st.write(f'{col_data[item][0]} - {col_data[item][1]}')
logging.info(f'Displayed voc in app')
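# Illustrative wiring of the helpers above (the sheet names and chosen workbook
# are assumptions, not taken from this file):
#     wb = load_workbook(f'data/{workbooks[0]}', data_only=True)
#     month, month_word, year = get_current()
#     summary = get_summary(wb['Summary Rolling MoM'], f'{month_word} {year}')
#     show_summary(summary)
#     log_summary(summary)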
def show_logs():
with open('log.log') as log_file:
for line in log_file:
st.write(line)
logging.info('Viewed logs') |
the-stack_0_13048 | try:
from setuptools import setup
from setuptools import find_packages
packages = find_packages()
except ImportError:
from distutils.core import setup
import os
packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
setup(
name='cle',
description='CLE Loads Everything (at least, many binary formats!) and provides a pythonic interface to analyze what they are and what they would look like in memory.',
version='7.8.2.21',
packages=packages,
install_requires=[
'pyelftools>=0.24',
'cffi',
'idalink',
'archinfo>=7.8.2.21',
'pyvex>=7.8.2.21',
'pefile',
]
)
|
the-stack_0_13050 | import os
import csv
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torchaudio.datasets.utils import download_url, extract_archive
from torch import Tensor
from torch.utils.data import Dataset
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for LJSpeech-1.1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / 'metadata.csv'
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url(url, root, hash_value=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline='') as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
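# Minimal usage sketch (the root path is illustrative):
#     dataset = LJSPEECH("./data", download=True)
#     waveform, sample_rate, transcript, normalized_transcript = dataset[0]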
|
the-stack_0_13051 | import random
import string
from dpaster import core
from tests.fixtures import python_code
def test_get_syntax_stdin(python_code):
assert "python" in core.get_syntax("<stdin>", python_code)
def test_get_syntax_java_file():
assert core.get_syntax("HelloWorld.java", "") == "java"
def test_get_syntax_weird_filename():
assert core.get_syntax("main.cthulhu", "") == "text"
def test_get_syntax_weird_content():
random.seed(123)
content = "".join(
ch
for ch in [
random.choice(
string.ascii_letters
+ string.digits
+ r"!@#$%^&*()_+-=[]{}\/"
)
for _ in range(100)
]
)
assert core.get_syntax("<stdin>", content) == "text"
|
the-stack_0_13055 | """
Some utilities and things for testing various bits of SMPP.
"""
from twisted.internet.defer import DeferredQueue
from smpp.pdu_inspector import unpack_pdu
from vumi.transports.smpp.clientserver.server import SmscServer
class SmscTestServer(SmscServer):
"""
SMSC subclass that records inbound and outbound PDUs for later assertion.
"""
def __init__(self, delivery_report_string=None):
self.pdu_queue = DeferredQueue()
SmscServer.__init__(self, delivery_report_string)
def handle_data(self, data):
self.pdu_queue.put({
'direction': 'inbound',
'pdu': unpack_pdu(data),
})
return SmscServer.handle_data(self, data)
def send_pdu(self, pdu):
self.pdu_queue.put({
'direction': 'outbound',
'pdu': pdu.get_obj(),
})
return SmscServer.send_pdu(self, pdu)
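# Typical assertion pattern in a test case (illustrative, not part of this module):
#     server = SmscTestServer()
#     ...  # drive some SMPP traffic through the server
#     pdu = yield server.pdu_queue.get()
#     assert pdu['direction'] == 'inbound'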
|
the-stack_0_13057 | from __future__ import absolute_import
from datetime import datetime
import pytz
from django.views.generic import View
from sentry.models import (
Commit, CommitAuthor, GroupSubscriptionReason, Organization, Project,
Release, Team
)
from sentry.utils.http import absolute_uri
from .mail import MailPreview
class DebugNewReleaseEmailView(View):
def get(self, request):
org = Organization(
id=1,
slug='organization',
name='My Company',
)
team = Team(
id=1,
slug='team',
name='My Team',
organization=org,
)
project = Project(
id=1,
organization=org,
team=team,
slug='project',
name='My Project',
)
release = Release(
project=project,
organization_id=project.organization_id,
version='6c998f755f304593a4713abd123eaf8833a2de5e',
date_added=datetime(2016, 10, 12, 15, 39, tzinfo=pytz.utc)
)
release_link = absolute_uri('/{}/{}/releases/{}/'.format(
org.slug,
project.slug,
release.version,
))
project_link = absolute_uri('/{}/{}/'.format(
org.slug,
project.slug,
))
commit_list = [
Commit(key='48b86fcd677da3dba5679d7a738240ce6fb74b20'),
Commit(
key='a53a2756bb8d111b43196210b34df90b87ed336b',
message='Update README.rst',
author=CommitAuthor(
name='David Cramer',
email='[email protected]',
)
),
]
return MailPreview(
html_template='sentry/emails/activity/release.html',
text_template='sentry/emails/activity/release.txt',
context={
'release': release,
'project': project,
'release_link': release_link,
'project_link': project_link,
'commit_list': commit_list,
'reason': GroupSubscriptionReason.descriptions[
GroupSubscriptionReason.committed
],
},
).render(request)
|
the-stack_0_13058 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for creating input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
# A return type allowing input_fns to return multiple values in a well-
# defined way (analogous to ModelFnOps).
# The expected return values are:
# features: a dict of string to `Tensor` or `SparseTensor`, giving the features
# to be passed to the model.
# labels: a dict of string to `Tensor` or `SparseTensor`, giving labels (aka
# targets) for training.
# default_inputs: a dict of string to `Tensor` or `SparseTensor`, giving the
# input placeholders (if any) that this input_fn expects to be fed.
InputFnOps = collections.namedtuple('InputFnOps',
['features',
'labels',
'default_inputs'])
def build_parsing_serving_input_fn(feature_spec, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting fed tf.Examples.
Creates an input_fn that expects a serialized tf.Example fed into a string
placeholder. The function parses the tf.Example according to the provided
feature_spec, and returns all parsed Tensors as features. This input_fn is
for use at serving time, so the labels return value is always None.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
"""
def input_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
inputs = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
labels = None # these are not known in serving!
return InputFnOps(features, labels, inputs)
return input_fn
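# Usage sketch (the feature spec below is illustrative):
#     feature_spec = {'age': parsing_ops.FixedLenFeature([1], dtypes.float32)}
#     serving_input_fn = build_parsing_serving_input_fn(feature_spec)
#     input_fn_ops = serving_input_fn()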
def build_default_serving_input_fn(features, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting feature Tensors.
Creates an input_fn that expects all features to be fed directly.
This input_fn is for use at serving time, so the labels return value is always
None.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
"""
def input_fn():
"""an input_fn that expects all features to be fed directly."""
features_placeholders = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
features_placeholders[name] = array_ops.placeholder(dtype=t.dtype,
shape=shape,
name=t.name)
labels = None # these are not known in serving!
return InputFnOps(features_placeholders, labels, features_placeholders)
return input_fn
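# Usage sketch (illustrative): pass a dict of already-built feature Tensors
# (e.g. from an input pipeline) and call the returned input_fn when building
# the serving graph:
#     serving_input_fn = build_default_serving_input_fn(features)
#     input_fn_ops = serving_input_fn()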
|
the-stack_0_13059 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from six.moves import queue
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceProperties as DSProp
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import TemplateTopologyFields as TTFields
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_properties import AlarmProperties as AlarmProps
from vitrage.datasources.nagios import NAGIOS_DATASOURCE
from vitrage.datasources.nagios.properties import NagiosProperties as NProps
from vitrage.datasources.nagios.properties import NagiosTestStatus
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.entity_graph.mappings.operational_alarm_severity import \
OperationalAlarmSeverity
from vitrage.entity_graph.mappings.operational_resource_state import \
OperationalResourceState
from vitrage.evaluator.actions.action_executor import ActionExecutor
from vitrage.evaluator.actions.base import ActionMode
from vitrage.evaluator.actions.base import ActionType
from vitrage.evaluator.actions.evaluator_event_transformer \
import VITRAGE_DATASOURCE
from vitrage.evaluator.actions.recipes.action_steps import ADD_VERTEX
from vitrage.evaluator.actions.recipes.base import EVALUATOR_EVENT_TYPE
from vitrage.evaluator.template_data import ActionSpecs
from vitrage.evaluator.template_fields import TemplateFields as TFields
from vitrage.opts import register_opts
from vitrage.tests.functional.base import TestFunctionalBase
from vitrage.tests.functional.test_configuration import TestConfiguration
class TestActionExecutor(TestFunctionalBase, TestConfiguration):
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
super(TestActionExecutor, cls).setUpClass()
cls.conf = cfg.ConfigOpts()
cls.conf.register_opts(cls.PROCESSOR_OPTS, group='entity_graph')
cls.conf.register_opts(cls.DATASOURCES_OPTS, group='datasources')
cls.add_db(cls.conf)
for vitrage_type in cls.conf.datasources.types:
register_opts(cls.conf, vitrage_type, cls.conf.datasources.path)
def _init_executer(self):
event_queue = queue.Queue()
def actions_callback(event_type, data):
event_queue.put(data)
return event_queue, ActionExecutor(self.conf, actions_callback)
def test_execute_set_state(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_vertex_before = host_vertices[0]
targets = {TFields.TARGET: host_vertex_before}
props = {TFields.STATE: OperationalResourceState.SUBOPTIMAL}
action_spec = ActionSpecs(0, ActionType.SET_STATE, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
host_vertex_after = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
agg_state_before = \
host_vertex_before.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertNotEqual(agg_state_before,
OperationalResourceState.SUBOPTIMAL)
self.assertNotIn(VProps.VITRAGE_STATE, host_vertex_before.properties)
agg_state_after = \
host_vertex_after.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertEqual(agg_state_after, OperationalResourceState.SUBOPTIMAL)
v_state_after = host_vertex_after.get(VProps.VITRAGE_STATE)
self.assertEqual(v_state_after, OperationalResourceState.SUBOPTIMAL)
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
host_vertex_after_undo = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
agg_state_after_undo = \
host_vertex_before.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertEqual(agg_state_after_undo, agg_state_before)
self.assertNotIn(
VProps.VITRAGE_STATE, host_vertex_after_undo.properties)
def test_execute_mark_instance_down(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE}
instance_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
instance_vertex_before = instance_vertices[0]
targets = {TFields.TARGET: instance_vertex_before}
props = {}
action_spec = ActionSpecs(0, ActionType.MARK_DOWN, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
instance_vertex_after = processor.entity_graph.get_vertex(
instance_vertex_before.vertex_id)
# Test Assertions
self.assertTrue(instance_vertex_after.get(VProps.IS_MARKED_DOWN))
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
instance_vertex_after_undo = processor.entity_graph.get_vertex(
instance_vertex_before.vertex_id)
# Test Assertions
self.assertFalse(instance_vertex_after_undo.get(VProps.IS_MARKED_DOWN))
def test_execute_mark_down(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_vertex_before = host_vertices[0]
targets = {TFields.TARGET: host_vertex_before}
props = {}
action_spec = ActionSpecs(0, ActionType.MARK_DOWN, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
host_vertex_after = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
self.assertTrue(host_vertex_after.get(VProps.IS_MARKED_DOWN))
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
host_vertex_after_undo = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
self.assertFalse(host_vertex_after_undo.get(VProps.IS_MARKED_DOWN))
def test_execute_add_edge(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_1 = host_vertices[0]
nagios_event1 = TestActionExecutor._get_nagios_event(
host_1.get(VProps.ID), NOVA_HOST_DATASOURCE)
processor.process_event(nagios_event1)
host_2 = host_vertices[1]
nagios_event2 = TestActionExecutor._get_nagios_event(
host_2.get(VProps.ID), NOVA_HOST_DATASOURCE)
processor.process_event(nagios_event2)
alarms_attrs = {VProps.VITRAGE_TYPE: NAGIOS_DATASOURCE}
alarms_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=alarms_attrs)
alarm1 = alarms_vertices[0]
alarm2 = alarms_vertices[1]
targets = {
TFields.TARGET: alarm1,
TFields.SOURCE: alarm2
}
action_spec = ActionSpecs(
0, ActionType.ADD_CAUSAL_RELATIONSHIP, targets, {})
event_queue, action_executor = self._init_executer()
before_edge = processor.entity_graph.get_edge(alarm2.vertex_id,
alarm1.vertex_id,
EdgeLabel.CAUSES)
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
new_edge = processor.entity_graph.get_edge(alarm2.vertex_id,
alarm1.vertex_id,
EdgeLabel.CAUSES)
# Test Assertions
self.assertIsNone(before_edge)
self.assertIsNotNone(new_edge)
def test_execute_add_vertex(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host = host_vertices[0]
targets = {TFields.TARGET: host}
props = {
TFields.ALARM_NAME: 'VM_CPU_SUBOPTIMAL_PERFORMANCE',
TFields.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.STATE: AlarmProps.ACTIVE_STATE,
VProps.RESOURCE_ID: host[VProps.ID],
VProps.VITRAGE_ID: 'DUMMY_ID'
}
# Raise alarm action adds new vertex with type vitrage to the graph
action_spec = ActionSpecs(0, ActionType.RAISE_ALARM, targets, props)
alarm_vertex_attrs = {VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE}
before_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
event_queue, action_executor = self._init_executer()
# Test Action
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
after_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
# Assertions
self.assertEqual(len(before_alarms) + 1, len(after_alarms))
self.assert_is_not_empty(after_alarms)
alarm = after_alarms[0]
self.assertEqual(alarm.properties[VProps.VITRAGE_CATEGORY],
EntityCategory.ALARM)
self.assertEqual(alarm.properties[VProps.VITRAGE_TYPE],
VITRAGE_DATASOURCE)
self.assertEqual(alarm.properties[VProps.SEVERITY],
props[TFields.SEVERITY])
self.assertEqual(alarm.properties[VProps.VITRAGE_OPERATIONAL_SEVERITY],
props[TFields.SEVERITY])
self.assertEqual(alarm.properties[VProps.STATE],
AlarmProps.ACTIVE_STATE)
self.assertEqual(alarm.properties[VProps.VITRAGE_RESOURCE_ID],
action_spec.targets
                         [TTFields.TARGET][VProps.VITRAGE_ID])
self.assertEqual(alarm.properties[VProps.VITRAGE_RESOURCE_TYPE],
NOVA_HOST_DATASOURCE)
def test_execute_add_and_remove_vertex(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host = host_vertices[0]
targets = {TFields.TARGET: host}
props = {
TFields.ALARM_NAME: 'VM_CPU_SUBOPTIMAL_PERFORMANCE',
TFields.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.STATE: AlarmProps.ACTIVE_STATE,
VProps.RESOURCE_ID: host[VProps.ID]
}
action_spec = ActionSpecs(0, ActionType.RAISE_ALARM, targets, props)
add_vertex_event = TestActionExecutor._get_vitrage_add_vertex_event(
host,
props[TFields.ALARM_NAME],
props[TFields.SEVERITY])
processor.process_event(add_vertex_event)
alarm_vertex_attrs = {VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE,
VProps.VITRAGE_IS_DELETED: False}
before_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
event_queue, action_executor = self._init_executer()
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
event = event_queue.get()
processor.process_event(event)
after_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
# Test Assertions
self.assertEqual(len(before_alarms) - 1, len(after_alarms))
@staticmethod
def _get_nagios_event(resource_name, resource_type):
return {NProps.LAST_CHECK: '2016-02-07 15:26:04',
NProps.RESOURCE_NAME: resource_name,
NProps.RESOURCE_TYPE: resource_type,
NProps.SERVICE: 'Check_MK',
NProps.STATUS: NagiosTestStatus.CRITICAL,
NProps.STATUS_INFO: 'test test test',
DSProp.DATASOURCE_ACTION: DatasourceAction.SNAPSHOT,
DSProp.ENTITY_TYPE: NAGIOS_DATASOURCE,
DSProp.SAMPLE_DATE: '2016-02-07 15:26:04'}
@staticmethod
def _get_vitrage_add_vertex_event(target_vertex, alarm_name, severity):
return {TTFields.TARGET: target_vertex.vertex_id,
VProps.UPDATE_TIMESTAMP: '2016-03-17 11:33:32.443002',
DSProp.DATASOURCE_ACTION: DatasourceAction.UPDATE,
TFields.ALARM_NAME: alarm_name,
VProps.STATE: 'Active',
EVALUATOR_EVENT_TYPE: ADD_VERTEX,
DSProp.ENTITY_TYPE: VITRAGE_DATASOURCE,
VProps.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.VITRAGE_ID: 'mock_vitrage_id',
VProps.VITRAGE_RESOURCE_TYPE: NOVA_HOST_DATASOURCE,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_SAMPLE_TIMESTAMP:
'2016-03-17 11:33:32.443002+00:00'}
|
the-stack_0_13061 | import argparse
import os
def parse_args(args) -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Make submission')
parser.add_argument(
'-i', '--input',
help='path to input file',
type=str,
required=True
)
parser.add_argument(
'-o', '--output',
help='path to output file',
type=str,
required=True
)
return parser.parse_args(args)
def get_score(motif_list):
col_list = [''.join(seq) for seq in zip(*motif_list)]
max_c = sum([max([c.count(x) for x in 'ACGT']) for c in col_list])
return len(motif_list[0])*len(motif_list) - max_c
def get_profile(motif_list):
col_list = [''.join(seq) for seq in zip(*motif_list)]
return [[(c.count(nuc) + 1) / (len(c) + 4) for nuc in 'ACGT'] for c in col_list]
def get_kmer(dna, k, profile):
nuc_loc = {
nucleotide: index for index, nucleotide in enumerate('ACGT')
}
max_prob = -1
for i in range(len(dna)-k+1):
current_prob = 1
for j, nuc in enumerate(dna[i:i+k]):
current_prob *= profile[j][nuc_loc[nuc]]
if current_prob > max_prob:
max_prob = current_prob
result = dna[i:i+k]
return result
def calculate(input_path: str) -> str:
with open(input_path, 'r') as file:
k, t = map(int, file.readline().split())
dna_list = [line.strip() for line in file]
best_s = t*k
for i in range(len(dna_list[0])-k+1):
motifs = [dna_list[0][i:i+k]]
for j in range(1, t):
current_profile = get_profile(motifs)
motifs.append(get_kmer(dna_list[j], k, current_profile))
current_s = get_score(motifs)
if current_s < best_s:
best_s = current_s
best_motifs = motifs
return '\n'.join(best_motifs)
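# Expected input format (values are illustrative): the first line holds "k t",
# followed by t DNA strings, one per line, e.g.
#     3 5
#     GGCGTTCAGGCA
#     AAGAATCAGTCA
#     CAAGGAGTTCGC
#     CACGTCAATCAC
#     CAATAATATTCG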
def main(args=None) -> None:
    args = parse_args(args)
    assert os.path.exists(args.input), 'no input file'
    result = calculate(args.input)
    with open(args.output, 'w') as output_file:
        output_file.write(result)
if __name__ == '__main__':
main()
|
the-stack_0_13062 | import asyncio
import traceback
import os
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Optional
from chives.plotting.create_plots import resolve_plot_keys
from chives.plotters.plotters_util import run_plotter, run_command
log = logging.getLogger(__name__)
MADMAX_PLOTTER_DIR = "madmax-plotter"
def is_madmax_supported() -> bool:
return sys.platform.startswith("linux") or sys.platform in ["darwin", "win32", "cygwin"]
def get_madmax_install_path(plotters_root_path: Path) -> Path:
return plotters_root_path / MADMAX_PLOTTER_DIR
def get_madmax_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "madmax"
def get_madmax_executable_path_for_ksize(plotters_root_path: Path, ksize: int = 29) -> Path:
madmax_dir: Path = get_madmax_package_path()
madmax_exec: str = "chia_plot"
# if ksize > 32:
# madmax_exec += "_k34" # Use the chia_plot_k34 executable for k-sizes > 32
if sys.platform in ["win32", "cygwin"]:
madmax_exec += ".exe"
if not madmax_dir.exists():
madmax_dir = get_madmax_install_path(plotters_root_path) / "build"
return madmax_dir / madmax_exec
def get_madmax_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
info: Dict[str, Any] = {"display_name": "madMAx Plotter"}
installed: bool = False
supported: bool = is_madmax_supported()
    if get_madmax_executable_path_for_ksize(plotters_root_path).exists():
        version: Optional[str] = None  # avoid NameError below if the version check raises
        try:
proc = run_command(
[os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path)), "--version"],
"Failed to call madmax with --version option",
capture_output=True,
text=True,
)
version = proc.stdout.strip()
except Exception as e:
print(f"Failed to determine madmax version: {e}")
if version is not None:
installed = True
info["version"] = version
else:
installed = False
info["installed"] = installed
if installed is False:
info["can_install"] = supported
return info
def install_madmax(plotters_root_path: Path):
if is_madmax_supported():
print("Installing dependencies.")
if sys.platform.startswith("linux"):
run_command(
[
"sudo",
"apt",
"install",
"-y",
"libsodium-dev",
"cmake",
"g++",
"git",
"build-essential",
],
"Could not install dependencies",
)
if sys.platform.startswith("darwin"):
run_command(
[
"brew",
"install",
"libsodium",
"cmake",
"git",
"autoconf",
"automake",
"libtool",
"wget",
],
"Could not install dependencies",
)
run_command(["git", "--version"], "Error checking Git version.")
print("Cloning git repository.")
run_command(
[
"git",
"clone",
"https://github.com/Chia-Network/chia-plotter-madmax.git",
MADMAX_PLOTTER_DIR,
],
"Could not clone madmax git repository",
cwd=os.fspath(plotters_root_path),
)
print("Installing git submodules.")
madmax_path: str = os.fspath(get_madmax_install_path(plotters_root_path))
run_command(
[
"git",
"submodule",
"update",
"--init",
"--recursive",
],
"Could not initialize git submodules",
cwd=madmax_path,
)
print("Running install script.")
run_command(["./make_devel.sh"], "Error while running install script", cwd=madmax_path)
else:
raise RuntimeError("Platform not supported yet for madmax plotter.")
progress = {
"[P1] Table 1 took": 0.01,
"[P1] Table 2 took": 0.06,
"[P1] Table 3 took": 0.12,
"[P1] Table 4 took": 0.2,
"[P1] Table 5 took": 0.28,
"[P1] Table 6 took": 0.36,
"[P1] Table 7 took": 0.42,
"[P2] Table 7 rewrite took": 0.43,
"[P2] Table 6 rewrite took": 0.48,
"[P2] Table 5 rewrite took": 0.51,
"[P2] Table 4 rewrite took": 0.55,
"[P2] Table 3 rewrite took": 0.58,
"[P2] Table 2 rewrite took": 0.61,
"[P3-2] Table 2 took": 0.66,
"[P3-2] Table 3 took": 0.73,
"[P3-2] Table 4 took": 0.79,
"[P3-2] Table 5 took": 0.85,
"[P3-2] Table 6 took": 0.92,
"[P3-2] Table 7 took": 0.98,
}
def dir_with_trailing_slash(dir: str) -> str:
return dir if dir[-1] == os.path.sep else dir + os.path.sep
def plot_madmax(args, chives_root_path: Path, plotters_root_path: Path):
if sys.platform not in ["win32", "cygwin"]:
import resource
# madMAx has a ulimit -n requirement > 296:
# "Cannot open at least 296 files, please raise maximum open file limit in OS."
resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
if not os.path.exists(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)):
print("Installing madmax plotter.")
try:
install_madmax(plotters_root_path)
except Exception as e:
print(f"Exception while installing madmax plotter: {e}")
return
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
None if args.farmerkey == b"" else args.farmerkey.hex(),
None,
None if args.pool_key == b"" else args.pool_key.hex(),
None if args.contract == "" else args.contract,
chives_root_path,
log,
args.connect_to_daemon,
)
)
call_args = []
call_args.append(os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)))
call_args.append("-f")
call_args.append(bytes(plot_keys.farmer_public_key).hex())
if plot_keys.pool_public_key is not None:
call_args.append("-p")
call_args.append(bytes(plot_keys.pool_public_key).hex())
call_args.append("-t")
# s if s[-1] == os.path.sep else s + os.path.sep
call_args.append(dir_with_trailing_slash(args.tmpdir))
if len(args.tmpdir2) > 0:
call_args.append("-2")
call_args.append(dir_with_trailing_slash(args.tmpdir2))
call_args.append("-d")
call_args.append(dir_with_trailing_slash(args.finaldir))
if plot_keys.pool_contract_address is not None:
call_args.append("-c")
call_args.append(plot_keys.pool_contract_address)
call_args.append("-n")
call_args.append(str(args.count))
call_args.append("-r")
call_args.append(str(args.threads))
call_args.append("-u")
call_args.append(str(args.buckets))
call_args.append("-v")
call_args.append(str(args.buckets3))
if args.waitforcopy:
call_args.append("-w")
if args.tmptoggle:
call_args.append("-G")
call_args.append("-K")
call_args.append(str(args.rmulti2))
call_args.append("-k")
call_args.append(str(args.size))
call_args.append("-x")
call_args.append("9699")
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_plotter(call_args, progress))
except Exception as e:
print(f"Exception while plotting: {type(e)} {e}")
print(f"Traceback: {traceback.format_exc()}")
|
the-stack_0_13063 | import json
from ..constants.path import get_cache_path
from ..Utils.decorators import change_working_directory, cache_data
from ..Utils.utils import search
from .text import TextEntityAnnotation
TASK_TYPE = {
'TextEntityAnnotation':TextEntityAnnotation
}
@change_working_directory
@cache_data
def list_datasets(*args, **kwargs):
'''
Lists all the datasets in the user profile
'''
data = kwargs['data']
if data == None:
print("Using Cached data...")
with open('./dumps.json', 'r') as f:
data = json.load(f)
dataset_names = list()
for task in data['annotation_data']:
dataset_names.append(task["task_name"])
return dataset_names
@change_working_directory
@cache_data
def show_dataset(dataset_name:str, samples:int=1,*args, **kwargs):
with open('./dumps.json', 'r') as f:
user_data = json.load(f)
user_dataset = search(user_data['annotation_data'], dataset_name)
if user_dataset == -1:
raise ValueError("Dataset not found. Check dataset name or recache the environment with `show_datasets(refresh=True)`")
task = TASK_TYPE[user_dataset['task_type']](user_dataset)
sents, ets = task.get_dataset(samples)
for i, (tokens, labels) in enumerate(zip(sents, ets)):
print(f"Sample {i}")
print(*tokens)
print(*labels)
print()
@change_working_directory
@cache_data
def get_dataset(dataset_name:str, *args, **kwargs):
with open('./dumps.json', 'r') as f:
user_data = json.load(f)
user_dataset = search(user_data['annotation_data'], dataset_name)
if user_dataset == -1:
raise ValueError("Dataset not found. Check dataset name or recache the environment with `show_datasets(refresh=True)`")
task = TASK_TYPE[user_dataset['task_type']](user_dataset)
return task
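# Illustrative usage (the dataset name and sample count are assumptions):
#     names = list_datasets()
#     show_dataset(names[0], samples=2)
#     task = get_dataset(names[0])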
|
the-stack_0_13064 | from setuptools import setup
import os
from codecs import open
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, 'inputimeout', '__version__.py'),
'r', 'utf-8') as f:
exec(f.read(), about)
tests_requirements = [
'pytest-cov', 'pytest', 'flake8',
]
setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=readme,
packages=['inputimeout'],
python_requires='>=3.4',
license=about['__license__'],
url=about['__url__'],
py_modules=['inputimeout'],
    keywords=['input', 'timeout', 'stdin'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
],
tests_require=tests_requirements,
)
|
the-stack_0_13065 | import atexit
import glob
import logging
import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any
from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents_envs.base_env import (
BaseEnv,
BatchedStepResult,
AgentGroupSpec,
AgentGroup,
AgentId,
)
from mlagents_envs.timers import timed, hierarchical_timer
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityActionException,
UnityTimeOutException,
)
from mlagents_envs.communicator_objects.command_pb2 import STEP, RESET
from mlagents_envs.rpc_utils import (
agent_group_spec_from_proto,
batched_step_result_from_proto,
)
from mlagents_envs.communicator_objects.unity_rl_input_pb2 import UnityRLInputProto
from mlagents_envs.communicator_objects.unity_rl_output_pb2 import UnityRLOutputProto
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.communicator_objects.unity_output_pb2 import UnityOutputProto
from mlagents_envs.communicator_objects.unity_rl_initialization_input_pb2 import (
UnityRLInitializationInputProto,
)
from mlagents_envs.communicator_objects.unity_input_pb2 import UnityInputProto
from .rpc_communicator import RpcCommunicator
from sys import platform
import signal
import struct
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mlagents_envs")
class UnityEnvironment(BaseEnv):
SCALAR_ACTION_TYPES = (int, np.int32, np.int64, float, np.float32, np.float64)
SINGLE_BRAIN_ACTION_TYPES = SCALAR_ACTION_TYPES + (list, np.ndarray)
API_VERSION = "API-12"
def __init__(
self,
file_name: Optional[str] = None,
worker_id: int = 0,
base_port: int = 5005,
seed: int = 0,
docker_training: bool = False,
no_graphics: bool = False,
timeout_wait: int = 60,
args: Optional[List[str]] = None,
side_channels: Optional[List[SideChannel]] = None,
):
"""
Starts a new unity environment and establishes a connection with the environment.
Notice: Currently communication between Unity and Python takes place over an open socket without authentication.
Ensure that the network where training takes place is secure.
:string file_name: Name of Unity environment binary.
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
:int worker_id: Number to add to communication port (5005) [0]. Used for asynchronous agent scenarios.
:bool docker_training: Informs this class whether the process is being run within a container.
:bool no_graphics: Whether to run the Unity simulator in no-graphics mode
:int timeout_wait: Time (in seconds) to wait for connection from environment.
:bool train_mode: Whether to run in training mode, speeding up the simulation, by default.
:list args: Addition Unity command line arguments
:list side_channels: Additional side channel for no-rl communication with Unity
"""
args = args or []
atexit.register(self._close)
self.port = base_port + worker_id
self._buffer_size = 12000
self._version_ = UnityEnvironment.API_VERSION
# If true, this means the environment was successfully loaded
self._loaded = False
# The process that is started. If None, no process was started
self.proc1 = None
self.timeout_wait: int = timeout_wait
self.communicator = self.get_communicator(worker_id, base_port, timeout_wait)
self.worker_id = worker_id
self.side_channels: Dict[int, SideChannel] = {}
if side_channels is not None:
for _sc in side_channels:
if _sc.channel_type in self.side_channels:
raise UnityEnvironmentException(
"There cannot be two side channels with the same channel type {0}.".format(
_sc.channel_type
)
)
self.side_channels[_sc.channel_type] = _sc
# If the environment name is None, a new environment will not be launched
# and the communicator will directly try to connect to an existing unity environment.
# If the worker-id is not 0 and the environment name is None, an error is thrown
if file_name is None and worker_id != 0:
raise UnityEnvironmentException(
"If the environment name is None, "
"the worker-id must be 0 in order to connect with the Editor."
)
if file_name is not None:
self.executable_launcher(file_name, docker_training, no_graphics, args)
else:
logger.info(
f"Listening on port {self.port}. "
f"Start training by pressing the Play button in the Unity Editor."
)
self._loaded = True
rl_init_parameters_in = UnityRLInitializationInputProto(seed=seed)
try:
aca_output = self.send_academy_parameters(rl_init_parameters_in)
aca_params = aca_output.rl_initialization_output
except UnityTimeOutException:
self._close()
raise
# TODO : think of a better way to expose the academyParameters
self._unity_version = aca_params.version
if self._unity_version != self._version_:
self._close()
raise UnityEnvironmentException(
f"The API number is not compatible between Unity and python. "
f"Python API: {self._version_}, Unity API: {self._unity_version}.\n"
f"Please go to https://github.com/Unity-Technologies/ml-agents/releases/tag/latest_release"
f"to download the latest version of ML-Agents."
)
self._env_state: Dict[str, BatchedStepResult] = {}
self._env_specs: Dict[str, AgentGroupSpec] = {}
self._env_actions: Dict[str, np.ndarray] = {}
self._is_first_message = True
self._update_group_specs(aca_output)
@staticmethod
def get_communicator(worker_id, base_port, timeout_wait):
return RpcCommunicator(worker_id, base_port, timeout_wait)
def executable_launcher(self, file_name, docker_training, no_graphics, args):
cwd = os.getcwd()
file_name = (
file_name.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
true_filename = os.path.basename(os.path.normpath(file_name))
logger.debug("The true file name is {}".format(true_filename))
launch_string = None
if platform == "linux" or platform == "linux2":
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86")
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "darwin":
candidates = glob.glob(
os.path.join(
cwd, file_name + ".app", "Contents", "MacOS", true_filename
)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", true_filename)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(cwd, file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "win32":
candidates = glob.glob(os.path.join(cwd, file_name + ".exe"))
if len(candidates) == 0:
candidates = glob.glob(file_name + ".exe")
if len(candidates) > 0:
launch_string = candidates[0]
if launch_string is None:
self._close()
raise UnityEnvironmentException(
"Couldn't launch the {0} environment. "
"Provided filename does not match any environments.".format(
true_filename
)
)
else:
logger.debug("This is the launch string {}".format(launch_string))
# Launch Unity environment
if not docker_training:
subprocess_args = [launch_string]
if no_graphics:
subprocess_args += ["-nographics", "-batchmode"]
subprocess_args += ["--port", str(self.port)]
subprocess_args += args
try:
self.proc1 = subprocess.Popen(
subprocess_args,
# start_new_session=True means that signals to the parent python process
# (e.g. SIGINT from keyboard interrupt) will not be sent to the new process on POSIX platforms.
# This is generally good since we want the environment to have a chance to shutdown,
# but may be undesirable in come cases; if so, we'll add a command-line toggle.
# Note that on Windows, the CTRL_C signal will still be sent.
start_new_session=True,
)
except PermissionError as perm:
# This is likely due to missing read or execute permissions on file.
raise UnityEnvironmentException(
f"Error when trying to launch environment - make sure "
f"permissions are set correctly. For example "
f'"chmod -R 755 {launch_string}"'
) from perm
else:
# Comments for future maintenance:
# xvfb-run is a wrapper around Xvfb, a virtual xserver where all
# rendering is done to virtual memory. It automatically creates a
# new virtual server automatically picking a server number `auto-servernum`.
# The server is passed the arguments using `server-args`, we are telling
# Xvfb to create Screen number 0 with width 640, height 480 and depth 24 bits.
# Note that 640 X 480 are the default width and height. The main reason for
# us to add this is because we'd like to change the depth from the default
# of 8 bits to 24.
# Unfortunately, this means that we will need to pass the arguments through
# a shell which is why we set `shell=True`. Now, this adds its own
# complications. E.g SIGINT can bounce off the shell and not get propagated
# to the child processes. This is why we add `exec`, so that the shell gets
# launched, the arguments are passed to `xvfb-run`. `exec` replaces the shell
# we created with `xvfb`.
#
docker_ls = (
"exec xvfb-run --auto-servernum"
" --server-args='-screen 0 640x480x24'"
" {0} --port {1}"
).format(launch_string, str(self.port))
self.proc1 = subprocess.Popen(
docker_ls,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
def _update_group_specs(self, output: UnityOutputProto) -> None:
init_output = output.rl_initialization_output
for brain_param in init_output.brain_parameters:
# Each BrainParameter in the rl_initialization_output should have at least one AgentInfo
# Get that agent, because we need some of its observations.
agent_infos = output.rl_output.agentInfos[brain_param.brain_name]
if agent_infos.value:
agent = agent_infos.value[0]
new_spec = agent_group_spec_from_proto(brain_param, agent)
self._env_specs[brain_param.brain_name] = new_spec
logger.info(f"Connected new brain:\n{brain_param.brain_name}")
def _update_state(self, output: UnityRLOutputProto) -> None:
"""
Collects experience information from all external brains in environment at current step.
"""
for brain_name in self._env_specs.keys():
if brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
self._env_state[brain_name] = batched_step_result_from_proto(
agent_info_list, self._env_specs[brain_name]
)
else:
self._env_state[brain_name] = BatchedStepResult.empty(
self._env_specs[brain_name]
)
self._parse_side_channel_message(self.side_channels, output.side_channel)
def reset(self) -> None:
if self._loaded:
outputs = self.communicator.exchange(self._generate_reset_input())
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._is_first_message = False
self._env_actions.clear()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
@timed
def step(self) -> None:
if self._is_first_message:
return self.reset()
if not self._loaded:
raise UnityEnvironmentException("No Unity environment is loaded.")
# fill the blanks for missing actions
for group_name in self._env_specs:
if group_name not in self._env_actions:
n_agents = 0
if group_name in self._env_state:
n_agents = self._env_state[group_name].n_agents()
self._env_actions[group_name] = self._env_specs[
group_name
].create_empty_action(n_agents)
step_input = self._generate_step_input(self._env_actions)
with hierarchical_timer("communicator.exchange"):
outputs = self.communicator.exchange(step_input)
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._env_actions.clear()
def get_agent_groups(self) -> List[AgentGroup]:
return list(self._env_specs.keys())
def _assert_group_exists(self, agent_group: str) -> None:
if agent_group not in self._env_specs:
raise UnityActionException(
"The group {0} does not correspond to an existing agent group "
"in the environment".format(agent_group)
)
def set_actions(self, agent_group: AgentGroup, action: np.ndarray) -> None:
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
return
spec = self._env_specs[agent_group]
expected_type = np.float32 if spec.is_action_continuous() else np.int32
expected_shape = (self._env_state[agent_group].n_agents(), spec.action_size)
if action.shape != expected_shape:
raise UnityActionException(
"The group {0} needs an input of dimension {1} but received input of dimension {2}".format(
agent_group, expected_shape, action.shape
)
)
if action.dtype != expected_type:
action = action.astype(expected_type)
self._env_actions[agent_group] = action
def set_action_for_agent(
self, agent_group: AgentGroup, agent_id: AgentId, action: np.ndarray
) -> None:
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
return
spec = self._env_specs[agent_group]
expected_shape = (spec.action_size,)
if action.shape != expected_shape:
raise UnityActionException(
"The Agent {0} in group {1} needs an input of dimension {2} but received input of dimension {3}".format(
agent_id, agent_group, expected_shape, action.shape
)
)
expected_type = np.float32 if spec.is_action_continuous() else np.int32
if action.dtype != expected_type:
action = action.astype(expected_type)
if agent_group not in self._env_actions:
self._env_actions[agent_group] = spec.create_empty_action(
self._env_state[agent_group].n_agents()
)
try:
index = np.where(self._env_state[agent_group].agent_id == agent_id)[0][0]
except IndexError as ie:
raise IndexError(
"agent_id {} is did not request a decision at the previous step".format(
agent_id
)
) from ie
self._env_actions[agent_group][index] = action
def get_step_result(self, agent_group: AgentGroup) -> BatchedStepResult:
self._assert_group_exists(agent_group)
return self._env_state[agent_group]
def get_agent_group_spec(self, agent_group: AgentGroup) -> AgentGroupSpec:
self._assert_group_exists(agent_group)
return self._env_specs[agent_group]
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._loaded:
self._close()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
def _close(self):
self._loaded = False
self.communicator.close()
if self.proc1 is not None:
# Wait a bit for the process to shutdown, but kill it if it takes too long
try:
self.proc1.wait(timeout=self.timeout_wait)
signal_name = self.returncode_to_signal_name(self.proc1.returncode)
signal_name = f" ({signal_name})" if signal_name else ""
return_info = f"Environment shut down with return code {self.proc1.returncode}{signal_name}."
logger.info(return_info)
except subprocess.TimeoutExpired:
logger.info("Environment timed out shutting down. Killing...")
self.proc1.kill()
# Set to None so we don't try to close multiple times.
self.proc1 = None
@classmethod
def _flatten(cls, arr: Any) -> List[float]:
"""
Converts arrays to list.
:param arr: numpy vector.
:return: flattened list.
"""
if isinstance(arr, cls.SCALAR_ACTION_TYPES):
arr = [float(arr)]
if isinstance(arr, np.ndarray):
arr = arr.tolist()
if len(arr) == 0:
return arr
if isinstance(arr[0], np.ndarray):
# pylint: disable=no-member
arr = [item for sublist in arr for item in sublist.tolist()]
if isinstance(arr[0], list):
# pylint: disable=not-an-iterable
arr = [item for sublist in arr for item in sublist]
arr = [float(x) for x in arr]
return arr
@staticmethod
def _parse_side_channel_message(
side_channels: Dict[int, SideChannel], data: bytes
) -> None:
offset = 0
while offset < len(data):
try:
channel_type, message_len = struct.unpack_from("<ii", data, offset)
offset = offset + 8
message_data = data[offset : offset + message_len]
offset = offset + message_len
except Exception:
raise UnityEnvironmentException(
"There was a problem reading a message in a SideChannel. "
"Please make sure the version of MLAgents in Unity is "
"compatible with the Python version."
)
if len(message_data) != message_len:
raise UnityEnvironmentException(
"The message received by the side channel {0} was "
"unexpectedly short. Make sure your Unity Environment "
"sending side channel data properly.".format(channel_type)
)
if channel_type in side_channels:
side_channels[channel_type].on_message_received(message_data)
else:
logger.warning(
"Unknown side channel data received. Channel type "
": {0}.".format(channel_type)
)
@staticmethod
def _generate_side_channel_data(side_channels: Dict[int, SideChannel]) -> bytearray:
result = bytearray()
for channel_type, channel in side_channels.items():
for message in channel.message_queue:
result += struct.pack("<ii", channel_type, len(message))
result += message
channel.message_queue = []
return result
@timed
def _generate_step_input(
self, vector_action: Dict[str, np.ndarray]
) -> UnityInputProto:
rl_in = UnityRLInputProto()
for b in vector_action:
n_agents = self._env_state[b].n_agents()
if n_agents == 0:
continue
for i in range(n_agents):
action = AgentActionProto(vector_actions=vector_action[b][i])
rl_in.agent_actions[b].value.extend([action])
rl_in.command = STEP
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def _generate_reset_input(self) -> UnityInputProto:
rl_in = UnityRLInputProto()
rl_in.command = RESET
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def send_academy_parameters(
self, init_parameters: UnityRLInitializationInputProto
) -> UnityOutputProto:
inputs = UnityInputProto()
inputs.rl_initialization_input.CopyFrom(init_parameters)
return self.communicator.initialize(inputs)
@staticmethod
def wrap_unity_input(rl_input: UnityRLInputProto) -> UnityInputProto:
result = UnityInputProto()
result.rl_input.CopyFrom(rl_input)
return result
@staticmethod
def returncode_to_signal_name(returncode: int) -> Optional[str]:
"""
Try to convert return codes into their corresponding signal name.
E.g. returncode_to_signal_name(-2) -> "SIGINT"
"""
try:
# A negative value -N indicates that the child was terminated by signal N (POSIX only).
s = signal.Signals(-returncode) # pylint: disable=no-member
return s.name
except Exception:
# Should generally be a ValueError, but catch everything just in case.
return None
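# Illustrative sketch: the side-channel helpers above share a simple length-prefixed
# wire format - a little-endian (channel_type, message_len) header packed with "<ii",
# followed by the raw payload bytes. The helper below is hypothetical (not part of the
# original class) and only round-trips one payload through that framing to make the
# layout explicit; it reuses the module-level struct import already required above.
def _example_side_channel_framing(channel_type: int = 1, payload: bytes = b"hello") -> bytes:
    framed = struct.pack("<ii", channel_type, len(payload)) + payload
    read_type, read_len = struct.unpack_from("<ii", framed, 0)
    assert (read_type, read_len) == (channel_type, len(payload))
    # the 8-byte header is followed directly by the payload
    assert framed[8 : 8 + read_len] == payload
    return framed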
|
the-stack_0_13066 | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
# pylint: disable=invalid-name,missing-docstring
"""
Take two files containing left & right reads from a paired-end sequencing run,
and interleave them.
% scripts/interleave-reads.py <R1> <R2> [ -o <outputfile> ]
By default, output is sent to stdout; or use -o. Use '-h' for parameter help.
"""
# TODO: take fa as well?
# support gzip option?
import screed
import sys
import itertools
import os
import textwrap
import argparse
import khmer
from khmer.kfile import check_file_status, check_space
from khmer.khmer_args import info
from khmer.utils import (write_record_pair, check_is_left, check_is_right,
check_is_pair)
def get_parser():
epilog = """
The output is an interleaved set of reads, with each read in <R1> paired
with a read in <R2>. By default, the output goes to stdout unless
:option:`-o`/:option:`--output` is specified.
As a "bonus", this file ensures that if read names are not already
formatted properly, they are reformatted consistently, such that
they look like the pre-1.8 Casava format (@name/1, @name/2).
Example::
""" " interleave-reads.py tests/test-data/paired.fq.1 tests/test-data/paired.fq.2 -o paired.fq" # noqa
parser = argparse.ArgumentParser(
description='Produce interleaved files from R1/R2 paired files',
epilog=textwrap.dedent(epilog),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infiles', nargs='+')
parser.add_argument('-o', '--output', metavar="filename",
type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('--version', action='version', version='%(prog)s '
+ khmer.__version__)
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
return parser
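# Illustrative note: for paired records whose names lack read markers, the loop in
# main() below appends '/1' and '/2' (via check_is_left / check_is_right), e.g. a pair
# both named "read17" is written out as "read17/1" and "read17/2" in the interleaved
# output. The example name is hypothetical.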
def main():
info('interleave-reads.py')
args = get_parser().parse_args()
for _ in args.infiles:
check_file_status(_, args.force)
check_space(args.infiles, args.force)
s1_file = args.infiles[0]
if len(args.infiles) == 2:
s2_file = args.infiles[1]
else:
s2_file = s1_file.replace('_R1_', '_R2_')
if s1_file == s2_file:
print >>sys.stderr, ("ERROR: given only one filename, that "
"doesn't contain _R1_. Exiting.")
sys.exit(1)
print >> sys.stderr, ("given only one file; "
"guessing that R2 file is %s" % s2_file)
fail = False
if not os.path.exists(s1_file):
print >> sys.stderr, "Error! R1 file %s does not exist" % s1_file
fail = True
if not os.path.exists(s2_file):
print >> sys.stderr, "Error! R2 file %s does not exist" % s2_file
fail = True
if fail and not args.force:
sys.exit(1)
print >> sys.stderr, "Interleaving:\n\t%s\n\t%s" % (s1_file, s2_file)
counter = 0
screed_iter_1 = screed.open(s1_file, parse_description=False)
screed_iter_2 = screed.open(s2_file, parse_description=False)
for read1, read2 in itertools.izip_longest(screed_iter_1, screed_iter_2):
if read1 is None or read2 is None:
print >>sys.stderr, ("ERROR: Input files contain different number"
" of records.")
sys.exit(1)
if counter % 100000 == 0:
print >> sys.stderr, '...', counter, 'pairs'
counter += 1
name1 = read1.name
if not check_is_left(name1):
name1 += '/1'
name2 = read2.name
if not check_is_right(name2):
name2 += '/2'
read1.name = name1
read2.name = name2
if not check_is_pair(read1, read2):
print >>sys.stderr, "ERROR: This doesn't look like paired data! " \
"%s %s" % (read1.name, read2.name)
sys.exit(1)
write_record_pair(read1, read2, args.output)
print >> sys.stderr, 'final: interleaved %d pairs' % counter
print >> sys.stderr, 'output written to', args.output.name
if __name__ == '__main__':
main()
|
the-stack_0_13068 | import collections
from packaging.version import Version
import inspect
import logging
from numbers import Number
import numpy as np
import time
import warnings
from mlflow.tracking.client import MlflowClient
from mlflow.utils.file_utils import TempDir
from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID
from mlflow.utils.arguments_utils import _get_arg_names
_logger = logging.getLogger(__name__)
# The earliest version we're guaranteed to support. Autologging utilities may not work properly
# on scikit-learn older than this version.
_MIN_SKLEARN_VERSION = "0.20.3"
# The prefix to note that all calculated metrics and artifacts are solely based on training datasets
_TRAINING_PREFIX = "training_"
_SAMPLE_WEIGHT = "sample_weight"
# _SklearnArtifact represents a artifact (e.g confusion matrix) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnArtifact = collections.namedtuple(
"_SklearnArtifact", ["name", "function", "arguments", "title"]
)
# _SklearnMetric represents a metric (e.g, precision_score) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnMetric = collections.namedtuple("_SklearnMetric", ["name", "function", "arguments"])
def _get_estimator_info_tags(estimator):
"""
:return: A dictionary of MLflow run tag keys and values
describing the specified estimator.
"""
return {
"estimator_name": estimator.__class__.__name__,
"estimator_class": (estimator.__class__.__module__ + "." + estimator.__class__.__name__),
}
def _get_args_for_metrics(fit_func, fit_args, fit_kwargs):
"""
Get arguments to pass to metric computations in the following steps.
1. Extract X and y from fit_args and fit_kwargs.
2. If the sample_weight argument exists in fit_func,
extract it from fit_args or fit_kwargs and return (X, y, sample_weight),
       otherwise return (X, y, None)
:param fit_func: A fit function object.
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:returns: A tuple of either (X, y, sample_weight), where `y` and `sample_weight` may be
`None` if the specified `fit_args` and `fit_kwargs` do not specify labels or
a sample weighting.
"""
def _get_Xy(args, kwargs, X_var_name, y_var_name):
# corresponds to: model.fit(X, y)
if len(args) >= 2:
return args[:2]
# corresponds to: model.fit(X, <y_var_name>=y)
if len(args) == 1:
return args[0], kwargs.get(y_var_name)
# corresponds to: model.fit(<X_var_name>=X, <y_var_name>=y)
return kwargs[X_var_name], kwargs.get(y_var_name)
def _get_sample_weight(arg_names, args, kwargs):
sample_weight_index = arg_names.index(_SAMPLE_WEIGHT)
# corresponds to: model.fit(X, y, ..., sample_weight)
if len(args) > sample_weight_index:
return args[sample_weight_index]
# corresponds to: model.fit(X, y, ..., sample_weight=sample_weight)
if _SAMPLE_WEIGHT in kwargs:
return kwargs[_SAMPLE_WEIGHT]
return None
fit_arg_names = _get_arg_names(fit_func)
# In most cases, X_var_name and y_var_name become "X" and "y", respectively.
# However, certain sklearn models use different variable names for X and y.
# E.g., see: https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html#sklearn.multioutput.MultiOutputClassifier.fit # noqa: E501
X_var_name, y_var_name = fit_arg_names[:2]
Xy = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name)
sample_weight = (
_get_sample_weight(fit_arg_names, fit_args, fit_kwargs)
if (_SAMPLE_WEIGHT in fit_arg_names)
else None
)
return (*Xy, sample_weight)
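# Illustrative sketch (the helper name and data below are hypothetical): for a call such
# as model.fit(X, y, sample_weight=w) the extraction above yields (X, y, w); if no labels
# or weights are passed it yields (X, None, None).
def _example_get_args_for_metrics():
    import numpy as np
    from sklearn.linear_model import LinearRegression

    X, y, w = np.zeros((4, 2)), np.zeros(4), np.ones(4)
    model = LinearRegression()
    # equivalent to inspecting the arguments of model.fit(X, y, sample_weight=w)
    return _get_args_for_metrics(model.fit, (X, y), {"sample_weight": w})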
def _get_metrics_value_dict(metrics_list):
metric_value_dict = {}
for metric in metrics_list:
try:
metric_value = metric.function(**metric.arguments)
except Exception as e:
_log_warning_for_metrics(metric.name, metric.function, e)
else:
metric_value_dict[metric.name] = metric_value
return metric_value_dict
def _get_classifier_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for classifiers
For (1) precision score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html
(2) recall score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html
(3) f1_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
By default, we choose the parameter `labels` to be `None`, `pos_label` to be `1`,
`average` to be `weighted` to compute the weighted precision score.
For (4) accuracy score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
we choose the parameter `normalize` to be `True` to output the percentage of accuracy,
as opposed to `False` that outputs the absolute correct number of sample prediction
We log additional metrics if certain classifier has method `predict_proba`
(5) log loss:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html
(6) roc_auc_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
By default, for roc_auc_score, we pick `average` to be `weighted`, `multi_class` to be `ovo`,
to make the output more insensitive to dataset imbalance.
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and compute y_pred.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, ...... sample_weight), otherwise as (y_true, y_pred, ......)
3. return a dictionary of metric(name, value)
:param fitted_estimator: The already fitted classifier
    :param prefix: Prefix used to name the computed metrics (e.g. "training_").
    :param X: The data samples used to generate predictions.
    :param y_true: The ground-truth labels.
    :param sample_weight: Optional per-sample weights passed to the metric functions.
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
classifier_metrics = [
_SklearnMetric(
name=prefix + "precision_score",
function=sklearn.metrics.precision_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "recall_score",
function=sklearn.metrics.recall_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "f1_score",
function=sklearn.metrics.f1_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "accuracy_score",
function=sklearn.metrics.accuracy_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, normalize=True, sample_weight=sample_weight
),
),
]
if hasattr(fitted_estimator, "predict_proba"):
y_pred_proba = fitted_estimator.predict_proba(X)
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "log_loss",
function=sklearn.metrics.log_loss,
arguments=dict(y_true=y_true, y_pred=y_pred_proba, sample_weight=sample_weight),
),
]
)
if _is_metric_supported("roc_auc_score"):
# For binary case, the parameter `y_score` expect scores must be
# the scores of the class with the greater label.
if len(y_pred_proba[0]) == 2:
y_pred_proba = y_pred_proba[:, 1]
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "roc_auc_score",
function=sklearn.metrics.roc_auc_score,
arguments=dict(
y_true=y_true,
y_score=y_pred_proba,
average="weighted",
sample_weight=sample_weight,
multi_class="ovo",
),
),
]
)
return _get_metrics_value_dict(classifier_metrics)
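# Illustrative sketch (the helper, estimator and data below are hypothetical): for a
# fitted binary classifier the function above returns a dict with keys such as
# training_precision_score, training_recall_score, training_f1_score,
# training_accuracy_score, training_log_loss and, on sklearn >= 0.22.2,
# training_roc_auc_score.
def _example_classifier_metrics():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=50, random_state=0)
    clf = LogisticRegression(max_iter=200).fit(X, y)
    return _get_classifier_metrics(clf, _TRAINING_PREFIX, X, y, sample_weight=None)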
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Draw and record various common artifacts for classifier
For all classifiers, we always log:
(1) confusion matrix:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
For only binary classifiers, we will log:
(2) precision recall curve:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html
(3) roc curve:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)
3. return a list of artifacts path to be logged
    :param fitted_estimator: The already fitted classifier
    :param prefix: Prefix used to name the logged artifacts (e.g. "training_").
    :param X: The data samples used to generate the plots.
    :param y_true: The ground-truth labels.
    :param sample_weight: Optional per-sample weights passed to the plotting functions.
:return: List of artifacts to be logged
"""
import sklearn
if not _is_plotting_supported():
return []
classifier_artifacts = [
_SklearnArtifact(
name=prefix + "confusion_matrix",
function=sklearn.metrics.plot_confusion_matrix,
arguments=dict(
estimator=fitted_estimator,
X=X,
y_true=y_true,
sample_weight=sample_weight,
normalize="true",
cmap="Blues",
),
title="Normalized confusion matrix",
),
]
# The plot_roc_curve and plot_precision_recall_curve can only be
# supported for binary classifier
if len(set(y_true)) == 2:
classifier_artifacts.extend(
[
_SklearnArtifact(
name=prefix + "roc_curve",
function=sklearn.metrics.plot_roc_curve,
arguments=dict(
estimator=fitted_estimator,
X=X,
y=y_true,
sample_weight=sample_weight,
),
title="ROC curve",
),
_SklearnArtifact(
name=prefix + "precision_recall_curve",
function=sklearn.metrics.plot_precision_recall_curve,
arguments=dict(
estimator=fitted_estimator,
X=X,
y=y_true,
sample_weight=sample_weight,
),
title="Precision recall curve",
),
]
)
return classifier_artifacts
def _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for regressors
For (1) (root) mean squared error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html
(2) mean absolute error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html
(3) r2 score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
By default, we choose the parameter `multioutput` to be `uniform_average`
to average outputs with uniform weight.
Steps:
1. Extract X and y_true from fit_args and fit_kwargs, and compute y_pred.
2. If the sample_weight argument exists in fit_func (accuracy_score by default
has sample_weight), extract it from fit_args or fit_kwargs as
(y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)
3. return a dictionary of metric(name, value)
:param fitted_estimator: The already fitted regressor
    :param prefix: Prefix used to name the computed metrics (e.g. "training_").
    :param X: The data samples used to generate predictions.
    :param y_true: The ground-truth target values.
    :param sample_weight: Optional per-sample weights passed to the metric functions.
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
regressor_metrics = [
_SklearnMetric(
name=prefix + "mse",
function=sklearn.metrics.mean_squared_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "mae",
function=sklearn.metrics.mean_absolute_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "r2_score",
function=sklearn.metrics.r2_score,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
]
# To be compatible with older versions of scikit-learn (below 0.22.2), where
# `sklearn.metrics.mean_squared_error` does not have "squared" parameter to calculate `rmse`,
# we compute it through np.sqrt(<value of mse>)
metrics_value_dict = _get_metrics_value_dict(regressor_metrics)
metrics_value_dict[prefix + "rmse"] = np.sqrt(metrics_value_dict[prefix + "mse"])
return metrics_value_dict
def _log_warning_for_metrics(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The metric "
+ func_name
+ " will not be recorded."
+ " Metric error: "
+ str(err)
)
_logger.warning(msg)
def _log_warning_for_artifacts(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The artifact "
+ func_name
+ " will not be recorded."
+ " Artifact error: "
+ str(err)
)
_logger.warning(msg)
def _log_specialized_estimator_content(
autologging_client, fitted_estimator, run_id, prefix, X, y_true=None, sample_weight=None
):
import sklearn
metrics = dict()
if y_true is not None:
try:
if sklearn.base.is_classifier(fitted_estimator):
metrics = _get_classifier_metrics(
fitted_estimator, prefix, X, y_true, sample_weight
)
elif sklearn.base.is_regressor(fitted_estimator):
metrics = _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight)
except Exception as err:
msg = (
"Failed to autolog metrics for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(err)
)
_logger.warning(msg)
else:
autologging_client.log_metrics(run_id=run_id, metrics=metrics)
if sklearn.base.is_classifier(fitted_estimator):
try:
artifacts = _get_classifier_artifacts(
fitted_estimator, prefix, X, y_true, sample_weight
)
except Exception as e:
msg = (
"Failed to autolog artifacts for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(e)
)
_logger.warning(msg)
return
with TempDir() as tmp_dir:
for artifact in artifacts:
try:
display = artifact.function(**artifact.arguments)
display.ax_.set_title(artifact.title)
artifact_path = "{}.png".format(artifact.name)
filepath = tmp_dir.path(artifact_path)
display.figure_.savefig(filepath)
import matplotlib.pyplot as plt
plt.close(display.figure_)
except Exception as e:
_log_warning_for_artifacts(artifact.name, artifact.function, e)
MlflowClient().log_artifacts(run_id, tmp_dir.path())
return metrics
def _log_estimator_content(
autologging_client, estimator, run_id, prefix, X, y_true=None, sample_weight=None
):
"""
Logs content for the given estimator, which includes metrics and artifacts that might be
tailored to the estimator's type (e.g., regression vs classification). Training labels
are required for metric computation; metrics will be omitted if labels are not available.
:param autologging_client: An instance of `MlflowAutologgingQueueingClient` used for
efficiently logging run data to MLflow Tracking.
:param estimator: The estimator used to compute metrics and artifacts.
:param run_id: The run under which the content is logged.
:param prefix: A prefix used to name the logged content. Typically it's 'training_' for
training-time content and user-controlled for evaluation-time content.
:param X: The data samples.
:param y_true: Labels.
:param sample_weight: Per-sample weights used in the computation of metrics and artifacts.
:return: A dict of the computed metrics.
"""
metrics = _log_specialized_estimator_content(
autologging_client=autologging_client,
fitted_estimator=estimator,
run_id=run_id,
prefix=prefix,
X=X,
y_true=y_true,
sample_weight=sample_weight,
)
if hasattr(estimator, "score") and y_true is not None:
try:
# Use the sample weight only if it is present in the score args
score_arg_names = _get_arg_names(estimator.score)
score_args = (
(X, y_true, sample_weight) if _SAMPLE_WEIGHT in score_arg_names else (X, y_true)
)
score = estimator.score(*score_args)
except Exception as e:
msg = (
estimator.score.__qualname__
+ " failed. The 'training_score' metric will not be recorded. Scoring error: "
+ str(e)
)
_logger.warning(msg)
else:
score_key = prefix + "score"
autologging_client.log_metrics(run_id=run_id, metrics={score_key: score})
metrics[score_key] = score
return metrics
def _get_meta_estimators_for_autologging():
"""
:return: A list of meta estimator class definitions
(e.g., `sklearn.model_selection.GridSearchCV`) that should be included
when patching training functions for autologging
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
return [
GridSearchCV,
RandomizedSearchCV,
Pipeline,
]
def _is_parameter_search_estimator(estimator):
"""
:return: `True` if the specified scikit-learn estimator is a parameter search estimator,
such as `GridSearchCV`. `False` otherwise.
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
parameter_search_estimators = [
GridSearchCV,
RandomizedSearchCV,
]
return any(
[
isinstance(estimator, param_search_estimator)
for param_search_estimator in parameter_search_estimators
]
)
def _log_parameter_search_results_as_artifact(cv_results_df, run_id):
"""
Records a collection of parameter search results as an MLflow artifact
for the specified run.
:param cv_results_df: A Pandas DataFrame containing the results of a parameter search
training session, which may be obtained by parsing the `cv_results_`
attribute of a trained parameter search estimator such as
`GridSearchCV`.
:param run_id: The ID of the MLflow Run to which the artifact should be recorded.
"""
with TempDir() as t:
results_path = t.path("cv_results.csv")
cv_results_df.to_csv(results_path, index=False)
MlflowClient().log_artifact(run_id, results_path)
# Log how many child runs will be created vs omitted based on `max_tuning_runs`.
def _log_child_runs_info(max_tuning_runs, total_runs):
rest = total_runs - max_tuning_runs
# Set logging statement for runs to be logged.
if max_tuning_runs == 0:
logging_phrase = "no runs"
elif max_tuning_runs == 1:
logging_phrase = "the best run"
else:
logging_phrase = "the {} best runs".format(max_tuning_runs)
# Set logging statement for runs to be omitted.
if rest <= 0:
omitting_phrase = "no runs"
elif rest == 1:
omitting_phrase = "one run"
else:
omitting_phrase = "{} runs".format(rest)
_logger.info("Logging %s, %s will be omitted.", logging_phrase, omitting_phrase)
def _create_child_runs_for_parameter_search(
autologging_client, cv_estimator, parent_run, max_tuning_runs, child_tags=None
):
"""
Creates a collection of child runs for a parameter search training session.
Runs are reconstructed from the `cv_results_` attribute of the specified trained
parameter search estimator - `cv_estimator`, which provides relevant performance
metrics for each point in the parameter search space. One child run is created
for each point in the parameter search space. For additional information, see
`https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html`_. # noqa: E501
:param autologging_client: An instance of `MlflowAutologgingQueueingClient` used for
efficiently logging run data to MLflow Tracking.
:param cv_estimator: The trained parameter search estimator for which to create
child runs.
:param parent_run: A py:class:`mlflow.entities.Run` object referring to the parent
parameter search run for which child runs should be created.
:param child_tags: An optional dictionary of MLflow tag keys and values to log
for each child run.
"""
import pandas as pd
def first_custom_rank_column(df):
column_names = df.columns.values
for col_name in column_names:
if "rank_test_" in col_name:
return col_name
# Use the start time of the parent parameter search run as a rough estimate for the
# start time of child runs, since we cannot precisely determine when each point
# in the parameter search space was explored
child_run_start_time = parent_run.info.start_time
child_run_end_time = int(time.time() * 1000)
seed_estimator = cv_estimator.estimator
# In the unlikely case that a seed of a parameter search estimator is,
# itself, a parameter search estimator, we should avoid logging the untuned
    # parameters of the seed's seed estimator
should_log_params_deeply = not _is_parameter_search_estimator(seed_estimator)
# Each row of `cv_results_` only provides parameters that vary across
# the user-specified parameter grid. In order to log the complete set
# of parameters for each child run, we fetch the parameters defined by
# the seed estimator and update them with parameter subset specified
# in the result row
base_params = seed_estimator.get_params(deep=should_log_params_deeply)
cv_results_df = pd.DataFrame.from_dict(cv_estimator.cv_results_)
if max_tuning_runs is None:
cv_results_best_n_df = cv_results_df
else:
rank_column_name = "rank_test_score"
if rank_column_name not in cv_results_df.columns.values:
rank_column_name = first_custom_rank_column(cv_results_df)
warnings.warn(
"Top {} child runs will be created based on ordering in {} column.".format(
max_tuning_runs,
rank_column_name,
)
+ " You can choose not to limit the number of child runs created by"
+ " setting `max_tuning_runs=None`."
)
cv_results_best_n_df = cv_results_df.nsmallest(max_tuning_runs, rank_column_name)
# Log how many child runs will be created vs omitted.
_log_child_runs_info(max_tuning_runs, len(cv_results_df))
for _, result_row in cv_results_best_n_df.iterrows():
tags_to_log = dict(child_tags) if child_tags else {}
tags_to_log.update({MLFLOW_PARENT_RUN_ID: parent_run.info.run_id})
tags_to_log.update(_get_estimator_info_tags(seed_estimator))
pending_child_run_id = autologging_client.create_run(
experiment_id=parent_run.info.experiment_id,
start_time=child_run_start_time,
tags=tags_to_log,
)
params_to_log = dict(base_params)
params_to_log.update(result_row.get("params", {}))
autologging_client.log_params(run_id=pending_child_run_id, params=params_to_log)
# Parameters values are recorded twice in the set of search `cv_results_`:
# once within a `params` column with dictionary values and once within
# a separate dataframe column that is created for each parameter. To prevent
# duplication of parameters, we log the consolidated values from the parameter
# dictionary column and filter out the other parameter-specific columns with
# names of the form `param_{param_name}`. Additionally, `cv_results_` produces
# metrics for each training split, which is fairly verbose; accordingly, we filter
# out per-split metrics in favor of aggregate metrics (mean, std, etc.)
excluded_metric_prefixes = ["param", "split"]
metrics_to_log = {
key: value
for key, value in result_row.iteritems()
if not any([key.startswith(prefix) for prefix in excluded_metric_prefixes])
and isinstance(value, Number)
}
autologging_client.log_metrics(
run_id=pending_child_run_id,
metrics=metrics_to_log,
)
autologging_client.set_terminated(run_id=pending_child_run_id, end_time=child_run_end_time)
def _is_supported_version():
import sklearn
return Version(sklearn.__version__) >= Version(_MIN_SKLEARN_VERSION)
# Util function to check whether a metric is able to be computed in given sklearn version
def _is_metric_supported(metric_name):
import sklearn
# This dict can be extended to store special metrics' specific supported versions
_metric_supported_version = {"roc_auc_score": "0.22.2"}
return Version(sklearn.__version__) >= Version(_metric_supported_version[metric_name])
# Util function to check whether artifact plotting functions are able to be computed
# in given sklearn version (should >= 0.22.0)
def _is_plotting_supported():
import sklearn
return Version(sklearn.__version__) >= Version("0.22.0")
def _all_estimators():
try:
from sklearn.utils import all_estimators
return all_estimators()
except ImportError:
return _backported_all_estimators()
def _backported_all_estimators(type_filter=None):
"""
Backported from scikit-learn 0.23.2:
https://github.com/scikit-learn/scikit-learn/blob/0.23.2/sklearn/utils/__init__.py#L1146
Use this backported `all_estimators` in old versions of sklearn because:
1. An inferior version of `all_estimators` that old versions of sklearn use for testing,
might function differently from a newer version.
2. This backported `all_estimators` works on old versions of sklearn that don’t even define
the testing utility variant of `all_estimators`.
========== original docstring ==========
Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
# lazy import to avoid circular imports from sklearn.base
import pkgutil
import platform
import sklearn
from importlib import import_module
from operator import itemgetter
# pylint: disable=no-name-in-module, import-error
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
ClusterMixin,
)
IS_PYPY = platform.python_implementation() == "PyPy"
def is_abstract(c):
if not (hasattr(c, "__abstractmethods__")):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
modules_to_ignore = {"tests", "externals", "setup", "conftest"}
root = sklearn.__path__[0] # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, modname, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
mod_parts = modname.split(".")
if any(part in modules_to_ignore for part in mod_parts) or "._" in modname:
continue
module = import_module(modname)
classes = inspect.getmembers(module, inspect.isclass)
classes = [(name, est_cls) for name, est_cls in classes if not name.startswith("_")]
# TODO: Remove when FeatureHasher is implemented in PYPY
# Skips FeatureHasher for PYPY
if IS_PYPY and "feature_extraction" in modname:
classes = [(name, est_cls) for name, est_cls in classes if name == "FeatureHasher"]
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [
c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {
"classifier": ClassifierMixin,
"regressor": RegressorMixin,
"transformer": TransformerMixin,
"cluster": ClusterMixin,
}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError(
"Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter)
)
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
|
the-stack_0_13069 | import json
import threading
import time
import os
import stat
from copy import deepcopy
from .util import user_dir, print_error, print_stderr, PrintError
from .bitcoin import MAX_FEE_RATE, FEE_TARGETS
SYSTEM_CONFIG_PATH = "/etc/electrum.conf"
config = None
def get_config():
global config
return config
def set_config(c):
global config
config = c
class SimpleConfig(PrintError):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are 3 different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
3. System configuration (in /etc/)
They are taken in order (1. overrides config options set in 2., that
override config set in 3.)
"""
fee_rates = [5000, 10000, 20000, 30000, 50000, 70000, 100000, 150000, 200000, 300000]
def __init__(self, options={}, read_system_config_function=None,
read_user_config_function=None, read_user_dir_function=None):
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.fee_estimates = {}
self.fee_estimates_last_updated = {}
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_system_config_function is None:
read_system_config_function = read_system_config
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# Portable wallets don't use a system config
if self.cmdline_options.get('portable', False):
self.system_config = {}
else:
self.system_config = read_system_config_function()
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
# Upgrade obsolete keys
self.fixup_keys({'auto_cycle': 'auto_connect'})
# Make a singleton instance of 'self'
set_config(self)
def electrum_path(self):
# Read electrum_path from command line / system configuration
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
if self.get('testnet'):
path = os.path.join(path, 'testnet')
# Make directory if it does not yet exist.
if not os.path.exists(path):
if os.path.islink(path):
raise BaseException('Dangling link: ' + path)
#os.mkdir(path)
os.makedirs(path, exist_ok=True)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
self.print_error("electrum directory", path)
return path
def fixup_config_keys(self, config, keypairs):
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if not new_key in config:
config[new_key] = config[old_key]
del config[old_key]
updated = True
return updated
def fixup_keys(self, keypairs):
'''Migrate old key names to new ones'''
self.fixup_config_keys(self.cmdline_options, keypairs)
self.fixup_config_keys(self.system_config, keypairs)
if self.fixup_config_keys(self.user_config, keypairs):
self.save_user_config()
def set_key(self, key, value, save = True):
if not self.is_modifiable(key):
print_stderr("Warning: not changing config key '%s' set on the command line" % key)
return
with self.lock:
self.user_config[key] = value
if save:
self.save_user_config()
return
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key)
if out is None:
out = self.system_config.get(key, default)
return out
def is_modifiable(self, key):
return not key in self.cmdline_options
def save_user_config(self):
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
with open(path, "w") as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
def get_wallet_path(self):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd'), self.get('wallet_path'))
# path in config file
path = self.get('default_wallet_path')
if path and os.path.exists(path):
return path
# default path
dirpath = os.path.join(self.path, "wallets")
if not os.path.exists(dirpath):
if os.path.islink(dirpath):
raise BaseException('Dangling link: ' + dirpath)
os.mkdir(dirpath)
os.chmod(dirpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.print_error("session timeout -> %d seconds" % seconds)
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def open_last_wallet(self):
if self.get('wallet_path') is None:
last_wallet = self.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.cmdline_options['default_wallet_path'] = last_wallet
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def max_fee_rate(self):
f = self.get('max_fee_rate', MAX_FEE_RATE)
if f==0:
f = MAX_FEE_RATE
return f
def dynfee(self, i):
if i < 4:
j = FEE_TARGETS[i]
fee = self.fee_estimates.get(j)
else:
assert i == 4
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee/2
if fee is not None:
fee = min(5*MAX_FEE_RATE, fee)
return fee
def reverse_dynfee(self, fee_per_kb):
import operator
l = list(self.fee_estimates.items()) + [(1, self.dynfee(4))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(25)/2:
min_target = -1
return min_target
def static_fee(self, i):
return self.fee_rates[i]
def static_fee_index(self, value):
dist = list(map(lambda x: abs(x - value), self.fee_rates))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_estimates(self):
return len(self.fee_estimates)==4
def is_dynfee(self):
#return self.get('dynamic_fees', True)
return self.get('dynamic_fees', False)
def fee_per_kb(self):
dyn = self.is_dynfee()
if dyn:
fee_rate = self.dynfee(self.get('fee_level', 2))
else:
fee_rate = self.get('fee_per_kb', self.max_fee_rate()/2)
return fee_rate
def estimate_fee(self, size):
return int(self.fee_per_kb() * size / 1000.)
def update_fee_estimates(self, key, value):
self.fee_estimates[key] = value
self.fee_estimates_last_updated[key] = time.time()
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
prev_updates = self.fee_estimates_last_updated.values()
oldest_fee_time = min(prev_updates) if prev_updates else 0
stale_fees = now - oldest_fee_time > 7200
old_request = now - self.last_time_fee_estimates_requested > 60
return stale_fees and old_request
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def read_system_config(path=SYSTEM_CONFIG_PATH):
"""Parse and return the system config settings in /etc/electrum.conf."""
result = {}
if os.path.exists(path):
import configparser
p = configparser.ConfigParser()
try:
p.read(path)
for k, v in p.items('client'):
result[k] = v
except (configparser.NoSectionError, configparser.MissingSectionHeaderError):
pass
return result
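# Illustrative note: read_system_config() only reads the [client] section, so a minimal
# /etc/electrum.conf could look like the following (server and proxy values are
# hypothetical):
#
#     [client]
#     server = electrum.example.org:50002:s
#     proxy = none
#
# which would be returned as {'server': 'electrum.example.org:50002:s', 'proxy': 'none'}.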
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r") as f:
data = f.read()
result = json.loads(data)
except:
print_error("Warning: Cannot read config file.", config_path)
return {}
if not type(result) is dict:
return {}
return result
|
the-stack_0_13071 | """
Pulls data from specified iLO and presents as Prometheus metrics
"""
from __future__ import print_function
from _socket import gaierror
import sys
import os
import hpilo
import time
import prometheus_metrics
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from SocketServer import ForkingMixIn
from prometheus_client import generate_latest, Summary
from urlparse import parse_qs
from urlparse import urlparse
# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds',
'Time spent processing request')
def print_err(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class ForkingHTTPServer(ForkingMixIn, HTTPServer):
max_children = 30
timeout = 30
class RequestHandler(BaseHTTPRequestHandler):
"""
Endpoint handler
"""
def return_error(self):
self.send_response(500)
self.end_headers()
def _health(self):
# get health at glance
health_at_glance = self.ilo.get_embedded_health()['health_at_a_glance']
if health_at_glance is not None:
for key, value in health_at_glance.items():
for status in value.items():
if status[0] == 'status':
gauge = 'hpilo_{}_gauge'.format(key)
if status[1].upper() == 'OK':
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(0)
elif status[1].upper() == 'DEGRADED':
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(1)
else:
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(2)
def _host_power(self):
_power = self.ilo.get_host_power_status()
_gauge = 'hpilo_{}_gauge'.format('host_power')
if _power == 'ON':
prometheus_metrics.gauges[_gauge].labels(
product_name=self.product_name,
server_name=self.server_name).set(0)
else:
prometheus_metrics.gauges[_gauge].labels(
product_name=self.product_name,
server_name=self.server_name).set(1)
def _firmware(self):
_version = self.ilo.get_fw_version()["firmware_version"]
prometheus_metrics.hpilo_firmware_version.labels(
product_name=self.product_name,
server_name=self.server_name).set(_version)
def _power_readings(self):
(_present, _) = self.ilo.get_power_readings()['present_power_reading']
prometheus_metrics.hpilo_present_power_reading.labels(
product_name=self.product_name, server_name=self.server_name).set(_present)
def do_GET(self):
"""
Process GET request
:return: Response with Prometheus metrics
"""
# get parameters from the URL
_url = urlparse(self.path)
if _url.path == self.server.endpoint:
query_components = parse_qs(urlparse(self.path).query)
_host = None
_port = None
_user = None
_password = None
try:
_host = query_components['target'][0]
except KeyError as e:
print_err("** missing parameter 'target' in url **")
self.return_error()
return
try:
_port = os.environ['ilo_port']
_user = os.environ['ilo_user']
_password = os.environ['ilo_password']
except KeyError as e:
print_err("** missing environment parameter %s **" % e)
self.return_error()
return
self.server_name = _host
self.ilo = None
if _host and _user and _password and _port:
try:
self.ilo = hpilo.Ilo(hostname=_host,
login=_user,
password=_password,
port=int(_port), timeout=10)
except hpilo.IloLoginFailed:
print("ILO login failed")
self.return_error()
except gaierror:
print("ILO invalid address or port")
self.return_error()
except hpilo.IloCommunicationError as e:
print(e)
# this will be used to return the total amount of time the request
# took
start_time = time.time()
try:
self.product_name = self.ilo.get_product_name()
except BaseException:
self.product_name = "Unknown HP Server"
self._health()
self._host_power()
self._firmware()
self._power_readings()
# get the amount of time the request took
REQUEST_TIME.observe(time.time() - start_time)
# generate and publish metrics
metrics = generate_latest(prometheus_metrics.registry)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(metrics)
return
# tell users the /metrics endpoint
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write("""<html>
<head><title>HP iLO Exporter</title></head>
<body>
<h1>HP iLO Exporter</h1>
<p>Visit <a href="/metrics">Metrics</a> to use.</p>
</body>
</html>""")
class ILOExporterServer(object):
"""
Basic server implementation that exposes metrics to Prometheus
"""
def __init__(self, address='0.0.0.0', port=8080, endpoint="/metrics"):
self._address = address
self._port = port
self.endpoint = endpoint
def print_info(self):
print_err("Starting exporter on: http://{}:{}{}".format(self._address,
self._port,
self.endpoint))
print_err("Press Ctrl+C to quit")
def run(self):
self.print_info()
server = ForkingHTTPServer((self._address, self._port), RequestHandler)
server.endpoint = self.endpoint
try:
while True:
server.handle_request()
except KeyboardInterrupt:
print_err("Killing exporter")
server.server_close()
|
the-stack_0_13073 | """
Tests for the company model database migrations
"""
from django_test_migrations.contrib.unittest_case import MigratorTestCase
from InvenTree import helpers
class TestForwardMigrations(MigratorTestCase):
migrate_from = ('company', helpers.getOldestMigrationFile('company'))
migrate_to = ('company', helpers.getNewestMigrationFile('company'))
def prepare(self):
"""
Create some simple Company data, and ensure that it migrates OK
"""
Company = self.old_state.apps.get_model('company', 'company')
Company.objects.create(
name='MSPC',
description='Michael Scotts Paper Company',
is_supplier=True
)
def test_migrations(self):
Company = self.new_state.apps.get_model('company', 'company')
self.assertEqual(Company.objects.count(), 1)
class TestManufacturerField(MigratorTestCase):
"""
Tests for migration 0019 which migrates from old 'manufacturer_name' field to new 'manufacturer' field
"""
migrate_from = ('company', '0018_supplierpart_manufacturer')
migrate_to = ('company', '0019_auto_20200413_0642')
def prepare(self):
"""
Prepare the database by adding some test data 'before' the change:
- Part object
- Company object (supplier)
- SupplierPart object
"""
Part = self.old_state.apps.get_model('part', 'part')
Company = self.old_state.apps.get_model('company', 'company')
SupplierPart = self.old_state.apps.get_model('company', 'supplierpart')
# Create an initial part
part = Part.objects.create(
name='Screw',
description='A single screw'
)
# Create a company to act as the supplier
supplier = Company.objects.create(
name='Supplier',
description='A supplier of parts',
is_supplier=True,
is_customer=False,
)
# Add some SupplierPart objects
SupplierPart.objects.create(
part=part,
supplier=supplier,
SKU='SCREW.001',
manufacturer_name='ACME',
)
SupplierPart.objects.create(
part=part,
supplier=supplier,
SKU='SCREW.002',
manufacturer_name='Zero Corp'
)
self.assertEqual(Company.objects.count(), 1)
def test_company_objects(self):
"""
Test that the new companies have been created successfully
"""
# Two additional company objects should have been created
Company = self.new_state.apps.get_model('company', 'company')
self.assertEqual(Company.objects.count(), 3)
# The new company/ies must be marked as "manufacturers"
acme = Company.objects.get(name='ACME')
self.assertTrue(acme.is_manufacturer)
SupplierPart = self.new_state.apps.get_model('company', 'supplierpart')
parts = SupplierPart.objects.filter(manufacturer=acme)
self.assertEqual(parts.count(), 1)
part = parts.first()
# Checks on the SupplierPart object
self.assertEqual(part.manufacturer_name, 'ACME')
self.assertEqual(part.manufacturer.name, 'ACME')
class TestCurrencyMigration(MigratorTestCase):
"""
Tests for upgrade from basic currency support to django-money
"""
migrate_from = ('company', '0025_auto_20201110_1001')
migrate_to = ('company', '0026_auto_20201110_1011')
def prepare(self):
"""
Prepare some data:
- A part to buy
- A supplier to buy from
- A supplier part
- Multiple currency objects
- Multiple supplier price breaks
"""
Part = self.old_state.apps.get_model('part', 'part')
part = Part.objects.create(
name="PART", description="A purchaseable part",
purchaseable=True,
level=0,
tree_id=0,
lft=0,
rght=0
)
Company = self.old_state.apps.get_model('company', 'company')
supplier = Company.objects.create(name='Supplier', description='A supplier', is_supplier=True)
SupplierPart = self.old_state.apps.get_model('company', 'supplierpart')
sp = SupplierPart.objects.create(part=part, supplier=supplier, SKU='12345')
Currency = self.old_state.apps.get_model('common', 'currency')
aud = Currency.objects.create(symbol='$', suffix='AUD', description='Australian Dollars', value=1.0)
usd = Currency.objects.create(symbol='$', suffix='USD', description='US Dollars', value=1.0)
PB = self.old_state.apps.get_model('company', 'supplierpricebreak')
PB.objects.create(part=sp, quantity=10, cost=5, currency=aud)
PB.objects.create(part=sp, quantity=20, cost=3, currency=aud)
PB.objects.create(part=sp, quantity=30, cost=2, currency=aud)
PB.objects.create(part=sp, quantity=40, cost=2, currency=usd)
PB.objects.create(part=sp, quantity=50, cost=2, currency=usd)
for pb in PB.objects.all():
self.assertIsNone(pb.price)
def test_currency_migration(self):
PB = self.new_state.apps.get_model('company', 'supplierpricebreak')
for pb in PB.objects.all():
# Test that a price has been assigned
self.assertIsNotNone(pb.price)
|
the-stack_0_13075 | import unittest
from siobrultech_protocols.gem import packets
from tests.gem.packet_test_data import assert_packet, read_packet
class TestPacketFormats(unittest.TestCase):
def test_bin32_abs(self):
check_packet("BIN32-ABS.bin", packets.BIN32_ABS)
def test_bin32_net(self):
check_packet("BIN32-NET.bin", packets.BIN32_NET)
def test_bin48_abs(self):
check_packet("BIN48-ABS.bin", packets.BIN48_ABS)
def test_bin48_net(self):
check_packet("BIN48-NET.bin", packets.BIN48_NET)
def test_bin48_net_time(self):
check_packet("BIN48-NET-TIME.bin", packets.BIN48_NET_TIME)
def test_bin48_net_time_tricky(self):
"""BIN48_NET and BIN48_NET_TIME packets both have the same packet type
code, so in order to detect the difference you must try to parse as
BIN48_NET first, and if that fails try BIN48_NET_TIME. However, if
the parser just checks the checksum and not the footer, it's possible
for a BIN48_NET_TIME packet to be mistaken for a BIN48_NET. This is
one such packet."""
try:
parse_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET)
self.fail("Should have thrown")
except packets.MalformedPacketException:
pass
check_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET_TIME)
def test_short_packet(self):
packet = read_packet("BIN32-NET.bin")
with self.assertRaisesRegex(
packets.MalformedPacketException, "Packet too short."
):
packets.BIN32_NET.parse(packet[:-1])
def test_packet_with_extra_after(self):
data = bytearray()
data.extend(read_packet("BIN32-NET.bin"))
data.extend(read_packet("BIN32-ABS.bin"))
packet = packets.BIN32_NET.parse(data)
assert_packet("BIN32-NET.bin", packet)
class TestPacketDeltaComputation(unittest.TestCase):
def test_packet_delta_seconds(self):
packet = parse_packet("BIN32-ABS.bin", packets.BIN32_ABS)
self.assertEqual(997492, packet.seconds)
self.assertEqual(997493, packet.delta_seconds(2 ** 24 - 1))
self.assertEqual(1000000, packet.delta_seconds(2 ** 24 - (1000000 - 997492)))
def test_packet_delta_pulses(self):
packet = parse_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET_TIME)
# All the pulse counts in our packets are 0, so let's fake some out
packet.pulse_counts = [100, 200, 300, 400]
self.assertEqual(
[1100, 1200, 1300, 1400],
[
packet.delta_pulse_count(i, 2 ** 24 - 1000)
for i in range(0, len(packet.pulse_counts))
],
)
def test_packet_delta_absolute_watt_seconds(self):
packet = parse_packet("BIN32-ABS.bin", packets.BIN32_ABS)
self.assertEqual(
[
3123664,
9249700,
195388151,
100917236,
7139112,
1440,
4,
3,
14645520,
111396601,
33259670,
38296448,
1108415,
2184858,
5191049,
1,
71032651,
60190845,
47638292,
12017483,
36186563,
14681918,
69832947,
37693,
60941899,
1685614,
902,
799182,
302590,
3190972,
5,
647375119,
],
packet.absolute_watt_seconds,
)
self.assertEqual(
[
packet.absolute_watt_seconds[i] + 1000
for i in range(0, len(packet.absolute_watt_seconds))
],
[
packet.delta_absolute_watt_seconds(i, 2 ** 40 - 1000)
for i in range(0, len(packet.absolute_watt_seconds))
],
)
def test_packet_delta_polarized_watt_seconds(self):
packet = parse_packet("BIN32-NET.bin", packets.BIN32_NET)
# Packet didn't have any negative numbers, so let's do some manual ones
packet.polarized_watt_seconds = [
-1600 + 100 * i for i in range(0, packet.num_channels)
]
self.assertEqual(
[
packet.polarized_watt_seconds[i] + 1000 + 2 ** 39
for i in range(0, len(packet.polarized_watt_seconds))
],
[
packet.delta_polarized_watt_seconds(i, 2 ** 39 - 1000)
for i in range(0, len(packet.polarized_watt_seconds))
],
)
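# Illustrative note: the delta helpers exercised above amount to modular subtraction over
# the counter width, e.g. for the 24-bit seconds counter
# (997492 - (2 ** 24 - 1)) % 2 ** 24 == 997493, which matches test_packet_delta_seconds.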
def check_packet(packet_file_name: str, packet_format: packets.PacketFormat):
packet = parse_packet(packet_file_name, packet_format)
assert_packet(packet_file_name, packet)
def parse_packet(packet_file_name: str, packet_format: packets.PacketFormat):
return packet_format.parse(read_packet(packet_file_name))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13076 | import sys, time, cv2
from matplotlib import pyplot as plt
sys.path.insert(0, sys.path[0].replace('examples', 'src'))
from robot import Robot
from utils import *
def display_image(image):
"""
Displays a image with matplotlib.
Args:
image: The BGR image numpy array. See src/utils.py.
"""
plt.imshow(image, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
robot = Robot()
#Reading ultrassonic sensors
ultrassonic = robot.read_ultrassonic_sensors()
print("Ultrassonic: ", ultrassonic)
#Reading laser sensor
laser = robot.read_laser()
print("Laser: ", laser)
#Reading camera
resolution, raw_img = robot.read_vision_sensor()
img = vrep2array(raw_img, resolution)
display_image(img)
|
the-stack_0_13078 | # this is chenqi's modification for custom datasets!
# version 2: based on v1, do the following updates:
# (1) in the func test(), instead of kNN on class, do kNN on img index! <-- then each image represents a class during implementation.
# (2) in data-aug for training, replace color jitter with Gaussian blur (+ Gaussian noise?) .
# (3) During training simCLR, add fake (gan generated) images to the original dataset to train!
import argparse
import os
import pandas as pd
import torch
import torch.optim as optim
from thop import profile, clever_format
from torch.utils.data import DataLoader
from tqdm import tqdm
#import utils_chenqi # import utils
import utils_chenqi_v2
from model import Model
# newly added:
from PIL import Image
from torchvision import transforms, datasets
# train for one epoch to learn unique features
def train(net, data_loader, train_optimizer):
net.train()
total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
for pos_1, pos_2, target in train_bar: # target.shape: torch.Size([batch_size])
pos_1, pos_2 = pos_1.cuda(non_blocking=True), pos_2.cuda(non_blocking=True) # pos_1.shape: torch.Size([batch_size, img_ch, img_h, img_w])
# note: feature: h (the embedding we want to do NN query), of shape: torch.Size([batch_size, 2048])
# out: z (the projection used to maximize agreement) of shape: torch.Size([batch_size, feature_dim]).
feature_1, out_1 = net(pos_1)
feature_2, out_2 = net(pos_2)
# [2*B, D]
out = torch.cat([out_1, out_2], dim=0) # shape: torch.Size([2*batch_size, feature_dim])
# [2*B, 2*B]
sim_matrix = torch.exp(torch.mm(out, out.t().contiguous()) / temperature)
mask = (torch.ones_like(sim_matrix) - torch.eye(2 * batch_size, device=sim_matrix.device)).bool()
# [2*B, 2*B-1]
sim_matrix = sim_matrix.masked_select(mask).view(2 * batch_size, -1)
# compute loss
pos_sim = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)
# [2*B]
pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
loss = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
train_optimizer.zero_grad()
loss.backward()
train_optimizer.step()
total_num += batch_size
total_loss += loss.item() * batch_size
train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, total_loss / total_num))
return total_loss / total_num
# test for one epoch, use weighted knn to find the most similar images' label to assign the test image
def test(net, memory_data_loader, test_data_loader):
net.eval()
total_top1, total_top5, total_num, feature_bank = 0.0, 0.0, 0, []
with torch.no_grad():
# generate feature bank
for data, _, target in tqdm(memory_data_loader, desc='Feature extracting'):
feature, out = net(data.cuda(non_blocking=True))
feature_bank.append(feature)
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_labels = torch.tensor(memory_data_loader.dataset.targets, device=feature_bank.device)
# loop test data to predict the label by weighted knn search
test_bar = tqdm(test_data_loader)
for data, _, target in test_bar:
data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
feature, out = net(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)
# [B, K]
sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_weight = (sim_weight / temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * k, c, device=sim_labels.device)
# [B*K, C]
# to check error: for debug:
#torch.max(sim_labels.view(-1, 1)) # cls_num-1
#torch.min(sim_labels.view(-1, 1)) # 0
# error here!!!
one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True) # torch.Size([26, 102])
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
total_top5 += torch.sum((pred_labels[:, :5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}% Acc@5:{:.2f}%'
.format(epoch, epochs, total_top1 / total_num * 100, total_top5 / total_num * 100))
return total_top1 / total_num * 100, total_top5 / total_num * 100
def get_mean_std_forDataset(data_dir,img_size,batch_size,isGray):
# newly added: compute the mean and std for transforms.Normalize using whole dataset:
tmp_data = datasets.ImageFolder(root=data_dir, transform=transforms.Compose([transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor()]))
tmp_loader = DataLoader(tmp_data, batch_size=batch_size, shuffle=False, num_workers=16)
mean = 0.
std = 0.
nb_samples = 0.
if not isGray:
for data, _ in tmp_loader:
batch_samples = data.size(0)
data = data.view(batch_samples, data.size(1), -1)
mean += data.mean(2).sum(0)
std += data.std(2).sum(0)
nb_samples += batch_samples
    # else: grayscale datasets (e.g. MNIST) are not handled by this loop yet
mean /= nb_samples
std /= nb_samples
return (mean, std)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train SimCLR')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--batch_size', default=26, type=int, help='Number of images in each mini-batch')
parser.add_argument('--epochs', default=2000, type=int, help='Number of sweeps over the dataset to train')
# newly added:
parser.add_argument('--dataset', default='FLOWER_128', type=str, help='Name of the training dataset, eg, FLOWER_128')
parser.add_argument('--data_dir', default='/eecf/cbcsl/data100b/Chenqi/new_metrics/SimCLR/data/FLOWER_gan/', type=str, help='Dir of the original & GAN generated fake training dataset')
#parser.add_argument('--label_file', default='/eecf/cbcsl/data100b/Chenqi/data/flower_labels.txt', type=str, help='Path to the txt file with class labels')
# maybe also add arg like: choices of data-aug...
# args parse
args = parser.parse_args()
feature_dim, temperature, k = args.feature_dim, args.temperature, args.k
batch_size, epochs = args.batch_size, args.epochs
# newly added:
dataset, data_dir = args.dataset, args.data_dir
img_size = int(dataset.split('_')[-1])
#label_file = args.label_file
# newly added: note: we should compute transforms.Normalize for our custom dataset each time! <-- will later modify it
# also note: for MNIST (gray-scale imgs), needs to modify color jitter & random gray & normalize!! <-- will later modify it
if 'MNIST' not in dataset:
# newly added: compute the mean and std for transforms.Normalize using whole dataset:
img_means, img_stds = get_mean_std_forDataset(data_dir,img_size,batch_size,isGray=False)
if 'FLOWER' in dataset:
train_transform = transforms.Compose([
transforms.Resize(img_size),transforms.CenterCrop(img_size), # NOT use random crop! use resize & center crop!!!
#transforms.RandomHorizontalFlip(p=0.5), # for FLOWER & MNIST: NOT do this!
transforms.GaussianBlur(51, sigma=(0.1, 1.0)), # NOT jitter that much for FLOWER!! Add Gaussian blurring.
#transforms.RandomGrayscale(p=0.2),
transforms.RandomAffine(degrees=10, translate=None, scale=None, shear=10), # maybe also add affain warping?
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) cifar10
elif 'CelebA' in dataset:
train_transform = transforms.Compose([
transforms.Resize(img_size),transforms.CenterCrop(img_size), # NOT use random crop! use resize & center crop!!!
transforms.RandomHorizontalFlip(p=0.5), # for FLOWER & MNIST: NOT do this!
transforms.GaussianBlur(51, sigma=(0.1, 1.0)), # NOT jitter that much for FLOWER!! Add Gaussian blurring.
transforms.RandomGrayscale(p=0.2),
#transforms.RandomAffine(degrees=5, translate=None, scale=None, shear=5), # maybe also add affain warping?
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) cifar10
test_transform = transforms.Compose([
transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) for cifar10
# else:... (for MNIST)
# data prepare
# newly modified: to adjust to custom dataset!
"""
# original old code:
train_data = utils.CIFAR10Pair(root='data', train=True, transform=utils.train_transform, download=True)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True,
drop_last=True)
"""
# new code for custom dataset:
#train_data = datasets.ImageFolder(root=data_dir, transform=train_transform)
train_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=train_transform)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True,
drop_last=True)
"""
# original old code:
memory_data = utils.CIFAR10Pair(root='data', train=True, transform=utils.test_transform, download=True)
memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# new code for custom dataset:
#memory_data = datasets.ImageFolder(root=data_dir, transform=test_transform)
memory_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=test_transform)
memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# original old code:
test_data = utils.CIFAR10Pair(root='data', train=False, transform=utils.test_transform, download=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# new code for custom dataset:
#test_data = datasets.ImageFolder(root=data_dir, transform=test_transform)
test_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=train_transform) # make the testing set to be the transformed original image!!!
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
# model setup and optimizer config
model = Model(feature_dim).cuda()
flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).cuda(),))
flops, params = clever_format([flops, params])
print('# Model Params: {} FLOPs: {}'.format(params, flops))
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
c = len(memory_data.classes)
"""
# for debug:
print('***************** DEBUG *****************')
print('c = ' + str(c))
print('memory_data.classes = ')
print(memory_data.classes)
assert(False)
"""
# training loop
results = {'train_loss': [], 'test_acc@1': [], 'test_acc@5': []}
save_name_pre = '{}_{}_{}_{}_{}'.format(feature_dim, temperature, k, batch_size, epochs)
# newly modified:
    if not os.path.exists('results_v2/' + dataset + '/'):
        os.makedirs('results_v2/' + dataset + '/')
best_acc = 0.0
for epoch in range(1, epochs + 1):
train_loss = train(model, train_loader, optimizer)
results['train_loss'].append(train_loss)
test_acc_1, test_acc_5 = test(model, memory_loader, test_loader)
results['test_acc@1'].append(test_acc_1)
results['test_acc@5'].append(test_acc_5)
# save statistics
data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))
# newly modified:
data_frame.to_csv('results_v2/' + dataset + '/' + '{}_statistics.csv'.format(save_name_pre), index_label='epoch')
"""
# original code: only save the "best" model:
if test_acc_1 > best_acc:
best_acc = test_acc_1
# newly modified:
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + '{}_model.pth'.format(save_name_pre))
"""
# new code: save all the models!!! (while also keep track on the "best" model):
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + 'epoch{}'.format(epoch) + '_{}_model.pth'.format(save_name_pre))
if test_acc_1 > best_acc:
best_acc = test_acc_1
# newly modified:
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + 'best_{}_model.pth'.format(save_name_pre))
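# Example invocation (script name and paths are illustrative, adjust to your setup):
#   python train_simclr_v2.py --dataset FLOWER_128 \
#       --data_dir /path/to/FLOWER_gan/ --batch_size 26 --epochs 2000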
|
the-stack_0_13079 | import pytest
from mock import MagicMock
from mock import AsyncMock
from datetime import datetime
from robot_server.service.dependencies import get_session_manager
from robot_server.service.errors import RobotServerError
from robot_server.service.session.errors import (
SessionCreationException, UnsupportedCommandException,
CommandExecutionException)
from robot_server.service.session.manager import SessionManager
from robot_server.service.session.models.command import (
SimpleCommandRequest, SimpleCommandResponse, CommandStatus
)
from robot_server.service.session.models.common import EmptyModel
from robot_server.service.session.models.command_definitions import (
ProtocolCommand)
from robot_server.service.session import router
from robot_server.service.session.session_types import BaseSession
@pytest.fixture
def mock_session_manager():
return AsyncMock(spec=SessionManager)
@pytest.fixture
def mock_session():
session = AsyncMock(spec=BaseSession)
session.meta.identifier = "some id"
session.meta.created_at = datetime(2020, 1, 1)
session.meta.create_params = None
session.get_response_model.return_value = {
"createdAt": session.meta.created_at,
"details": EmptyModel(),
"id": session.meta.identifier,
"createParams": session.meta.create_params
}
return session
@pytest.fixture
def sessions_api_client(mock_session_manager, api_client):
"""Test api client that overrides get_session_manager dependency."""
async def get():
return mock_session_manager
api_client.app.dependency_overrides[get_session_manager] = get
return api_client
def test_get_session(mock_session_manager):
"""It gets the session from session manager"""
session_id = "sess"
mock_session = MagicMock()
mock_session_manager.get_by_id.return_value = mock_session
session = router.get_session(mock_session_manager, session_id)
    mock_session_manager.get_by_id.assert_called_once_with(session_id)
assert session is mock_session
def test_get_session_not_found(mock_session_manager):
"""It raises an exception if session is not found"""
session_id = "sess"
mock_session_manager.get_by_id.return_value = None
with pytest.raises(RobotServerError):
router.get_session(mock_session_manager, session_id)
def test_sessions_create_error(
sessions_api_client,
mock_session_manager):
"""It raises an error if session manager raises an exception."""
async def raiser(*args, **kwargs):
raise SessionCreationException(
"Please attach pipettes before proceeding"
)
mock_session_manager.add.side_effect = raiser
response = sessions_api_client.post("/sessions", json={
"data": {
"sessionType": "liveProtocol"
}
})
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'detail': "Please attach pipettes before proceeding",
'title': 'Action Forbidden'}
]}
assert response.status_code == 403
def test_sessions_create(
sessions_api_client,
mock_session_manager,
mock_session):
"""It creates a session."""
mock_session_manager.add.return_value = mock_session
response = sessions_api_client.post("/sessions", json={
"data": {
"sessionType": "liveProtocol"
}
})
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier,
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions', 'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}', 'meta': None,
}
}
}
assert response.status_code == 201
def test_sessions_delete_not_found(
sessions_api_client,
mock_session_manager):
"""It fails when session is not found"""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.delete("/sessions/check")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'title': 'Resource Not Found',
'detail': "Resource type 'session' with id 'check' was not found",
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_delete(
sessions_api_client,
mock_session_manager,
mock_session):
"""It deletes a found session."""
mock_session_manager.get_by_id.return_value = mock_session
response = sessions_api_client.delete(
f"/sessions/{mock_session.meta.identifier}")
mock_session_manager.remove.assert_called_once_with(
mock_session.meta.identifier)
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
},
'links': {
'self': {
'href': '/sessions', 'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}', 'meta': None,
},
}
}
assert response.status_code == 200
def test_sessions_get_not_found(
mock_session_manager,
sessions_api_client):
"""It returns an error when session is not found."""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.get("/sessions/1234")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'detail': "Resource type 'session' with id '1234' was not found",
'title': 'Resource Not Found'
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_get(
sessions_api_client,
mock_session_manager,
mock_session):
"""It returns the found session."""
mock_session_manager.get_by_id.return_value = mock_session
response = sessions_api_client.get(
f"/sessions/{mock_session.meta.identifier}")
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions',
'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}',
'meta': None,
}
}
}
assert response.status_code == 200
def test_sessions_get_all_no_sessions(
sessions_api_client,
mock_session_manager):
"""It returns a response when there are no sessions."""
mock_session_manager.get.return_value = []
response = sessions_api_client.get("/sessions")
assert response.json() == {
'data': [], 'links': None
}
assert response.status_code == 200
def test_sessions_get_all(
sessions_api_client,
mock_session_manager,
mock_session):
"""It returns the sessions."""
mock_session_manager.get.return_value = [mock_session]
response = sessions_api_client.get("/sessions")
assert response.json() == {
'data': [{
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
}], 'links': None
}
assert response.status_code == 200
def test_sessions_execute_command_no_session(
sessions_api_client,
mock_session_manager):
"""It rejects command if there's no session"""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.post(
"/sessions/1234/commands/execute",
json={
"data": {
"command": "protocol.pause",
"data": {}
}
}
)
mock_session_manager.get_by_id.assert_called_once_with("1234")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'title': 'Resource Not Found',
'detail': "Resource type 'session' with id '1234' was not found", # noqa: E501
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_execute_command(
sessions_api_client,
mock_session_manager,
mock_session):
"""It accepts the session command"""
mock_session_manager.get_by_id.return_value = mock_session
mock_session.execute_command.return_value = SimpleCommandResponse(
id="44",
command=ProtocolCommand.pause,
data=EmptyModel(),
createdAt=datetime(2020, 1, 2),
startedAt=datetime(2020, 1, 3),
completedAt=datetime(2020, 1, 4),
status=CommandStatus.executed
)
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
"data": {
"command": "protocol.pause",
"data": {}
}
}
)
mock_session.execute_command.assert_called_once_with(
SimpleCommandRequest(command=ProtocolCommand.pause,
data=EmptyModel())
)
assert response.json() == {
'data': {
'command': 'protocol.pause',
'data': {},
'status': 'executed',
'createdAt': '2020-01-02T00:00:00',
'startedAt': '2020-01-03T00:00:00',
'completedAt': '2020-01-04T00:00:00',
'result': None,
'id': "44",
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions',
'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}',
'meta': None,
},
},
}
assert response.status_code == 200
@pytest.mark.parametrize(argnames="exception,expected_status",
argvalues=[
[UnsupportedCommandException, 403],
[CommandExecutionException, 403],
])
def test_execute_command_error(sessions_api_client,
mock_session_manager,
mock_session,
exception,
expected_status):
"""Test that we handle executor errors correctly"""
mock_session_manager.get_by_id.return_value = mock_session
async def raiser(*args, **kwargs):
raise exception("Cannot do it")
mock_session.execute_command.side_effect = raiser
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
'data': {
'command': 'protocol.pause',
'data': {}
}
}
)
assert response.json() == {
'errors': [
{
'detail': 'Cannot do it',
'title': 'Action Forbidden',
'id': 'UncategorizedError',
}
]
}
assert response.status_code == expected_status
def test_execute_command_session_inactive(
sessions_api_client,
mock_session_manager,
mock_session,
):
"""Test that only the active session can execute commands"""
mock_session_manager.get_by_id.return_value = mock_session
mock_session_manager.is_active.return_value = False
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
'data': {
'command': 'protocol.pause',
'data': {}
}
}
)
mock_session_manager.is_active.assert_called_once_with(
mock_session.meta.identifier)
assert response.json() == {
'errors': [
{
'id': 'UncategorizedError',
'title': 'Action Forbidden',
'detail': f"Session '{mock_session.meta.identifier}'"
f" is not active. Only the active session can "
f"execute commands"
}
]
}
assert response.status_code == 403
|
the-stack_0_13082 | from django.utils.deprecation import MiddlewareMixin#中间件基类
from django.core.cache import cache
class CountMiddleware(MiddlewareMixin):
    # A middleware class must accept a get_response argument, that is, it must define
    # an __init__ and a __call__ method (MiddlewareMixin provides these here).
#def __init__(self, get_response):
#self.get_response = get_response
#def __call__(self, request):
#return self.get_response(request)
    def process_request(self, request):  # executed before the URL request is handled
#self.online_ips = get_online_count()
        # get the user's IP and store it in the cache
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
        # use the IP as the key and reset its timer to 5 minutes
cache.set(ip, 0, 5 * 60)
        # online_ips stores the collection of all IPs that have not expired
online_ips = cache.get("online_ips", [])
if online_ips:
            # from the IP list, fetch all cache keys (IPs) that are still alive, i.e. refresh online_ips
online_ips = cache.get_many(online_ips).keys()
            # online_ips is now a dict.keys() view and needs to be converted to a list
online_ips=list(online_ips)
        # add the new IP to the list
if ip not in online_ips:
online_ips.append(ip)
        # use "online_ips" as the key to store the full list of IPs
cache.set("online_ips", online_ips) |
the-stack_0_13084 | ################################################################
### various add-ons to the SciPy morphology package
################################################################
from numpy import *
import pylab
from pylab import *
from scipy.ndimage import morphology,measurements,filters
from scipy.ndimage.morphology import *
from .toplevel import *
@checks(ABINARY2)
def label(image,**kw):
"""Redefine the scipy.ndimage.measurements.label function to
work with a wider range of data types. The default function
is inconsistent about the data types it accepts on different
platforms."""
try: return measurements.label(image,**kw)
except: pass
types = ["int32","uint32","int64","uint64","int16","uint16"]
for t in types:
try: return measurements.label(array(image,dtype=t),**kw)
except: pass
# let it raise the same exception as before
return measurements.label(image,**kw)
@checks(AINT2)
def find_objects(image,**kw):
"""Redefine the scipy.ndimage.measurements.find_objects function to
work with a wider range of data types. The default function
is inconsistent about the data types it accepts on different
platforms."""
try: return measurements.find_objects(image,**kw)
except: pass
types = ["int32","uint32","int64","uint64","int16","uint16"]
for t in types:
try: return measurements.find_objects(array(image,dtype=t),**kw)
except: pass
# let it raise the same exception as before
return measurements.find_objects(image,**kw)
def check_binary(image):
assert image.dtype=='B' or image.dtype=='i' or image.dtype==dtype('bool'),\
"array should be binary, is %s %s"%(image.dtype,image.shape)
assert amin(image)>=0 and amax(image)<=1,\
"array should be binary, has values %g to %g"%(amin(image),amax(image))
@checks(ABINARY2,uintpair)
def r_dilation(image,size,origin=0):
"""Dilation with rectangular structuring element using maximum_filter"""
return filters.maximum_filter(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def r_erosion(image,size,origin=0):
"""Erosion with rectangular structuring element using maximum_filter"""
return filters.minimum_filter(image,size,origin=origin, mode='constant', cval=1)
@checks(ABINARY2,uintpair)
def r_opening(image,size,origin=0):
"""Opening with rectangular structuring element using maximum/minimum filter"""
check_binary(image)
image = r_erosion(image,size,origin=origin)
return r_dilation(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def r_closing(image,size,origin=0):
"""Closing with rectangular structuring element using maximum/minimum filter"""
check_binary(image)
    image = r_dilation(image,size,origin=origin)
    return r_erosion(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def rb_dilation(image,size,origin=0):
"""Binary dilation using linear filters."""
output = zeros(image.shape,'f')
filters.uniform_filter(image,size,output=output,origin=origin)
return array(output>0,'i')
@checks(ABINARY2,uintpair)
def rb_erosion(image,size,origin=0):
"""Binary erosion using linear filters."""
output = zeros(image.shape,'f')
filters.uniform_filter(image,size,output=output,origin=origin, mode='constant', cval=1)
return array(output==1,'i')
@checks(ABINARY2,uintpair)
def rb_opening(image,size,origin=0):
"""Binary opening using linear filters."""
image = rb_erosion(image,size,origin=origin)
return rb_dilation(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def rb_closing(image,size,origin=0):
"""Binary closing using linear filters."""
image = rb_dilation(image,size,origin=origin)
return rb_erosion(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_dilation(image,size,origin=0):
"""Grayscale dilation with maximum/minimum filters."""
return filters.maximum_filter(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_erosion(image,size,origin=0):
"""Grayscale erosion with maximum/minimum filters."""
return filters.minimum_filter(image,size,origin=origin, mode='constant', cval=1)
@checks(GRAYSCALE,uintpair)
def rg_opening(image,size,origin=0):
"""Grayscale opening with maximum/minimum filters."""
    image = rg_erosion(image,size,origin=origin)
    return rg_dilation(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_closing(image,size,origin=0):
"""Grayscale closing with maximum/minimum filters."""
    image = rg_dilation(image,size,origin=origin)
    return rg_erosion(image,size,origin=origin)
@checks(SEGMENTATION)
def showlabels(x,n=7):
pylab.imshow(where(x>0,x%n+1,0),cmap=pylab.cm.gist_stern)
@checks(SEGMENTATION)
def spread_labels(labels,maxdist=9999999):
"""Spread the given labels to the background"""
distances,features = morphology.distance_transform_edt(labels==0,return_distances=1,return_indices=1)
indexes = features[0]*labels.shape[1]+features[1]
spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape)
spread *= (distances<maxdist)
return spread
@checks(ABINARY2,ABINARY2)
def keep_marked(image,markers):
"""Given a marker image, keep only the connected components
that overlap the markers."""
labels,_ = label(image)
marked = unique(labels*(markers!=0))
kept = in1d(labels.ravel(),marked)
return (image!=0)*kept.reshape(*labels.shape)
@checks(ABINARY2,ABINARY2)
def remove_marked(image,markers):
"""Given a marker image, remove all the connected components
that overlap markers."""
marked = keep_marked(image,markers)
return image*(marked==0)
@checks(SEGMENTATION,SEGMENTATION)
def correspondences(labels1,labels2):
"""Given two labeled images, compute an array giving the correspondences
between labels in the two images (as tuples of label in `labels1`,
label in `labels2`, and pixel count)."""
q = 100000
assert amin(labels1)>=0 and amin(labels2)>=0
assert amax(labels2)<q
combo = labels1*q+labels2
result, counts = unique(combo, return_counts=True)
result = array([result//q,result%q,counts])
return result
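# Illustrative sketch of the output (hypothetical toy labels, not from the original module):
#
#   a = array([[0, 1], [1, 2]]); b = array([[0, 1], [3, 3]])
#   correspondences(a, b)
#   # -> array([[0, 1, 1, 2],    # labels in `labels1`
#   #           [0, 1, 3, 3],    # labels in `labels2`
#   #           [1, 1, 1, 1]])   # shared pixel counts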
@checks(ABINARY2,SEGMENTATION)
def propagate_labels_simple(regions,labels):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label."""
rlabels,_ = label(regions)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
for o,i,_ in cors.T: outputs[o] = i
outputs[0] = 0
return outputs[rlabels]
@checks(ABINARY2,SEGMENTATION)
def propagate_labels_majority(image,labels):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label.
For each component that has a conflict, select the label
with the largest overlap."""
rlabels,_ = label(image)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
counts = zeros(amax(rlabels)+1,'i')
for rlabel, label_, count in cors.T:
if not rlabel or not label_:
# ignore background correspondences
continue
if counts[rlabel] < count:
outputs[rlabel] = label_
counts[rlabel] = count
outputs[0] = 0
return outputs[rlabels]
@checks(ABINARY2,SEGMENTATION)
def propagate_labels(image,labels,conflict=0):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label.
Assign the value `conflict` to any components that have a conflict."""
rlabels,_ = label(image)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
oops = -(1<<30)
for o,i,_ in cors.T:
if outputs[o]!=0: outputs[o] = oops
else: outputs[o] = i
outputs[outputs==oops] = conflict
outputs[0] = 0
return outputs[rlabels]
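# Minimal usage sketch (assumed workflow): given a binary page image and a sparse seed
# labelling (e.g. column or line seeds), every connected component overlapping exactly
# one seed inherits that label, and ambiguous components are set to `conflict`:
#
#   seg = propagate_labels(binary_page, seed_labels, conflict=0)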
@checks(ABINARY2,True)
def select_regions(binary,f,min=0,nbest=100000):
"""Given a scoring function f over slice tuples (as returned by
find_objects), keeps at most nbest components whose scores is higher
than min."""
labels,n = label(binary)
objects = find_objects(labels)
scores = [f(o) for o in objects]
best = argsort(scores)
keep = zeros(len(objects)+1,'i')
if nbest > 0:
for i in best[-nbest:]:
if scores[i]<=min: continue
keep[i+1] = 1
# print scores,best[-nbest:],keep
# print sorted(list(set(labels.ravel())))
# print sorted(list(set(keep[labels].ravel())))
return keep[labels]
@checks(SEGMENTATION)
def all_neighbors(image):
"""Given an image with labels, find all pairs of labels
that are directly neighboring each other."""
q = 100000
assert amax(image)<q
assert amin(image)>=0
u = unique(q*image+roll(image,1,0))
d = unique(q*image+roll(image,-1,0))
l = unique(q*image+roll(image,1,1))
r = unique(q*image+roll(image,-1,1))
all = unique(r_[u,d,l,r])
all = c_[all//q,all%q]
all = unique(array([sorted(x) for x in all]))
return all
################################################################
### Iterate through the regions of a color image.
################################################################
@checks(SEGMENTATION)
def renumber_labels_ordered(a,correspondence=0):
"""Renumber the labels of the input array in numerical order so
that they are arranged from 1...N"""
assert amin(a)>=0
assert amax(a)<=2**25
labels = sorted(unique(ravel(a)))
renum = zeros(amax(labels)+1,dtype='i')
renum[labels] = arange(len(labels),dtype='i')
if correspondence:
return renum[a],labels
else:
return renum[a]
@checks(SEGMENTATION)
def renumber_labels(a):
"""Alias for renumber_labels_ordered"""
return renumber_labels_ordered(a)
def pyargsort(seq, key=lambda x: x):
    """Like numpy's argsort, but using the builtin Python sorting
    function.  Takes an optional key function."""
    return sorted(range(len(seq)), key=lambda i: key(seq[i]))
@checks(SEGMENTATION)
def renumber_by_xcenter(seg):
"""Given a segmentation (as a color image), change the labels
assigned to each region such that when the labels are considered
in ascending sequence, the x-centers of their bounding boxes
are non-decreasing. This is used for sorting the components
of a segmented text line into left-to-right reading order."""
objects = [(slice(0,0),slice(0,0))]+find_objects(seg)
def xc(o):
# if some labels of the segmentation are missing, we
# return a very large xcenter, which will move them all
# the way to the right (they don't show up in the final
# segmentation anyway)
if o is None: return 999999
return mean((o[1].start,o[1].stop))
xs = array([xc(o) for o in objects])
order = argsort(xs)
segmap = zeros(amax(seg)+1,'i')
for i,j in enumerate(order): segmap[j] = i
return segmap[seg]
@checks(SEGMENTATION)
def ordered_by_xcenter(seg):
"""Verify that the labels of a segmentation are ordered
spatially (as determined by the x-center of their bounding
boxes) in left-to-right reading order."""
objects = [(slice(0,0),slice(0,0))]+find_objects(seg)
def xc(o): return mean((o[1].start,o[1].stop))
xs = array([xc(o) for o in objects])
for i in range(1,len(xs)):
if xs[i-1]>xs[i]: return 0
return 1
|
the-stack_0_13085 | import os, sys, tempfile
import datetime, time, re
from seiscomp import mseedlite as mseed
def _timeparse(t, format):
"""Parse a time string that might contain fractions of a second.
Fractional seconds are supported using a fragile, miserable hack.
Given a time string like '02:03:04.234234' and a format string of
'%H:%M:%S', time.strptime() will raise a ValueError with this
message: 'unconverted data remains: .234234'. If %S is in the
format string and the ValueError matches as above, a datetime
object will be created from the part that matches and the
microseconds in the time string.
"""
try:
return datetime.datetime(*time.strptime(t, format)[0:6]).time()
except ValueError as msg:
if "%S" in format:
msg = str(msg)
mat = re.match(r"unconverted data remains:"
" \.([0-9]{1,6})$", msg)
if mat is not None:
# fractional seconds are present - this is the style
# used by datetime's isoformat() method
frac = "." + mat.group(1)
t = t[:-len(frac)]
t = datetime.datetime(*time.strptime(t, format)[0:6])
microsecond = int(float(frac)*1e6)
return t.replace(microsecond=microsecond)
else:
mat = re.match(r"unconverted data remains:"
" \,([0-9]{3,3})$", msg)
if mat is not None:
# fractional seconds are present - this is the style
# used by the logging module
frac = "." + mat.group(1)
t = t[:-len(frac)]
t = datetime.datetime(*time.strptime(t, format)[0:6])
microsecond = int(float(frac)*1e6)
return t.replace(microsecond=microsecond)
raise
def timeparse(t):
return _timeparse(t, "%Y/%m/%d %H:%M:%S")
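# Usage sketch; note that the fast path above returns a datetime.time while the
# fractional-seconds path returns a full datetime, mirroring _timeparse:
#
#   timeparse("2011/03/07 12:30:45")      # -> datetime.time(12, 30, 45)
#   timeparse("2011/03/07 12:30:45.250")  # -> datetime.datetime(2011, 3, 7, 12, 30, 45, 250000)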
class Input(mseed.Input):
def __init__(self, server, streams,
stime=None, etime=None, timeout=None, verbose=0):
# XXX Add the possibility for supplying stime and etime as
# individual times for each stream.
"""
'streams' must be a list containing tuples of (net,sta,loc,cha)
"""
import subprocess
streams = [ "%-3s %5s %s%3s.D" % s for s in streams ]
streams.sort()
self.tmp = tempfile.NamedTemporaryFile(mode="w", prefix="slinktool.")
self.tmp.write("\n".join(streams)+"\n")
self.tmp.flush()
if verbose:
sys.stderr.write("\n".join(streams)+"\n")
slinktool = os.getenv("SLINKTOOL")
if not slinktool:
slinktool = "slinktool"
args = [slinktool, "-l", self.tmp.name, "-o", "-"]
if stime:
args.append("-tw")
tw = "%d,%d,%d,%d,%d,%d:" % (stime.year,stime.month,stime.day,stime.hour,stime.minute,stime.second)
if etime:
                tw += "%d,%d,%d,%d,%d,%d" % (etime.year,etime.month,etime.day,etime.hour,etime.minute,etime.second)
args.append(tw)
if verbose: args.append("-v")
if timeout:
try: assert int(timeout) > 0
except: raise TypeError("illegal timeout parameter")
args += ["-nt", "%d" % int(timeout)]
args.append(server)
# start 'slinktool' as sub-process
self.popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
infile = self.popen.stdout
mseed.Input.__init__(self, infile)
def __del__(self):
"""
Shut down SeedLink connections and close input.
"""
sys.stderr.write("shutting down slinktool\n")
sys.stderr.flush()
slinktool_pid = self.popen.pid
# It would of course be much better to send SIGTERM,
# but somehow slinktool often appears to ignore it.
# XXX Need to figure out why, and perhaps fix it (not critical).
self.popen.kill()
self.popen.communicate()
# mseed.Input.__del__(self) # closes the input file
class Input2(mseed.Input):
def __init__(self, server, streams, stime=None, etime=None, verbose=0):
"""
XXX information not uptodate!!! XXX
'streams' must be a dict containing tuples of (stime, etime),
with the key being the stream_id and stime and etime being
the starting and end time of the time window, respectively.
The times must be seis.Time objects. For instance
stime = seis.Time(...)
etime = seis.Time(...)
streams["GE.KBS.00.BHZ.D"] = (stime, etime)
It is more efficient to request the same time interval for
all streams. Wildcards for the channels are allowed. If
stime is None, only new data are retrieved as they come in.
"""
streams = [ "%-3s %5s %s%3s.D" % tuple(s.split(".")[:4])
for s in streams ]
streams.sort()
self.tmp = tempfile.NamedTemporaryFile(mode="w", prefix="slinktool.")
self.tmp.write("\n".join(streams)+"\n")
sys.stderr.write("\n".join(streams)+"\n")
self.tmp.flush()
cmd = "slinktool -l %s -o -" % self.tmp.name
if stime:
assert isinstance(stime, seis.Time)
cmd += " -tw %d,%d,%d,%d,%d,%d:" % stime.asDate
if etime:
assert isinstance(etime, seis.Time)
cmd += "%d,%d,%d,%d,%d,%d" % etime.asDate
cmd = cmd + "%s '%s'" % (verbose*" -v", server)
infile = os.popen(cmd)
mseed.Input.__init__(self, infile)
def available(server="localhost:18000",
time_window=None, stream_ids=None, verbose=0):
"""
Connects to server and returns a dictionary of lists of available
time windows as tuples of (start_time, end_time) for each available
stream. The stream set can be limited by specifying a list of
stream_ids in the format usual format, i.e. net.sta.loc.cha.type,
e.g. "GE.KBS.00.BHZ.D".
Note that often the returned lists contain only one time tuple,
corresponding to one contiguous time window available.
NEW:
The search for available data can be limited to a time window by
specifying the "time_window" parameter, which must be a tuple
containing the starting and end time as seis.Time objects.
"""
import re
if time_window:
stime, etime = time_window
assert stime <= etime
else:
stime, etime = None, None
cmd = "slinktool -Q %s %s " % (verbose*"-v ", server)
infile = os.popen(cmd)
windows = {}
# parse the output of "slinktool -Q"
# It is assumed that the lines consist of the fields
# net,sta,[loc,], cha, type, date1, time1, "-", date2, time2
# Since the location code (loc) may or may not be present, we
# determine the position of the dash "-" to determine where the
# other fields are.
regex = re.compile("^[A-Z][A-Z]\ [A-Z].*[12][0-9]{3}(/[0-9]{2}){2}.*$")
for line in infile:
if regex.match(line): # line containing a time window, a bit crude
line = line.split()
try:
dash = line.index("-")
except ValueError:
continue
if dash==7: # location code is present
loc = line[2]
else: loc = ""
net, sta, cha, typ = line[0], line[1], line[dash-4], line[dash-3]
stream_id = "%s.%s.%s.%s.%s" % (net, sta, loc, cha, typ)
if stream_ids and stream_id not in stream_ids:
continue
t1 = seis.Time("%s %s" % (line[dash-2], line[dash-1]))
t2 = seis.Time("%s %s" % (line[dash+1], line[dash+2]))
if stime and t2<stime or etime and t1>etime:
continue # non-overlapping time windows
if stime and t1<stime:
t1 = stime
if etime and t2>etime:
t2 = etime
if not stream_id in windows:
windows[stream_id] = []
windows[stream_id].append((t1,t2))
elif verbose:
# probably some diagnostic output
sys.stdout.write("%s\n" % line.strip())
return windows
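# Usage sketch (server address is illustrative; stream ids follow net.sta.loc.cha.type):
#
#   windows = available("localhost:18000", stream_ids=["GE.KBS.00.BHZ.D"])
#   for stream_id, spans in windows.items():
#       for t1, t2 in spans:
#           print(stream_id, t1, t2)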
def server_version(host, port=18000):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
except:
return None
    s.send(b"HELLO\n")
    data = s.recv(1024).decode(errors="replace")
s.close()
if data[:8] != "SeedLink":
return None
return data[10:13]
def server_running(host, port=18000):
if server_version(host, port):
return True
return False
|
the-stack_0_13087 | from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from lipnet.lipreading.generators import BasicGenerator
from lipnet.lipreading.callbacks import Statistics, Visualize
from lipnet.lipreading.curriculums import Curriculum
from lipnet.core.decoders import Decoder
from lipnet.lipreading.helpers import labels_to_text
from lipnet.utils.spell import Spell
from lipnet.model2 import LipNet
import numpy as np
import datetime
import os
np.random.seed(55)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
DATASET_DIR = os.path.join(CURRENT_PATH, 'datasets')
OUTPUT_DIR = os.path.join(CURRENT_PATH, 'results')
LOG_DIR = os.path.join(CURRENT_PATH, 'logs')
PREDICT_GREEDY = False
PREDICT_BEAM_WIDTH = 200
PREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt')
def curriculum_rules(epoch):
return { 'sentence_length': -1, 'flip_probability': 0.5, 'jitter_probability': 0.05 }
def train(run_name, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):
curriculum = Curriculum(curriculum_rules)
lip_gen = BasicGenerator(dataset_path=DATASET_DIR,
minibatch_size=minibatch_size,
img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len,
curriculum=curriculum, start_epoch=start_epoch).build()
lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())
lipnet.summary()
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
# load weight if necessary
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
lipnet.model.load_weights(weight_file)
spell = Spell(path=PREDICT_DICTIONARY)
decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,
postprocessors=[labels_to_text, spell.sentence])
# define callbacks
statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))
visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)
tensorboard = TensorBoard(log_dir=os.path.join(LOG_DIR, run_name))
csv_logger= CSVLogger(os.path.join(LOG_DIR, "{}-{}.csv".format('training',run_name)), separator=',', append=True)
checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, "weights{epoch:02d}.h5"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)
lipnet.model.fit_generator(generator=lip_gen.next_train(),
steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,
validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,
callbacks=[checkpoint, statistics, visualize, lip_gen, tensorboard, csv_logger],
initial_epoch=start_epoch,
verbose=1,
max_q_size=5,
workers=2,
pickle_safe=True)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
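    # positional args map to: run_name, start_epoch, stop_epoch, img_c, img_w, img_h,
    # frames_n, absolute_max_string_len, minibatch_size (see the signature of train above)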
train(run_name, 0, 5000, 3, 100, 50, 75, 32, 50)
|
the-stack_0_13088 | # -*- coding: utf-8 -*-
import logging
from . import config, models
from .models.util import _fetch_data
class Nvdb(object):
""" The main class for interfacing with the API.
:param client: Name of client using the API
:type client: str
:param contact: Contact information of user of the API
:type contact: str
:param autoupdate: Indicated wether constants should be up to date with latest API-Version. Default value = True
:type autoupdate: Bool
:returns: Nvdb Class
:usage:
>>> import pnvdb
>>> nvdb = pnvdb.Nvdb(client='Your-App-Name', contact='Your-contact-information')
"""
def __init__(self, client='pnvdb', contact='', autoupdate=True):
self.base_url = config.lesapi_base_url
self.headers = {'X-Client': client, 'X-Kontaktperson': contact}
self.srid = ''
self.antall = 1000
self.name2id = None
"""
status = _fetch_data(self, 'status')
if autoupdate and last_seen_version != float(status['datakatalog']['versjon']):
try:
update_CONST()
except:
print('Autoupdate of the CONST.py file failed.\nTry initializing with adminstrative privleleges, or set autoupdate = False')
logging.info('Updated name2id and kommune values from version: {} to version {}'.
format(last_seen_version, status['datakatalog']['versjon']))
"""
def _generator(self, url, _payload, objekt_type, data):
while True:
returnert = data['metadata']['returnert']
if returnert == 0:
break
_payload.update({'start': data['metadata']['neste']['start']})
for obj in enumerate(data['objekter']):
yield models.Objekt(self, objekt_type, obj[1]['id'], obj)
data = _fetch_data(self, url, _payload)
def status(self):
""" Method for getting information about the current status of the API
:returns: Dict
:keys: ['datakatalog', 'datagrunnlag']
:usage:
>>> status = nvdb.status()
>>> print(status['datakatalog']['versjon'])
2.13
"""
return _fetch_data(self, 'status')
def objekt(self, objekt_type, nvdb_id):
""" Method for creating a spesific nvdb python Objekt
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
:param nvdb_id: the unique nvdb id
:type nvdb_id: int
:returns: :class:`.Objekt`
:usage:
>>> obj = nvdb.objekt(objekt_type=67, nvdb_id=89204552)
>>> print(obj.metadata)
        {'versjon': 3, 'type': {'navn': 'Tunnelløp', 'id': 67}, 'startdato': '2014-01-17',
'sist_modifisert': '2017-10-23 15:15:50'}
"""
return models.Objekt(self, objekt_type, nvdb_id)
def objekt_type(self, objekt_type):
""" Method for creating a spesific nvdb python
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
:returns: :class:`.ObjektType`
:usage:
>>> obj = nvdb.objekt_type(objekt_type=67)
>>> print(obj.metadata['sosinvdbnavn'])
Tunnelløp_67
"""
return models.ObjektType(self, objekt_type)
def objekt_typer(self):
""" Returns objekt_type of every avaliable obj type in nvdb
:returns: List of :class:`.ObjektType`
:usage:
>>> obj_types = nvdb.objekt_typer()
>>> print(obj_types[0].metadata['sosinvdbnavn'])
Skjerm_3
"""
data = _fetch_data(self, 'vegobjekttyper')
objekt_typer = []
for objekt_type in data:
objekt_type_id = objekt_type['id']
objekt_typer.append(models.ObjektType(
self, objekt_type_id, meta=objekt_type))
return objekt_typer
def hent(self, objekt_type, kriterie=None):
""" Return a generator object that can be itterated over
to fetch the results of the query.
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
:param payload: filters for the query
:type payload: dict
:returns: generator of :class:`.Objekt`
:usage:
>>> criteria = {'fylke':'2','egenskap':'1820>=20'}
>>> bomstasjoner = nvdb.hent(45, kriterie=criteria)
>>> for bomstasjon in bomstasjoner:
>>> print(bomstasjon)
"""
_payload = dict()
if kriterie:
_payload = kriterie.copy()
_payload.update(
{'antall': self.antall, 'segmentering': 'false', 'inkluder': 'alle'})
url = 'vegobjekter/{objekt_type}'.format(objekt_type=objekt_type)
data = _fetch_data(self, url, payload=_payload)
if data['metadata']['returnert'] == 0:
return None
else:
return self._generator(url, _payload, objekt_type, data)
def vegreferanse(self, vegreferanse):
""" Return vegreferanse object.
PS : Only support point refferences
:param vegreferanse: The road refferences to objectify
:type vegreferanse: string
:returns: :class:`.Vegreferanse`
:usage:
>>> print(nvdb.vegreferanse('1600Ev6hp12m1000'))
"""
if isinstance(vegreferanse, list):
return [models.Vegreferanse(self, vegref)
for vegref in vegreferanse]
return models.Vegreferanse(self, vegreferanse)
def posisjon(self, x_coordinate=None, y_coordinate=None, lat=None, lon=None):
"""Returns a posisjon object for a given location
        :param x_coordinate: X-coordinate in EUREF89 UTM 33
        :type x_coordinate: float
        :param y_coordinate: Y-coordinate in EUREF89 UTM 33
        :type y_coordinate: float
        :param lat: Latitude in EUREF89
        :type lat: float
        :param lon: Longitude in EUREF89
        :type lon: float
        :returns: :class:`.Posisjon`
        :usage:
            >>> pos = nvdb.posisjon(x_coordinate=269815, y_coordinate=7038165)
>>> print(pos.vegreferanse)
"""
if x_coordinate and y_coordinate:
payload = {'nord': y_coordinate, 'ost': x_coordinate}
elif lat and lon:
payload = {'lat': lat, 'lon': lon}
return models.Posisjon(self, payload)
def regioner(self):
""" Returns an Area object for all regions
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/regioner', payload)
        return [models.Area(self, area_data) for area_data in data]
def fylker(self):
""" Returns an mArea object for all fylker
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/fylker', payload)
return [models.Area(self, models.Area_data) for models.Area_data in data]
def vegavdelinger(self):
""" Returns an Area object for all vegavdelinger
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/vegavdelinger', payload)
return [models.Area(self, models.Area_data) for models.Area_data in data]
def kommuner(self):
""" Returns an Area object for all kommuner
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/kommuner', payload)
return [models.Area(self, models.Area_data) for models.Area_data in data]
def kontraktsomrader(self):
""" Returns an Area object for all kontraktsomrader
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/kontraktsomrader', payload)
return [models.Area(self, models.Area_data) for models.Area_data in data]
def riksvegruter(self):
""" Returns an Area object for all riksvegruter
:returns: list of :class:`.Area`
:usage:
>>> for region in nvdb.regioner():
>>> print(region.metadata)
"""
payload = {'inkluder': 'alle'}
data = _fetch_data(self, 'omrader/riksvegruter', payload)
return [models.Area(self, models.Area_data) for models.Area_data in data]
|
the-stack_0_13089 | import base64
import io
import os
import threading
import time
from typing import Optional, List
from platypush import Config
from platypush.context import get_bus
from platypush.message.event.qrcode import QrcodeScannedEvent
from platypush.message.response.qrcode import QrcodeGeneratedResponse, QrcodeDecodedResponse, ResultModel
from platypush.plugins import Plugin, action
from platypush.plugins.camera import CameraPlugin
from platypush.utils import get_plugin_class_by_name
class QrcodePlugin(Plugin):
"""
Plugin to generate and scan QR and bar codes.
Requires:
* **numpy** (``pip install numpy``).
* **qrcode** (``pip install 'qrcode[pil]'``) for QR generation.
* **pyzbar** (``pip install pyzbar``) for decoding code from images.
* **Pillow** (``pip install Pillow``) for image management.
"""
def __init__(self, camera_plugin: Optional[str] = None, **kwargs):
"""
:param camera_plugin: Name of the plugin that will be used as a camera to capture images (e.g.
``camera.cv`` or ``camera.pi``).
"""
super().__init__(**kwargs)
self.camera_plugin = camera_plugin
self._capturing = threading.Event()
def _get_camera(self, camera_plugin: Optional[str] = None, **config) -> CameraPlugin:
camera_plugin = camera_plugin or self.camera_plugin
if not config:
config = Config.get(camera_plugin) or {}
config['stream_raw_frames'] = True
cls = get_plugin_class_by_name(camera_plugin)
assert cls and issubclass(cls, CameraPlugin), '{} is not a valid camera plugin'.format(camera_plugin)
return cls(**config)
# noinspection PyShadowingBuiltins
@action
def generate(self, content: str, output_file: Optional[str] = None, show: bool = False,
format: str = 'png', camera_plugin: Optional[str] = None) -> QrcodeGeneratedResponse:
"""
Generate a QR code.
If you configured the :class:`platypush.backend.http.HttpBackend` then you can also generate
codes directly from the browser through ``http://<host>:<port>/qrcode?content=...``.
:param content: Text, URL or content of the QR code.
:param output_file: If set then the QR code will be exported in the specified image file.
Otherwise, a base64-encoded representation of its binary content will be returned in
the response as ``data``.
:param show: If True, and if the device where the application runs has an active display,
then the generated QR code will be shown on display.
:param format: Output image format (default: ``png``).
:param camera_plugin: If set then this plugin (e.g. ``camera`` or ``camera.pi``) will be used to capture
live images from the camera and search for bar codes or QR-codes.
:return: :class:`platypush.message.response.qrcode.QrcodeGeneratedResponse`.
"""
import qrcode
qr = qrcode.make(content)
img = qr.get_image()
ret = {
'content': content,
'format': format,
}
if show:
img.show()
if output_file:
output_file = os.path.abspath(os.path.expanduser(output_file))
img.save(output_file, format=format)
ret['image_file'] = output_file
else:
f = io.BytesIO()
img.save(f, format=format)
ret['data'] = base64.encodebytes(f.getvalue()).decode()
return QrcodeGeneratedResponse(**ret)
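    # Caller-side sketch (names are illustrative): when no output_file is given, the
    # base64 payload returned in `data` can be turned back into raw image bytes with
    #
    #   png_bytes = base64.decodebytes(resp_data.encode())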
@action
def decode(self, image_file: str) -> QrcodeDecodedResponse:
"""
Decode a QR code from an image file.
:param image_file: Path of the image file.
"""
from pyzbar import pyzbar
from PIL import Image
image_file = os.path.abspath(os.path.expanduser(image_file))
img = Image.open(image_file)
results = pyzbar.decode(img)
return QrcodeDecodedResponse(results)
def _convert_frame(self, frame):
import numpy as np
from PIL import Image
assert isinstance(frame, np.ndarray), \
'Image conversion only works with numpy arrays for now (got {})'.format(type(frame))
mode = 'RGB'
if len(frame.shape) > 2 and frame.shape[2] == 4:
mode = 'RGBA'
return Image.frombuffer(mode, (frame.shape[1], frame.shape[0]), frame, 'raw', mode, 0, 1)
@action
def start_scanning(self, camera_plugin: Optional[str] = None, duration: Optional[float] = None,
n_codes: Optional[int] = None) -> Optional[List[ResultModel]]:
"""
Decode QR-codes and bar codes using a camera.
Triggers:
- :class:`platypush.message.event.qrcode.QrcodeScannedEvent` when a code is successfully scanned.
:param camera_plugin: Camera plugin (overrides default ``camera_plugin``).
:param duration: How long the capturing phase should run (default: until ``stop_scanning`` or app termination).
:param n_codes: Stop after decoding this number of codes (default: None).
:return: When ``duration`` or ``n_codes`` are specified or ``stop_scanning`` is called, it will return a list of
            :class:`platypush.message.response.qrcode.ResultModel` instances with the scanned results.
"""
from pyzbar import pyzbar
assert not self._capturing.is_set(), 'A capturing process is already running'
camera = self._get_camera(camera_plugin)
codes = []
last_results = {}
last_results_timeout = 10.0
last_results_time = 0
self._capturing.set()
try:
with camera:
start_time = time.time()
while self._capturing.is_set() \
and (not duration or time.time() < start_time + duration) \
and (not n_codes or len(codes) < n_codes):
output = camera.get_stream()
with output.ready:
output.ready.wait()
img = self._convert_frame(output.raw_frame)
results = pyzbar.decode(img)
if results:
results = [
result for result in QrcodeDecodedResponse(results).output['results']
if result['data'] not in last_results
or time.time() >= last_results_time + last_results_timeout
]
if results:
codes.extend(results)
get_bus().post(QrcodeScannedEvent(results=results))
last_results = {result['data']: result for result in results}
last_results_time = time.time()
finally:
self._capturing.clear()
return codes
@action
def stop_scanning(self):
self._capturing.clear()
# vim:sw=4:ts=4:et:
|
the-stack_0_13090 | from uuid import uuid4
from django.conf import settings
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
from .locals import set_cid, get_cid, log_output
class CidMiddleware(MiddlewareMixin):
"""
Middleware class to extract the correlation id from incoming headers
and add them to outgoing headers
"""
def __init__(self, *args, **kwargs):
super(CidMiddleware, self).__init__(*args, **kwargs)
self.cid_request_header = getattr(
settings, 'CID_HEADER', 'X_CORRELATION_ID'
)
self.cid_response_header = getattr(
settings, 'CID_RESPONSE_HEADER', self.cid_request_header
)
self.generate_cid = getattr(settings, 'CID_GENERATE', False)
def process_request(self, request):
cid = request.META.get(self.cid_request_header, None)
if cid is None and self.generate_cid:
cid = str(uuid4())
request.correlation_id = cid
set_cid(request.correlation_id)
def process_response(self, request, response):
cid = get_cid()
if cid and self.cid_response_header:
response[self.cid_response_header] = cid
# Intercept 5XX errors and log them
log_output(request, response)
return response
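    # A minimal settings sketch for wiring this middleware up (module path and header
    # names are illustrative, adjust them to your project):
    #
    #   MIDDLEWARE = ['cid.middleware.CidMiddleware', ...]
    #   CID_HEADER = 'HTTP_X_CORRELATION_ID'
    #   CID_RESPONSE_HEADER = 'X-Correlation-Id'
    #   CID_GENERATE = True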
|
the-stack_0_13091 | import numpy as np
from numpy.random import normal
from scipy.sparse import issparse
import scipy.sparse.linalg as slinalg
from scipy import linalg, stats
__all__ = [
"quad_potential",
"QuadPotentialDiag",
"QuadPotentialDiagAdapt",
"isquadpotential",
"QuadPotentialLowRank",
]
def quad_potential(C, is_cov):
"""
Compute a QuadPotential object from a scaling matrix.
Parameters
----------
C : arraylike, 0 <= ndim <= 2
scaling matrix for the potential
vector treated as diagonal matrix.
is_cov : Boolean
whether C is provided as a covariance matrix or hessian
Returns
-------
q : Quadpotential
"""
if issparse(C):
if not chol_available:
raise ImportError("Sparse mass matrices require scikits.sparse")
elif is_cov:
return QuadPotentialSparse(C)
else:
raise ValueError("Sparse precision matrices are not supported")
partial_check_positive_definite(C)
if C.ndim == 1:
if is_cov:
return QuadPotentialDiag(C)
else:
return QuadPotentialDiag(1.0 / C)
else:
raise NotImplementedError("QuadPotentialFull and QuadPotentialFullInv not yet implemented")
def partial_check_positive_definite(C):
"""Make a simple but partial check for Positive Definiteness."""
if C.ndim == 1:
d = C
else:
d = np.diag(C)
(i,) = np.nonzero(np.logical_or(np.isnan(d), d <= 0))
if len(i):
raise PositiveDefiniteError("Simple check failed. Diagonal contains negatives", i)
class PositiveDefiniteError(ValueError):
def __init__(self, msg, idx):
super(PositiveDefiniteError, self).__init__(msg)
self.idx = idx
self.msg = msg
def __str__(self):
return "Scaling is not positive definite: %s. Check indexes %s." % (self.msg, self.idx)
class QuadPotential(object):
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
raise NotImplementedError("Abstract method")
def energy(self, x, velocity=None):
raise NotImplementedError("Abstract method")
def random(self, x):
raise NotImplementedError("Abstract method")
def velocity_energy(self, x, v_out):
raise NotImplementedError("Abstract method")
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning.
This can be used by adaptive potentials to change the
mass matrix.
"""
pass
def raise_ok(self, vmap=None):
"""Check if the mass matrix is ok, and raise ValueError if not.
Parameters
----------
vmap : blocking.ArrayOrdering.vmap
List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp
Raises
------
ValueError if any standard deviations are 0 or infinite
Returns
-------
None
"""
return None
def reset(self):
pass
def isquadpotential(value):
"""Check whether an object might be a QuadPotential object."""
return isinstance(value, QuadPotential)
class QuadPotentialDiagAdapt(QuadPotential):
"""Adapt a diagonal mass matrix from the sample variances."""
def __init__(
self,
n,
initial_mean,
initial_diag=None,
initial_weight=0,
adaptation_window=101,
dtype=None,
):
"""Set up a diagonal mass matrix."""
if initial_diag is not None and initial_diag.ndim != 1:
raise ValueError("Initial diagonal must be one-dimensional.")
if initial_mean.ndim != 1:
raise ValueError("Initial mean must be one-dimensional.")
if initial_diag is not None and len(initial_diag) != n:
raise ValueError(
"Wrong shape for initial_diag: expected %s got %s" % (n, len(initial_diag))
)
if len(initial_mean) != n:
raise ValueError(
"Wrong shape for initial_mean: expected %s got %s" % (n, len(initial_mean))
)
if dtype is None:
dtype = "float32"
if initial_diag is None:
initial_diag = np.ones(n, dtype=dtype)
initial_weight = 1
else:
initial_diag = initial_diag.astype(dtype)
self.dtype = dtype
self._n = n
self._var = np.array(initial_diag, dtype=self.dtype, copy=True)
# self._var_theano = theano.shared(self._var)
self._stds = np.sqrt(initial_diag)
self._inv_stds = 1.0 / self._stds
self._foreground_var = _WeightedVariance(
self._n, initial_mean, initial_diag, initial_weight, self.dtype
)
self._background_var = _WeightedVariance(self._n, dtype=self.dtype)
self._n_samples = 0
self.adaptation_window = adaptation_window
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
return np.multiply(self._var, x, out=out)
def energy(self, x, velocity=None):
"""Compute kinetic energy at a position in parameter space."""
if velocity is not None:
return 0.5 * x.dot(velocity)
return 0.5 * x.dot(self._var * x)
def velocity_energy(self, x, v_out):
"""Compute velocity and return kinetic energy at a position in parameter space."""
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
def random(self):
"""Draw random value from QuadPotential."""
vals = normal(size=self._n).astype(self.dtype)
return self._inv_stds * vals
def _update_from_weightvar(self, weightvar):
weightvar.current_variance(out=self._var)
np.sqrt(self._var, out=self._stds)
np.divide(1, self._stds, out=self._inv_stds)
# self._var_theano.set_value(self._var)
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning."""
if not tune:
return
window = self.adaptation_window
self._foreground_var.add_sample(sample, weight=1)
self._background_var.add_sample(sample, weight=1)
self._update_from_weightvar(self._foreground_var)
if self._n_samples > 0 and self._n_samples % window == 0:
self._foreground_var = self._background_var
self._background_var = _WeightedVariance(self._n, dtype=self.dtype)
self._n_samples += 1
def raise_ok(self, vmap):
"""Check if the mass matrix is ok, and raise ValueError if not.
Parameters
----------
vmap : blocking.ArrayOrdering.vmap
List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp
Raises
------
ValueError if any standard deviations are 0 or infinite
Returns
-------
None
"""
if np.any(self._stds == 0):
name_slc = []
tmp_hold = list(range(self._stds.size))
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for i in range(slclen):
name_slc.append((vmap_.var, i))
index = np.where(self._stds == 0)[0]
errmsg = ["Mass matrix contains zeros on the diagonal. "]
for ii in index:
errmsg.append(
"The derivative of RV `{}`.ravel()[{}]" " is zero.".format(*name_slc[ii])
)
raise ValueError("\n".join(errmsg))
if np.any(~np.isfinite(self._stds)):
name_slc = []
tmp_hold = list(range(self._stds.size))
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for i in range(slclen):
name_slc.append((vmap_.var, i))
index = np.where(~np.isfinite(self._stds))[0]
errmsg = ["Mass matrix contains non-finite values on the diagonal. "]
for ii in index:
errmsg.append(
"The derivative of RV `{}`.ravel()[{}]" " is non-finite.".format(*name_slc[ii])
)
raise ValueError("\n".join(errmsg))
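# Illustrative sketch (not part of the original module): driving the adaptive
# diagonal potential from a toy loop. The sample/grad values are random
# placeholders, not real HMC output.
def _demo_diag_adapt(n=5, steps=10):
    pot = QuadPotentialDiagAdapt(n, initial_mean=np.zeros(n))
    for _ in range(steps):
        pot.update(np.random.randn(n), np.random.randn(n), tune=True)
    p = pot.random()              # momentum draw using the adapted scales
    return pot.energy(p), pot.velocity(p)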
class QuadPotentialDiagAdaptGrad(QuadPotentialDiagAdapt):
"""Adapt a diagonal mass matrix from the variances of the gradients.
    This is experimental, and may be removed without prior deprecation.
"""
def __init__(self, *args, **kwargs):
super(QuadPotentialDiagAdaptGrad, self).__init__(*args, **kwargs)
self._grads1 = np.zeros(self._n, dtype=self.dtype)
self._ngrads1 = 0
self._grads2 = np.zeros(self._n, dtype=self.dtype)
self._ngrads2 = 0
def _update(self, var):
self._var[:] = var
np.sqrt(self._var, out=self._stds)
np.divide(1, self._stds, out=self._inv_stds)
# self._var_theano.set_value(self._var)
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning."""
if not tune:
return
self._grads1[:] += np.abs(grad)
self._grads2[:] += np.abs(grad)
self._ngrads1 += 1
self._ngrads2 += 1
if self._n_samples <= 150:
            super().update(sample, grad, tune)
else:
self._update((self._ngrads1 / self._grads1) ** 2)
if self._n_samples > 100 and self._n_samples % 100 == 50:
self._ngrads1 = self._ngrads2
self._ngrads2 = 1
self._grads1[:] = self._grads2
self._grads2[:] = 1
class _WeightedVariance(object):
"""Online algorithm for computing mean of variance."""
def __init__(
self, nelem, initial_mean=None, initial_variance=None, initial_weight=0, dtype="d"
):
self._dtype = dtype
self.w_sum = float(initial_weight)
self.w_sum2 = float(initial_weight) ** 2
if initial_mean is None:
self.mean = np.zeros(nelem, dtype="d")
else:
self.mean = np.array(initial_mean, dtype="d", copy=True)
if initial_variance is None:
self.raw_var = np.zeros(nelem, dtype="d")
else:
self.raw_var = np.array(initial_variance, dtype="d", copy=True)
self.raw_var[:] *= self.w_sum
if self.raw_var.shape != (nelem,):
raise ValueError("Invalid shape for initial variance.")
if self.mean.shape != (nelem,):
raise ValueError("Invalid shape for initial mean.")
def add_sample(self, x, weight):
x = np.asarray(x)
self.w_sum += weight
self.w_sum2 += weight * weight
prop = weight / self.w_sum
old_diff = x - self.mean
self.mean[:] += prop * old_diff
new_diff = x - self.mean
self.raw_var[:] += weight * old_diff * new_diff
def current_variance(self, out=None):
if self.w_sum == 0:
raise ValueError("Can not compute variance without samples.")
if out is not None:
return np.divide(self.raw_var, self.w_sum, out=out)
else:
return (self.raw_var / self.w_sum).astype(self._dtype)
def current_mean(self):
        return self.mean.astype(self._dtype)
class QuadPotentialDiag(QuadPotential):
"""Quad potential using a diagonal covariance matrix."""
def __init__(self, v, dtype=None):
"""Use a vector to represent a diagonal matrix for a covariance matrix.
Parameters
----------
v : vector, 0 <= ndim <= 1
Diagonal of covariance matrix for the potential vector
"""
if dtype is None:
dtype = "float32"
self.dtype = dtype
v = v.astype(self.dtype)
s = v ** 0.5
self.s = s
self.inv_s = 1.0 / s
self.v = v
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
if out is not None:
np.multiply(x, self.v, out=out)
return
return self.v * x
def random(self):
"""Draw random value from QuadPotential."""
return normal(size=self.s.shape) * self.inv_s
def energy(self, x, velocity=None):
"""Compute kinetic energy at a position in parameter space."""
if velocity is not None:
return 0.5 * np.dot(x, velocity)
return 0.5 * x.dot(self.v * x)
def velocity_energy(self, x, v_out):
"""Compute velocity and return kinetic energy at a position in parameter space."""
np.multiply(x, self.v, out=v_out)
return 0.5 * np.dot(x, v_out)
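# Illustrative sketch (not part of the original module): building a fixed
# diagonal potential through `quad_potential` and computing a kinetic energy.
def _demo_quad_potential_diag(n=4):
    pot = quad_potential(np.ones(n), is_cov=True)   # -> QuadPotentialDiag
    p = pot.random()
    v_out = np.empty_like(p)
    return pot.velocity_energy(p, v_out)            # 0.5 * p.T @ C @ p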
def add_ADATv(A, v, out, diag=None, beta=0.0, work=None):
"""Run out = beta * out + A @ np.diag(D) @ A.T @ v."""
if work is None:
work = np.empty(A.shape[1])
linalg.blas.dgemv(1.0, A, v, y=work, trans=1, beta=0.0, overwrite_y=True)
if diag is not None:
work *= diag
linalg.blas.dgemv(1.0, A, work, y=out, beta=beta, overwrite_y=True)
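# Illustrative numerical check (not part of the original module) that
# add_ADATv computes out = beta * out + A @ diag(d) @ A.T @ v.
def _demo_add_ADATv():
    rng = np.random.RandomState(0)
    A = rng.randn(6, 3)
    v = rng.randn(6)
    d = rng.rand(3)
    out = np.zeros(6)
    add_ADATv(A, v, out, diag=d, beta=0.0)
    return np.allclose(out, A @ (d * (A.T @ v)))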
class Covariance:
def __init__(self, n_dim, n_svd, n_approx, values, grads, diag=None):
assert n_svd <= len(values)
assert values.shape == grads.shape
self.values = values - values.mean(0)
self.grads = grads - grads.mean(0)
val_variance = self.values.var(0)
grd_variance = self.grads.var(0)
self._val_var = val_variance
self._grd_var = grd_variance
if diag == "mean":
self.diag = np.sqrt(val_variance / grd_variance)
elif diag == "values":
self.diag = np.sqrt(val_variance)
elif isinstance(diag, np.ndarray):
self.diag = np.sqrt(diag)
else:
raise ValueError("Unknown diag approximation: %s" % diag)
self.invsqrtdiag = 1 / np.sqrt(self.diag)
self.values /= self.diag[None, :]
self.grads *= self.diag[None, :]
_, svdvals, vecs = linalg.svd(self.values, full_matrices=False)
self.vals_eigs = 2 * np.log(svdvals[:n_svd]) - np.log(len(values))
self.vals_vecs = vecs.T[:, :n_svd].copy()
_, svdvals, vecs = linalg.svd(self.grads, full_matrices=False)
self.grad_eigs = -2 * np.log(svdvals[:n_svd]) + np.log(len(grads))
self.grad_vecs = vecs.T[:, :n_svd].copy()
self.n_dim = n_dim
self.n_svd = n_svd
self.n_approx = n_approx
if n_svd < n_dim // 3:
center_slice = slice(n_svd // 3, None)
else:
center_slice = slice(2 * n_svd // 3, (2 * n_dim) // 3)
self.center = 0.5 * (
self.grad_eigs[center_slice].mean() + self.vals_eigs[center_slice].mean()
)
self.vals_eigs -= self.center
self.grad_eigs -= self.center
weight = stats.beta(0.5, 0.5).cdf(np.linspace(0, 1, n_dim))
self.weight = 1 - weight[:n_svd]
self._make_operators(n_approx)
def to_dense(self):
vecs, eigs = self.vals_vecs, self.vals_eigs
A = (vecs * eigs * self.weight) @ vecs.T
vecs, eigs = self.grad_vecs, self.grad_eigs
B = (vecs * eigs * self.weight) @ vecs.T
corr = np.exp(self.center) * linalg.expm(A + B)
corr *= self.diag[:, None]
corr *= self.diag[None, :]
return corr
def invsqrt_to_dense(self):
assert False # TODO This is wrong
vecs, eigs = self.vals_vecs, self.vals_eigs
A = (vecs * eigs * self.weight) @ vecs.T
vecs, eigs = self.grad_vecs, self.grad_eigs
B = (vecs * eigs * self.weight) @ vecs.T
corr = np.exp(-0.5 * self.center) * linalg.expm(-0.5 * (A + B))
corr *= self.invsqrtdiag[:, None]
corr *= self.invsqrtdiag[None, :]
return corr
def matmul(self, x, out=None):
if out is None:
out = np.empty_like(x)
self._matmul(x * self.diag, out)
out *= self.diag
return out
def invsqrtmul(self, x, out=None):
if out is None:
out = np.empty_like(x)
self._matmul_invsqrt(x, out)
return out / self.diag
def _make_operators(self, n_eigs, exponent=1):
vecs1, eigs1 = self.vals_vecs, self.vals_eigs
vecs2, eigs2 = self.grad_vecs, self.grad_eigs
vecs1 = np.ascontiguousarray(vecs1)
vecs2 = np.ascontiguousarray(vecs2)
def upper_matmul(x):
out = np.empty_like(x)
work = np.empty(len(eigs1))
add_ADATv(vecs1, x, out, diag=eigs1 * self.weight, beta=0.0, work=work)
add_ADATv(vecs2, x, out, diag=eigs2 * self.weight, beta=1.0, work=work)
return out
upper = slinalg.LinearOperator((self.n_dim, self.n_dim), upper_matmul)
eigs, vecs = slinalg.eigsh(upper, k=n_eigs, mode="buckling")
self._matrix_logeigs = eigs
eigs_exp = np.exp(eigs)
eigs_invsqrtexp = np.exp(-0.5 * eigs)
def matmul_exp(x, out):
work = np.empty(len(eigs))
add_ADATv(vecs, x, out, diag=None, beta=0.0, work=work)
add_ADATv(vecs, x, out, diag=eigs_exp, beta=-1.0, work=work)
out += x
out *= np.exp(self.center)
def matmul_invsqrtexp(x, out):
work = np.empty(len(eigs))
add_ADATv(vecs, x, out, diag=None, beta=0.0, work=work)
add_ADATv(vecs, x, out, diag=eigs_invsqrtexp, beta=-1.0, work=work)
out += x
out *= np.exp(-0.5 * self.center)
self._matmul = matmul_exp
self._matmul_invsqrt = matmul_invsqrtexp
class QuadPotentialLowRank(object):
def __init__(self, ndim, n_approx, diag):
self._cov = None
self._iter = 0
self._ndim = ndim
self._n_approx = n_approx
self._diag = diag
self._old_covs = []
self._grad_store = []
self._sample_store = []
self.dtype = "float64"
def velocity(self, x, out=None):
if self._cov is None:
if out is None:
out = np.empty_like(x)
out[:] = x
return out
return self._cov.matmul(x, out=out)
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * x.dot(velocity)
def random(self):
rand = np.random.randn(self._ndim)
if self._cov is None:
return rand
return self._cov.invsqrtmul(rand)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
def raise_ok(self, *args, **kwargs):
pass
def update(self, sample, grad, tune):
self._iter += 1
if not tune:
return
if self._iter < 50:
return
renew_iters = [120, 240, 400, 850]
if self._iter not in renew_iters:
self._grad_store.append(grad.copy())
self._sample_store.append(sample.copy())
return
n_samples = len(self._grad_store)
samples = np.array(self._sample_store)
grads = np.array(self._grad_store)
self._sample_store.clear()
self._grad_store.clear()
if self._iter <= 160:
n_approx = 4
else:
n_approx = self._n_approx
if self._cov is not None:
self._old_covs.append(self._cov)
n_svd = min(self._ndim - 5, n_samples - 5)
self._cov = Covariance(self._ndim, n_svd, n_approx, samples, grads, diag=self._diag)
|
the-stack_0_13093 | # %% Load packages
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.constants import num_chains
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.dataloaders import test_dataloader
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.hmc.constants import sampler_output_run_paths
# %% Load test data and labels
_, test_labels = next(iter(test_dataloader))
test_labels = test_labels.squeeze().detach().cpu().numpy()
# %% Plot predictive posteriors
pred_colors = {'correct': '#bcbd22', 'wrong': '#d62728'}
# '#bcbd22': rio grande, similar to yellow green
# ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
patch_list = []
for key in pred_colors:
patch_list.append(mpatches.Patch(color=pred_colors[key], label=key))
legend_patches = [mpatches.Patch(color=pred_colors[key], label=key.capitalize()) for key in pred_colors]
for i in range(num_chains):
test_pred_df = pd.read_csv(
sampler_output_run_paths[i].joinpath('pred_posterior_on_test.csv'),
header=None,
names=['class0', 'class1']
)
test_pred_df['preds'] = np.loadtxt(
        sampler_output_run_paths[i].joinpath('preds_via_bm.txt'), dtype=int, delimiter=',', skiprows=0
)
test_pred_df['labels'] = test_labels
test_pred_df.sort_values(['labels'], ascending=True, inplace=True)
test_pred_df = pd.concat([
test_pred_df.loc[test_pred_df['labels'] == 0].sort_values(['class0'], ascending=True),
test_pred_df.loc[test_pred_df['labels'] == 1].sort_values(['class1'], ascending=True)
])
test_pred_df['color'] = [
pred_colors['correct'] if cmp else pred_colors['wrong'] for cmp in test_pred_df['preds'] == test_pred_df['labels']
]
test_pred_df.to_csv(sampler_output_run_paths[i].joinpath('pred_posterior_on_test_for_fig.csv'))
test_pred_label_counts = test_pred_df['labels'].value_counts()
test_pred_label_cumsum = [
test_pred_label_counts.loc[0],
test_pred_label_counts.loc[0] + test_pred_label_counts.loc[1]
]
plt.figure(figsize=[8, 4])
plt.ylim([0, 1])
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['axes.titlesize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.vlines(
x=range(len(test_labels)),
ymin=0,
ymax=pd.concat([
test_pred_df['class0'][:test_pred_label_cumsum[0]],
test_pred_df['class1'][test_pred_label_cumsum[0]:]
]),
color=test_pred_df['color'],
linewidth=2
)
#plt.bar(
# range(len(test_labels)),
# pd.concat([
# test_pred_df['class0'][:test_pred_label_cumsum[0]],
# test_pred_df['class1'][test_pred_label_cumsum[0]:]
# ]),
# width=0.7,
# color=test_pred_df['color'],
# align='edge'
#)
plt.legend(handles=legend_patches, loc='upper left', ncol=1)
plt.axhline(y=0.5, xmin=0, xmax=len(test_labels), color='black', linestyle='dashed', linewidth=1.5)
plt.axvline(x=0.5*len(test_labels), ymin=0, ymax=1, color='black', linestyle='dotted', linewidth=1.5)
plt.savefig(
sampler_output_run_paths[i].joinpath('pred_posterior_on_test.png'),
pil_kwargs={'quality': 100},
transparent=True,
bbox_inches='tight',
pad_inches=0.1
)
plt.close()
|
the-stack_0_13094 | from overwatch import app
import xmltodict
import asyncio
import aiohttp
loop = asyncio.get_event_loop()
semaphore = asyncio.Semaphore(5)
def fetch_urls(urls, parser):
async def fetch(url):
with (await semaphore):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
content = await response.read()
await asyncio.sleep(1)
if app.debug:
print('Fetch: {url} <Status {status}>'.format(
url=url, status=response.status))
return parser(content)
urls_to_fetch = [fetch(url) for url in urls]
parsed_urls = loop.run_until_complete(asyncio.gather(*urls_to_fetch))
return parsed_urls
def parse_xml(url, params=[]):
if not params:
urls = [url]
else:
urls = [url.format(**a) for a in params]
return fetch_urls(urls, xmltodict.parse)
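# Illustrative usage sketch (not part of the original module); the feed URL
# and id values below are placeholders, not endpoints used by this project.
def _demo_parse_xml():
    url = 'https://example.com/feeds/{id}.xml'
    params = [{'id': 1}, {'id': 2}, {'id': 3}]
    # Returns one xmltodict mapping per fetched URL, fetched concurrently
    # (at most 5 in flight because of the module-level semaphore).
    return parse_xml(url, params)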
|
the-stack_0_13095 | # -*- coding: utf-8 -*-
import json
from odoo import api, models, _
from odoo.tools import float_round
class ReportBomStructure(models.AbstractModel):
_name = 'report.mrp.report_bom_structure'
_description = 'BOM Structure Report'
@api.model
def _get_report_values(self, docids, data=None):
docs = []
for bom_id in docids:
bom = self.env['mrp.bom'].browse(bom_id)
candidates = bom.product_id or bom.product_tmpl_id.product_variant_ids
for product_variant_id in candidates:
if data and data.get('childs'):
doc = self._get_pdf_line(bom_id, product_id=product_variant_id, qty=float(data.get('quantity')), child_bom_ids=json.loads(data.get('childs')))
else:
doc = self._get_pdf_line(bom_id, product_id=product_variant_id, unfolded=True)
doc['report_type'] = 'pdf'
doc['report_structure'] = data and data.get('report_type') or 'all'
docs.append(doc)
if not candidates:
if data and data.get('childs'):
doc = self._get_pdf_line(bom_id, qty=float(data.get('quantity')), child_bom_ids=json.loads(data.get('childs')))
else:
doc = self._get_pdf_line(bom_id, unfolded=True)
doc['report_type'] = 'pdf'
doc['report_structure'] = data and data.get('report_type') or 'all'
docs.append(doc)
return {
'doc_ids': docids,
'doc_model': 'mrp.bom',
'docs': docs,
}
@api.model
def get_html(self, bom_id=False, searchQty=1, searchVariant=False):
res = self._get_report_data(bom_id=bom_id, searchQty=searchQty, searchVariant=searchVariant)
res['lines']['report_type'] = 'html'
res['lines']['report_structure'] = 'all'
res['lines']['has_attachments'] = res['lines']['attachments'] or any(component['attachments'] for component in res['lines']['components'])
res['lines'] = self.env.ref('mrp.report_mrp_bom').render({'data': res['lines']})
return res
@api.model
def get_bom(self, bom_id=False, product_id=False, line_qty=False, line_id=False, level=False):
lines = self._get_bom(bom_id=bom_id, product_id=product_id, line_qty=line_qty, line_id=line_id, level=level)
return self.env.ref('mrp.report_mrp_bom_line').render({'data': lines})
@api.model
def get_operations(self, bom_id=False, qty=0, level=0):
bom = self.env['mrp.bom'].browse(bom_id)
lines = self._get_operation_line(bom.routing_id, float_round(qty / bom.product_qty, precision_rounding=1, rounding_method='UP'), level)
values = {
'bom_id': bom_id,
'currency': self.env.user.company_id.currency_id,
'operations': lines,
}
return self.env.ref('mrp.report_mrp_operation_line').render({'data': values})
def _get_bom_reference(self, bom):
return bom.display_name
@api.model
def _get_report_data(self, bom_id, searchQty=0, searchVariant=False):
lines = {}
bom = self.env['mrp.bom'].browse(bom_id)
bom_quantity = searchQty or bom.product_qty
bom_product_variants = {}
bom_uom_name = ''
if bom:
bom_uom_name = bom.product_uom_id.name
# Get variants used for search
if not bom.product_id:
for variant in bom.product_tmpl_id.product_variant_ids:
bom_product_variants[variant.id] = variant.display_name
lines = self._get_bom(bom_id, product_id=searchVariant, line_qty=bom_quantity, level=1)
return {
'lines': lines,
'variants': bom_product_variants,
'bom_uom_name': bom_uom_name,
'bom_qty': bom_quantity,
'is_variant_applied': self.env.user.user_has_groups('product.group_product_variant') and len(bom_product_variants) > 1,
'is_uom_applied': self.env.user.user_has_groups('uom.group_uom')
}
def _get_bom(self, bom_id=False, product_id=False, line_qty=False, line_id=False, level=False):
bom = self.env['mrp.bom'].browse(bom_id)
bom_quantity = line_qty
if line_id:
current_line = self.env['mrp.bom.line'].browse(int(line_id))
bom_quantity = current_line.product_uom_id._compute_quantity(line_qty, bom.product_uom_id)
# Display bom components for current selected product variant
if product_id:
product = self.env['product.product'].browse(int(product_id))
else:
product = bom.product_id or bom.product_tmpl_id.product_variant_id
if product:
attachments = self.env['mrp.document'].search(['|', '&', ('res_model', '=', 'product.product'),
('res_id', '=', product.id), '&', ('res_model', '=', 'product.template'), ('res_id', '=', product.product_tmpl_id.id)])
else:
product = bom.product_tmpl_id
attachments = self.env['mrp.document'].search([('res_model', '=', 'product.template'), ('res_id', '=', product.id)])
operations = self._get_operation_line(bom.routing_id, float_round(bom_quantity / bom.product_qty, precision_rounding=1, rounding_method='UP'), 0)
lines = {
'bom': bom,
'bom_qty': bom_quantity,
'bom_prod_name': product.display_name,
'currency': self.env.user.company_id.currency_id,
'product': product,
'code': bom and self._get_bom_reference(bom) or '',
'price': product.uom_id._compute_price(product.standard_price, bom.product_uom_id) * bom_quantity,
'total': sum([op['total'] for op in operations]),
'level': level or 0,
'operations': operations,
'operations_cost': sum([op['total'] for op in operations]),
'attachments': attachments,
'operations_time': sum([op['duration_expected'] for op in operations])
}
components, total = self._get_bom_lines(bom, bom_quantity, product, line_id, level)
lines['components'] = components
lines['total'] += total
return lines
def _get_bom_lines(self, bom, bom_quantity, product, line_id, level):
components = []
total = 0
for line in bom.bom_line_ids:
line_quantity = (bom_quantity / (bom.product_qty or 1.0)) * line.product_qty
if line._skip_bom_line(product):
continue
price = line.product_id.uom_id._compute_price(line.product_id.standard_price, line.product_uom_id) * line_quantity
if line.child_bom_id:
factor = line.product_uom_id._compute_quantity(line_quantity, line.child_bom_id.product_uom_id) / line.child_bom_id.product_qty
sub_total = self._get_price(line.child_bom_id, factor, line.product_id)
else:
sub_total = price
sub_total = self.env.user.company_id.currency_id.round(sub_total)
components.append({
'prod_id': line.product_id.id,
'prod_name': line.product_id.display_name,
'code': line.child_bom_id and self._get_bom_reference(line.child_bom_id) or '',
'prod_qty': line_quantity,
'prod_uom': line.product_uom_id.name,
'prod_cost': self.env.user.company_id.currency_id.round(price),
'parent_id': bom.id,
'line_id': line.id,
'level': level or 0,
'total': sub_total,
'child_bom': line.child_bom_id.id,
'phantom_bom': line.child_bom_id and line.child_bom_id.type == 'phantom' or False,
'attachments': self.env['mrp.document'].search(['|', '&',
('res_model', '=', 'product.product'), ('res_id', '=', line.product_id.id), '&', ('res_model', '=', 'product.template'), ('res_id', '=', line.product_id.product_tmpl_id.id)]),
})
total += sub_total
return components, total
def _get_operation_line(self, routing, qty, level):
operations = []
total = 0.0
for operation in routing.operation_ids:
operation_cycle = float_round(qty / operation.workcenter_id.capacity, precision_rounding=1, rounding_method='UP')
duration_expected = operation_cycle * operation.time_cycle + operation.workcenter_id.time_stop + operation.workcenter_id.time_start
total = ((duration_expected / 60.0) * operation.workcenter_id.costs_hour)
operations.append({
'level': level or 0,
'operation': operation,
'name': operation.name + ' - ' + operation.workcenter_id.name,
'duration_expected': duration_expected,
'total': self.env.user.company_id.currency_id.round(total),
})
return operations
def _get_price(self, bom, factor, product):
price = 0
if bom.routing_id:
            # Routings are defined on a BoM and don't have a concept of quantity.
            # It means that the operation times are defined for the quantity on
            # the BoM (the user produces a batch of products). E.g. if the user
            # produces a batch of 10 units with a 5-minute operation, the time
            # will be 5 minutes for a quantity between 1-10, then doubled for
            # 11-20,...
operation_cycle = float_round(factor, precision_rounding=1, rounding_method='UP')
operations = self._get_operation_line(bom.routing_id, operation_cycle, 0)
price += sum([op['total'] for op in operations])
for line in bom.bom_line_ids:
if line._skip_bom_line(product):
continue
if line.child_bom_id:
qty = line.product_uom_id._compute_quantity(line.product_qty * factor, line.child_bom_id.product_uom_id) / line.child_bom_id.product_qty
sub_price = self._get_price(line.child_bom_id, qty, line.product_id)
price += sub_price
else:
prod_qty = line.product_qty * factor
not_rounded_price = line.product_id.uom_id._compute_price(line.product_id.standard_price, line.product_uom_id) * prod_qty
price += self.env.user.company_id.currency_id.round(not_rounded_price)
return price
def _get_pdf_line(self, bom_id, product_id=False, qty=1, child_bom_ids=[], unfolded=False):
data = self._get_bom(bom_id=bom_id, product_id=product_id.id, line_qty=qty)
def get_sub_lines(bom, product_id, line_qty, line_id, level):
data = self._get_bom(bom_id=bom.id, product_id=product_id.id, line_qty=line_qty, line_id=line_id, level=level)
bom_lines = data['components']
lines = []
for bom_line in bom_lines:
lines.append({
'name': bom_line['prod_name'],
'type': 'bom',
'quantity': bom_line['prod_qty'],
'uom': bom_line['prod_uom'],
'prod_cost': bom_line['prod_cost'],
'bom_cost': bom_line['total'],
'level': bom_line['level'],
'code': bom_line['code']
})
if bom_line['child_bom'] and (unfolded or bom_line['child_bom'] in child_bom_ids):
line = self.env['mrp.bom.line'].browse(bom_line['line_id'])
lines += (get_sub_lines(line.child_bom_id, line.product_id, bom_line['prod_qty'], line, level + 1))
if data['operations']:
lines.append({
'name': _('Operations'),
'type': 'operation',
'quantity': data['operations_time'],
'uom': _('minutes'),
'bom_cost': data['operations_cost'],
'level': level,
})
for operation in data['operations']:
if unfolded or 'operation-' + str(bom.id) in child_bom_ids:
lines.append({
'name': operation['name'],
'type': 'operation',
'quantity': operation['duration_expected'],
'uom': _('minutes'),
'bom_cost': operation['total'],
'level': level + 1,
})
return lines
bom = self.env['mrp.bom'].browse(bom_id)
product = product_id or bom.product_id or bom.product_tmpl_id.product_variant_id
pdf_lines = get_sub_lines(bom, product, qty, False, 1)
data['components'] = []
data['lines'] = pdf_lines
return data
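    # --- Illustrative usage sketch (not part of the original module) ---
    # Assumes it runs inside an Odoo environment (shell or server action) with
    # at least one mrp.bom record available:
    #
    #   report = env['report.mrp.report_bom_structure']
    #   bom = env['mrp.bom'].search([], limit=1)
    #   result = report.get_html(bom_id=bom.id, searchQty=1)
    #   html_fragment = result['lines']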
|
the-stack_0_13096 | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.distributed import DistributedSampler
from pysot.utils.lr_scheduler import build_lr_scheduler
from pysot.utils.log_helper import init_log, print_speed, add_file_handler
from pysot.utils.distributed import dist_init, DistModule, reduce_gradients,\
average_reduce, get_rank, get_world_size
from pysot.utils.model_load import load_pretrain, restore_from
from pysot.utils.average_meter import AverageMeter
from pysot.utils.misc import describe, commit
from pysot.models.model_builder import ModelBuilder
from pysot.datasets.dataset import TrkDataset
from pysot.datasets.dataflow import get_train_dataflow
from pysot.config import cfg
logger = logging.getLogger('global')
parser = argparse.ArgumentParser(description='siamrpn tracking')
parser.add_argument('--cfg', type=str, default='config.yaml',
help='configuration of tracking')
parser.add_argument('--seed', type=int, default=123456,
help='random seed')
parser.add_argument('--local_rank', type=int, default=0,
help='compulsory for pytorch launcer')
args = parser.parse_args()
torch.autograd.set_detect_anomaly(True)
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def build_data_loader():
logger.info("build train dataset")
# train_dataset
train_dataset = get_train_dataflow() #TrkDataset()
logger.info("build dataset done")
# let tensorpack handle all the distributed data loading
train_loader = DataLoader(train_dataset,
batch_size=None,
batch_sampler=None,
sampler=None)
# train_sampler = None
# if get_world_size() > 1:
# train_sampler = DistributedSampler(train_dataset)
# train_loader = DataLoader(train_dataset,
# batch_size=cfg.TRAIN.BATCH_SIZE,
# num_workers=cfg.TRAIN.NUM_WORKERS,
# pin_memory=True,
# sampler=train_sampler)
return train_loader
def build_opt_lr(model, current_epoch=0):
if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
for layer in cfg.BACKBONE.TRAIN_LAYERS:
for param in getattr(model.backbone, layer).parameters():
param.requires_grad = True
for m in getattr(model.backbone, layer).modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
else:
for param in model.backbone.parameters():
param.requires_grad = False
for m in model.backbone.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
trainable_params = []
trainable_params += [{'params': filter(lambda x: x.requires_grad,
model.backbone.parameters()),
'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]
if cfg.ADJUST.ADJUST:
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.rpn_head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
optimizer = torch.optim.SGD(trainable_params,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
lr_scheduler.step(cfg.TRAIN.START_EPOCH)
return optimizer, lr_scheduler
def log_grads(model, tb_writer, tb_index):
def weights_grads(model):
grad = {}
weights = {}
for name, param in model.named_parameters():
if param.grad is not None:
grad[name] = param.grad
weights[name] = param.data
return grad, weights
grad, weights = weights_grads(model)
feature_norm, rpn_norm = 0, 0
for k, g in grad.items():
_norm = g.data.norm(2)
weight = weights[k]
w_norm = weight.norm(2)
if 'feature' in k:
feature_norm += _norm ** 2
else:
rpn_norm += _norm ** 2
tb_writer.add_scalar('grad_all/'+k.replace('.', '/'),
_norm, tb_index)
tb_writer.add_scalar('weight_all/'+k.replace('.', '/'),
w_norm, tb_index)
tb_writer.add_scalar('w-g/'+k.replace('.', '/'),
w_norm/(1e-20 + _norm), tb_index)
tot_norm = feature_norm + rpn_norm
tot_norm = tot_norm ** 0.5
feature_norm = feature_norm ** 0.5
rpn_norm = rpn_norm ** 0.5
tb_writer.add_scalar('grad/tot', tot_norm, tb_index)
tb_writer.add_scalar('grad/feature', feature_norm, tb_index)
tb_writer.add_scalar('grad/rpn', rpn_norm, tb_index)
def train(train_loader, model, optimizer, lr_scheduler, tb_writer):
cur_lr = lr_scheduler.get_cur_lr()
rank = get_rank()
average_meter = AverageMeter()
def is_valid_number(x):
return not(math.isnan(x) or math.isinf(x) or x > 1e4)
world_size = get_world_size()
num_per_epoch = len(train_loader.dataset) // world_size
# num_per_epoch = len(train_loader.dataset) // \
# cfg.TRAIN.EPOCH // (cfg.TRAIN.BATCH_SIZE * world_size)
start_epoch = cfg.TRAIN.START_EPOCH
epoch = start_epoch
if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR) and \
get_rank() == 0:
os.makedirs(cfg.TRAIN.SNAPSHOT_DIR)
logger.info("model\n{}".format(describe(model.module)))
end = time.time()
for idx, data in enumerate(train_loader):
if epoch != idx // num_per_epoch + start_epoch:
epoch = idx // num_per_epoch + start_epoch
if get_rank() == 0:
torch.save(
{'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict()},
cfg.TRAIN.SNAPSHOT_DIR+'/checkpoint_e%d.pth' % (epoch))
if epoch == cfg.TRAIN.EPOCH:
return
if cfg.BACKBONE.TRAIN_EPOCH == epoch:
logger.info('start training backbone.')
optimizer, lr_scheduler = build_opt_lr(model.module, epoch)
logger.info("model\n{}".format(describe(model.module)))
lr_scheduler.step(epoch)
cur_lr = lr_scheduler.get_cur_lr()
logger.info('epoch: {}'.format(epoch+1))
tb_idx = idx
if idx % num_per_epoch == 0 and idx != 0:
for idx, pg in enumerate(optimizer.param_groups):
logger.info('epoch {} lr {}'.format(epoch+1, pg['lr']))
if rank == 0:
tb_writer.add_scalar('lr/group{}'.format(idx+1),
pg['lr'], tb_idx)
data_time = average_reduce(time.time() - end)
if rank == 0:
tb_writer.add_scalar('time/data', data_time, tb_idx)
outputs = model(data)
loss = outputs['total_loss']
if is_valid_number(loss.data.item()):
optimizer.zero_grad()
loss.backward()
reduce_gradients(model)
if rank == 0 and cfg.TRAIN.LOG_GRADS:
log_grads(model.module, tb_writer, tb_idx)
# clip gradient
clip_grad_norm_(model.parameters(), cfg.TRAIN.GRAD_CLIP)
optimizer.step()
batch_time = time.time() - end
batch_info = {}
batch_info['batch_time'] = average_reduce(batch_time)
batch_info['data_time'] = average_reduce(data_time)
for k, v in sorted(outputs.items()):
batch_info[k] = average_reduce(v.data.item())
average_meter.update(**batch_info)
if rank == 0:
for k, v in batch_info.items():
tb_writer.add_scalar(k, v, tb_idx)
if (idx+1) % cfg.TRAIN.PRINT_FREQ == 0:
info = "Epoch: [{}][{}/{}] lr: {:.6f}\n".format(
epoch+1, (idx+1) % num_per_epoch,
num_per_epoch, cur_lr)
for cc, (k, v) in enumerate(batch_info.items()):
if cc % 2 == 0:
info += ("\t{:s}\t").format(
getattr(average_meter, k))
else:
info += ("{:s}\n").format(
getattr(average_meter, k))
logger.info(info)
print_speed(idx+1+start_epoch*num_per_epoch,
average_meter.batch_time.avg,
cfg.TRAIN.EPOCH * num_per_epoch)
end = time.time()
def main():
rank, world_size = dist_init()
logger.info("init done")
# load cfg
cfg.merge_from_file(args.cfg)
if rank == 0:
if not os.path.exists(cfg.TRAIN.LOG_DIR):
os.makedirs(cfg.TRAIN.LOG_DIR)
init_log('global', logging.INFO)
if cfg.TRAIN.LOG_DIR:
add_file_handler('global',
os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
logging.INFO)
logger.info("Version Information: \n{}\n".format(commit()))
logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
# create model
model = ModelBuilder().cuda().train()
dist_model = DistModule(model)
# load pretrained backbone weights
if cfg.BACKBONE.PRETRAINED:
cur_path = os.path.dirname(os.path.realpath(__file__))
backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED)
load_pretrain(model.backbone, backbone_path)
# create tensorboard writer
if rank == 0 and cfg.TRAIN.LOG_DIR:
tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR)
else:
tb_writer = None
# build dataset loader
train_loader = build_data_loader()
# build optimizer and lr_scheduler
optimizer, lr_scheduler = build_opt_lr(dist_model.module,
cfg.TRAIN.START_EPOCH)
# resume training
if cfg.TRAIN.RESUME:
logger.info("resume from {}".format(cfg.TRAIN.RESUME))
assert os.path.isfile(cfg.TRAIN.RESUME), \
'{} is not a valid file.'.format(cfg.TRAIN.RESUME)
model, optimizer, cfg.TRAIN.START_EPOCH = \
restore_from(model, optimizer, cfg.TRAIN.RESUME)
# load pretrain
elif cfg.TRAIN.PRETRAINED:
load_pretrain(model, cfg.TRAIN.PRETRAINED)
dist_model = DistModule(model)
logger.info(lr_scheduler)
logger.info("model prepare done")
# start training
train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer)
if __name__ == '__main__':
seed_torch(args.seed)
main()
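# --- Illustrative launch sketch (not part of the original script) ---
# The --local_rank argument above implies launching through
# torch.distributed.launch; the script path, GPU count and config file below
# are placeholders:
#
#   python -m torch.distributed.launch --nproc_per_node=2 \
#       tools/train.py --cfg experiments/siamrpn_r50_l234_dwxcorr/config.yaml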
|
the-stack_0_13097 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torch
import unittest
from monai.losses import FocalLoss
class TestFocalLoss(unittest.TestCase):
def test_consistency_with_cross_entropy_2d(self):
# For gamma=0 the focal loss reduces to the cross entropy loss
focal_loss = FocalLoss(gamma=0.0, reduction="mean")
ce = nn.CrossEntropyLoss(reduction="mean")
max_error = 0
class_num = 10
batch_size = 128
for _ in range(100):
# Create a random tensor of shape (batch_size, class_num, 8, 4)
x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True)
# Create a random batch of classes
l = torch.randint(low=0, high=class_num, size=(batch_size, 8, 4))
l = l.long()
if torch.cuda.is_available():
x = x.cuda()
l = l.cuda()
output0 = focal_loss.forward(x, l)
output1 = ce.forward(x, l)
a = float(output0.cpu().detach())
b = float(output1.cpu().detach())
if abs(a - b) > max_error:
max_error = abs(a - b)
self.assertAlmostEqual(max_error, 0.0, places=3)
def test_consistency_with_cross_entropy_classification(self):
# for gamma=0 the focal loss reduces to the cross entropy loss
focal_loss = FocalLoss(gamma=0.0, reduction="mean")
ce = nn.CrossEntropyLoss(reduction="mean")
max_error = 0
class_num = 10
batch_size = 128
for _ in range(100):
# Create a random scores tensor of shape (batch_size, class_num)
x = torch.rand(batch_size, class_num, requires_grad=True)
# Create a random batch of classes
l = torch.randint(low=0, high=class_num, size=(batch_size,))
l = l.long()
if torch.cuda.is_available():
x = x.cuda()
l = l.cuda()
output0 = focal_loss.forward(x, l)
output1 = ce.forward(x, l)
a = float(output0.cpu().detach())
b = float(output1.cpu().detach())
if abs(a - b) > max_error:
max_error = abs(a - b)
self.assertAlmostEqual(max_error, 0.0, places=3)
def test_bin_seg_2d(self):
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
# Same test, but for target with a class dimension
target = target.unsqueeze(1) # shape (1, 1, H, W)
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_empty_class_2d(self):
num_classes = 2
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_multi_class_seg_2d(self):
num_classes = 6 # labels 0 to 5
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_bin_seg_3d(self):
# define 2d examples
target = torch.tensor(
[
# raw 0
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# raw 1
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# raw 2
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
]
)
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W, D)
pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 4, 1, 2, 3).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_convergence(self):
"""
The goal of this test is to assess if the gradient of the loss function
is correct by testing if we can train a one layer neural network
to segment one image.
We verify that the loss is decreasing in almost all SGD steps.
"""
learning_rate = 0.001
max_iter = 20
# define a simple 3d example
target_seg = torch.tensor(
[
# raw 0
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# raw 1
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# raw 2
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
]
)
target_seg = torch.unsqueeze(target_seg, dim=0)
image = 12 * target_seg + 27
image = image.float()
num_classes = 2
num_voxels = 3 * 4 * 4
# define a one layer model
class OnelayerNet(nn.Module):
def __init__(self):
super(OnelayerNet, self).__init__()
self.layer = nn.Linear(num_voxels, num_voxels * num_classes)
def forward(self, x):
x = x.view(-1, num_voxels)
x = self.layer(x)
x = x.view(-1, num_classes, 3, 4, 4)
return x
# initialise the network
net = OnelayerNet()
# initialize the loss
loss = FocalLoss()
# initialize an SGD
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
loss_history = []
# train the network
for _ in range(max_iter):
# set the gradient to zero
optimizer.zero_grad()
# forward pass
output = net(image)
loss_val = loss(output, target_seg)
# backward pass
loss_val.backward()
optimizer.step()
# stats
loss_history.append(loss_val.item())
# count the number of SGD steps in which the loss decreases
num_decreasing_steps = 0
for i in range(len(loss_history) - 1):
if loss_history[i] > loss_history[i + 1]:
num_decreasing_steps += 1
decreasing_steps_ratio = float(num_decreasing_steps) / (len(loss_history) - 1)
# verify that the loss is decreasing for sufficiently many SGD steps
self.assertTrue(decreasing_steps_ratio > 0.9)
if __name__ == "__main__":
unittest.main()
|