ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py
|
1a56039379a4a408313ddf7787cee268d2df4898
|
from omnibelt import agnosticmethod
class Extractor:
@agnosticmethod
def extract(self, observation):
raise NotImplementedError
class Encoder(Extractor):
@agnosticmethod
def extract(self, observation):
return self.encode(observation)
@agnosticmethod
def encode(self, observation):
raise NotImplementedError
class Decoder:
@agnosticmethod
def decode(self, latent):
raise NotImplementedError
class Generator:
@agnosticmethod
def sample(self, *shape, gen=None):
raise NotImplementedError
class Discriminator:
@agnosticmethod
def judge(self, observation):
raise NotImplementedError
class Augmentation:
@agnosticmethod
def augment(self, observation):
raise NotImplementedError
class Criterion:
@agnosticmethod
def compare(self, observation1, observation2):
raise NotImplementedError
class Metric(Criterion): # obeys triangle inequality
@agnosticmethod
def distance(self, observation1, observation2):
raise NotImplementedError
@agnosticmethod
def compare(self, observation1, observation2):
return self.distance(observation1, observation2)
class PathCriterion(Criterion):
@agnosticmethod
def compare(self, observation1, observation2):
return self.compare_path(observation1, observation2)
@agnosticmethod
def compare_path(self, path1, path2):
raise NotImplementedError
class Interpolator: # returns N steps to get from start to finish ("evenly spaced", by default)
@staticmethod
def interpolate(start, end, N):
raise NotImplementedError
class Estimator:
@agnosticmethod
def predict(self, observation):
raise NotImplementedError
class Invertible:
@agnosticmethod
def forward(self, observation):
raise NotImplementedError
@agnosticmethod
def inverse(self, observation):
raise NotImplementedError
class Compressor:
@staticmethod
def compress(observation):
raise NotImplementedError
@staticmethod
def decompress(data):
raise NotImplementedError
class Quantizer:
@staticmethod
def quantize(observation): # generally "removes" noise
raise NotImplementedError
@staticmethod
def dequantize(observation): # generally adds noise
raise NotImplementedError
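# --- Illustrative sketch only (not part of the original module): a minimal
# concrete Metric, assuming observations are plain equal-length numeric
# sequences. It just shows how the agnosticmethod-based interfaces compose.
class EuclideanMetric(Metric):
    @agnosticmethod
    def distance(self, observation1, observation2):
        # plain Euclidean distance between two numeric sequences
        return sum((a - b) ** 2 for a, b in zip(observation1, observation2)) ** 0.5
# EuclideanMetric().compare([0, 0], [3, 4]) -> 5.0, since Metric.compare
# simply delegates to distance.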
|
py
|
1a5603c77720c05b637daf464f4ff0a1186d8d22
|
# Generated by Django 3.1 on 2020-11-06 08:44
from collections import defaultdict
from django.db import migrations
LEVELS_MAPPING = {"one": 1, "two": 2, "three": 3}
TAG_MAPPING = {
"BOLD": "b",
"ITALIC": "i",
"STRIKETHROUGH": "s",
"CODE": "code",
}
def parse_to_editorjs(data):
blocks = data.get("blocks")
entity_map = data.get("entityMap")
if not blocks:
return data
editor_js_blocks = []
list_data = {}
for block in blocks:
# if block doesn't have key it means it isn't in draft.js format
if "key" not in block:
return data
key = block["type"]
inline_style_ranges = block["inlineStyleRanges"]
entity_ranges = block["entityRanges"]
text = block["text"]
text = parse_text(text, inline_style_ranges, entity_ranges, entity_map)
type, data = get_block_data(text, key, list_data, editor_js_blocks)
if not type:
continue
new_block = {
"type": type,
"data": data,
}
editor_js_blocks.append(new_block)
return {"blocks": editor_js_blocks}
def parse_text(text, style_ranges, entity_ranges, entity_map):
operations = defaultdict(list)
prepare_operations(operations, style_ranges, entity_map, False)
prepare_operations(operations, entity_ranges, entity_map, True)
parsed_text = ""
previous_index = 0
# insert html element on specified indexes
for offset, tags in operations.items():
end_index = offset + 1
parsed_text += text[previous_index:end_index]
parsed_text += "".join(tags)
previous_index = offset + 1
parsed_text += text[previous_index:]
return parsed_text
def prepare_operations(operations, ranges, entity_map, entity):
"""Prepare operations dict defining operations on specific indexes.
Data format:
- key: index value
- value: list of html elements that should be insert into text on specific index
"""
for range_date in ranges:
tag = "a" if entity else TAG_MAPPING[range_date["style"]]
offset = range_date["offset"]
length = offset + range_date["length"] - 1
if entity:
entity_key = str(range_date["key"])
href = entity_map[entity_key]["data"]["url"]
start_tag = f'{tag} href="{href}"'
else:
start_tag = tag if tag != "code" else tag + ' class="inline-code"'
operations[offset - 1].append(f"<{start_tag}>")
operations[length] = [f"</{tag}>"] + operations[length]
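# Worked example (added for clarity, not in the original migration): for the
# text "Test" with a single style range {"offset": 0, "length": 4, "style": "BOLD"},
# prepare_operations builds {-1: ["<b>"], 3: ["</b>"]}, and parse_text then
# emits "<b>Test</b>".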
def get_block_data(text, key, list_data, editor_js_blocks):
"""Prepare editorjs blocks based on draftjs blocks.
Draftjs types are replaced with the corresponding editorjs types.
Lists must be handled specially: in draftjs every list item is a separate block,
but in editorjs all list items are kept in a single list block.
"""
# if the list_data is not empty and list elements ended, append list block
if list_data and "list-item" not in key:
list_block = {"type": "list", "data": list_data}
editor_js_blocks.append(list_block)
list_data = {}
if "list-item" in key:
style = key.split("-")[0]
# if the list data is not empty and list style is the same as current block,
# just append list element to the list data
if list_data and list_data["style"] == style:
list_data["items"].append(text)
else:
# if the list data is not empty it means that list style has been changed,
# in this situation create new block from existing list data and
# override the list data with the new data
if list_data:
list_block = {"type": "list", "data": list_data}
editor_js_blocks.append(list_block)
list_data = {"style": style, "items": [text]}
return None, None
data = {"text": text}
if key.startswith("header"):
level = LEVELS_MAPPING[key.split("-")[1]]
type = "header"
data["level"] = level
elif key == "blockquote":
type = "quote"
data["alignment"] = "left"
elif key == "code-block":
type = "code"
else:
type = "paragraph"
return type, data
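# Example mapping (added for clarity): a draftjs block of type "header-two"
# with text "Title" becomes the editorjs block
# {"type": "header", "data": {"text": "Title", "level": 2}},
# while unknown types fall back to "paragraph".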
def migrate_draftjs_to_editorjs_format(apps, schema_editor):
Page = apps.get_model("page", "Page")
PageTranslation = apps.get_model("page", "PageTranslation")
for model in [Page, PageTranslation]:
migrate_model_field_data(model)
def migrate_model_field_data(Model):
queryset = Model.objects.all().order_by("pk")
for batch_pks in queryset_in_batches(queryset):
instances = []
batch = Model.objects.filter(pk__in=batch_pks)
for instance in batch:
if instance.content_json:
instance.content_json = parse_to_editorjs(instance.content_json)
instances.append(instance)
Model.objects.bulk_update(instances, ["content_json"])
def queryset_in_batches(queryset):
"""Slice a queryset into batches.
Input queryset should be sorted by pk.
"""
start_pk = 0
while True:
qs = queryset.filter(pk__gt=start_pk)[:2000]
pks = list(qs.values_list("pk", flat=True))
if not pks:
break
yield pks
start_pk = pks[-1]
class Migration(migrations.Migration):
dependencies = [
("page", "0014_add_metadata"),
]
operations = [
migrations.RunPython(
migrate_draftjs_to_editorjs_format, migrations.RunPython.noop
)
]
|
py
|
1a56058ee7acc1896cb631819f49359b20c3ca6a
|
# :coding: utf-8
import re
import functools
from .helper import collapse_all
from .helper import get_docstring
#: Regular Expression pattern for data
_DATA_PATTERN = re.compile(
r"(?P<start_regex>(\n|^)) *(?P<export>export +)?(?P<default>default +)?"
r"(?P<type>(const|let|var)) (?P<name>[\w._-]+) *= *(?P<value>.+?;)",
re.DOTALL
)
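# For example, the pattern above matches declarations such as:
#     export const DATA = 42;
#     let name = "value";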
def fetch_environment(content, module_id):
"""Return data environment dictionary from *content*.
*module_id* represents the identifier of the module.
The environment is in the form of::
{
"moduleName.DATA": {
"id": "moduleName.DATA",
"module_id": "moduleName",
"exported": False,
"default": False,
"name": "DATA",
"value": "42",
"type": "const",
"line_number": 2,
"description": "Variable doc.\\n\\nDetailed description."
}
}
"""
environment = {}
lines = content.split("\n")
# Comments are filtered out during the collapse process so that the entire
# value is preserved (including semi-colons and docstrings!)
content, collapsed_content = collapse_all(content, filter_comment=True)
for match in _DATA_PATTERN.finditer(content):
data_id = ".".join([module_id, match.group("name")])
line_number = (
content[:match.start()].count("\n") +
match.group("start_regex").count("\n") + 1
)
value = match.group("value")
if "{}" in value and line_number in collapsed_content.keys():
value = value.replace("{}", collapsed_content[line_number])
# Do not keep semi-colon in value
if value.endswith(";"):
value = value[:-1]
data_environment = {
"id": data_id,
"module_id": module_id,
"exported": match.group("export") is not None,
"default": match.group("default") is not None,
"name": match.group("name"),
"value": functools.reduce(_clean_value, value.split('\n')).strip(),
"type": match.group("type"),
"line_number": line_number,
"description": get_docstring(line_number, lines)
}
environment[data_id] = data_environment
return environment
def _clean_value(line1, line2):
"""Clean up variable value for display."""
_line1 = line1.strip()
_line2 = line2.strip()
# Leave a trailing space to make the value easier to read
if _line1[-1:] in ["{", "}", "(", ")", "[", "]", ";", ","]:
_line1 += " "
return _line1 + _line2
|
py
|
1a5606bb05118a3ff40d2e3f0c2fb107f55d590f
|
# General-purpose Python library imports
import sys
import traceback
# AppScale library imports
from .. import version_helper
from ..appscale_tools import AppScaleTools
from ..local_state import LocalState
from ..parse_args import ParseArgs
version_helper.ensure_valid_python_is_used()
def main():
""" Execute appscale-remove-app script. """
options = ParseArgs(sys.argv[1:], "appscale-remove-app").args
try:
AppScaleTools.remove_app(options)
sys.exit(0)
except Exception as e:
LocalState.generate_crash_log(e, traceback.format_exc())
sys.exit(1)
|
py
|
1a560a96d93603024cededc8a9a5e661c554b7fb
|
#! /usr/bin/env python
################################################################################
#
# DemoFusion.py
#
"""Fusion Demo
The Fusion Demo demonstrates a command-line interface to the Fusion Reactor
application.
Author: Robin D. Knight
Email: [email protected]
URL: http://www.roadnarrowsrobotics.com
Date: 2005.01.05
Copyright (C) 2006. RoadNarrows LLC.
"""
#
# All Rights Reserved
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
################################################################################
import sys
import getopt
import Fusion.Core.Reactor as Reactor
_Argv0 = ''
#--
class Usage(Exception):
""" Command-Line Options Usage Exception Class. """
def __init__(self, msg):
self.msg = msg
#--
def PrintUsageErr(emsg):
""" Print Error Usage Message. """
if emsg:
print("%s: %s" % (_Argv0, emsg))
else:
print("%s: error" % (_Argv0))
print("Try '%s --help' for more information." % (_Argv0))
#--
def PrintUsage():
""" Print Fusion Command-Line Usage Message """
print("usage: %s [options]..." % (_Argv0))
print("""Options and arguments:
-i, --ini <filename> : additional ini configuration file. Default: None
--debuglevel <num> : debug level 0=off, 1 - 5. Default: 0
--debugfile <filename> : debug output filename. Default: stdout
-h, --help : Display this help and exit.
Environment variables:
FUSION : points to the Fusion package base directory
FUSIONSTARTUP : a site/user standard ini configuration file
""")
#--
def main(argv=None, **kwargs):
""" Fusion Main. """
global _Argv0
print('main')
if argv is None:
argv = sys.argv
print('args', repr(argv))
_Argv0 = argv[0]
#if 'argv0' in kwargs:
# _Argv0 = kwargs['argv0']
#else:
# _Argv0 = __file__
# Reactor defaults
kwargs = {'vRobot': None, 'vBrain': None, 'iniFileName': None,
'debuglevel': 0, 'debugfout': None}
try:
try:
opts, args = getopt.getopt(argv[1:], "?hi:",
['help', 'ini=', 'debuglevel=', 'debugfile='])
except getopt.error as msg:
raise Usage(msg)
for opt, optarg in opts:
if opt in ('-h', '--help', '-?'):
PrintUsage()
return 0
elif opt in ('-i', '--ini'):
kwargs['iniFileName'] = optarg
elif opt in ('--debuglevel',):
try:
kwargs['debuglevel'] = int(optarg)
except ValueError as msg:
raise Usage(msg)
elif opt in ('--debugfile',):
try:
fout = open(optarg, 'w')
except IOError as msg:
raise Usage(msg)
kwargs['debugfout'] = fout
except Usage as err:
PrintUsageErr(err.msg)
return 2
reactor = Reactor.Reactor(**kwargs)
reactor.mGuiRoot.mainloop()
return 0
# run fusion
if __name__ == "__main__":
sys.exit( main(argv=None) )
|
py
|
1a560b57de26a96dae9caad4ef1cc2afba1ada7d
|
result = 0
instructions = []
registers = {
"a": 0,
"b": 0
}
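# Expected input format (assumed from the parsing below): one instruction per
# line, e.g. "inc a", "hlf b", "tpl a", "jmp +7", "jie a, +4", "jio a, +2".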
with open("input.txt", "r") as input:
for line in input:
line = line.strip().replace(",","").split()
opcode = line[0]
if opcode == "jmp":
offset = int(line[1])
reg = None
else:
offset = None
reg = line[1]
if opcode in ["jie", "jio"]:
offset = int(line[2])
instructions.append({
"opcode": opcode,
"reg": reg,
"offset": offset
})
i = 0
while 0 <= i < len(instructions):
instruction = instructions[i]
opcode = instruction["opcode"]
reg = instruction["reg"]
offset = instruction["offset"]
if opcode == "jmp":
i += offset
elif opcode == "jie":
if registers[reg] % 2 == 0:
i += offset
else:
i += 1
elif opcode == "jio":
if registers[reg] == 1:
i += offset
else:
i += 1
else:
i += 1
if opcode == "inc":
registers[reg] += 1
elif opcode == "hlf":
registers[reg] //= 2
elif opcode == "tpl":
registers[reg] *= 3
elif opcode not in ("jmp", "jie", "jio"):
print("ERROR")
result = registers["b"]
with open("output1.txt", "w") as output:
output.write(str(result))
print(str(result))
|
py
|
1a560bc4a36110ba6df6eb6ea341df5d5315c9cf
|
#!/usr/bin/env python
import os
import pwd
import sys
import time
from Pegasus.DAX3 import *
# The name of the DAX file is the first argument
if len(sys.argv) != 2:
sys.stderr.write("Usage: %s DAXFILE\n" % (sys.argv[0]))
sys.exit(1)
daxfile = sys.argv[1]
USER = pwd.getpwuid(os.getuid())[0]
# Create an abstract dag
dax = ADAG("split")
# Add some workflow-level metadata
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())
webpage = File("pegasus.html")
# the split job that splits the webpage into smaller chunks
split = Job("split")
split.addArguments("-l","100","-a","1",webpage,"part.")
split.uses(webpage, link=Link.INPUT)
# associate the label with the job. all jobs with same label
# are run with PMC when doing job clustering
split.addProfile( Profile("pegasus","label","p1"))
dax.addJob(split)
# we do a parameter sweep on the first 4 chunks created
for c in "abcd":
part = File("part.%s" % c)
split.uses(part, link=Link.OUTPUT, transfer=False, register=False)
count = File("count.txt.%s" % c)
wc = Job("wc")
wc.addProfile( Profile("pegasus","label","p1"))
wc.addArguments("-l",part)
wc.setStdout(count)
wc.uses(part, link=Link.INPUT)
wc.uses(count, link=Link.OUTPUT, transfer=True, register=True)
dax.addJob(wc)
#adding dependency
dax.depends(wc, split)
f = open(daxfile, "w")
dax.writeXML(f)
f.close()
print "Generated dax %s" %daxfile
|
py
|
1a560ccbfca4e0f822d9753fac87d4eb967dacea
|
import pytest
from celery.result import EagerResult
from index_auth_service.users.tasks import get_users_count
from index_auth_service.users.tests.factories import UserFactory
@pytest.mark.django_db
def test_user_count(settings):
"""A basic test to execute the get_users_count Celery task."""
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
|
py
|
1a560ccd0ca4ae5ee8b96c79c0bd0e70874eb088
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Project
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.sshClient import SshClient
import datetime
class Services:
"""Test Project Services
"""
def __init__(self):
self.services = {
"domain": {
"name": "Domain",
},
"project": {
"name": "Project",
"displaytext": "Test project",
},
"mgmt_server": {
"ipaddress": '192.168.100.21',
"username": 'root',
"password": 'password',
"port": 22,
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"user": {
"email": "[email protected]",
"firstname": "User",
"lastname": "User",
"username": "User",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"template": {
"displaytext": "Public Template",
"name": "Public template",
"ostype": 'CentOS 5.3 (64-bit)',
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
"hypervisor": 'XenServer',
"format": 'VHD',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
},
"configs": {
"project.invite.timeout": 300,
},
"ostype": 'CentOS 5.3 (64-bit)',
# Cent OS 5.3 (64 bit)
"sleep": 60,
"timeout": 10,
}
class TestUserProjectCreation(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUserProjectCreation, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
configs = Configurations.list(
cls.api_client,
name='allow.user.create.projects'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: allow.user.create.projects")
elif (configs[0].value).lower() != 'true':
raise unittest.SkipTest("'allow.user.create.projects' should be true")
# Create domains, account etc.
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user, cls.domain]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(configuration = "allow.user.create.projects")
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator"], required_hardware="false")
def test_admin_project_creation(self):
"""Test create project as a domain admin and domain user
"""
# Validate the following
# 1. Check if 'allow.user.create.projects' configuration is true
# 2. Create a Project as domain admin
# 3. Create a Project as domain user
# 4. In both 2 and 3 project creation should be successful
configs = Configurations.list(
self.apiclient,
name='allow.user.create.projects'
)
self.assertEqual(
isinstance(configs, list),
True,
"Check for a valid list configurations response"
)
config = configs[0]
self.assertEqual(
(config.value).lower(),
'true',
"'allow.user.create.projects' should be true"
)
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.user.name,
domainid=self.user.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain user with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
return
class TestProjectCreationNegative(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectCreationNegative, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
# Checking for prerequisites - global configs
configs = Configurations.list(
cls.api_client,
name='allow.user.create.projects'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: allow.user.create.projects")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'allow.user.create.projects' should be false")
# Create domains, account etc.
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user, cls.domain]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(configuration = "allow.user.create.projects")
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_user_project_creation(self):
"""Test create project as a domain admin and domain user
"""
# Validate the following
# 1. Check if 'allow.user.create.projects' configuration is false
# 2. Create a Project as domain admin. Project creation should be
# successful.
# 3. Create a Project as domain user. Project creation should fail
configs = Configurations.list(
self.apiclient,
name='allow.user.create.projects'
)
self.assertEqual(
isinstance(configs, list),
True,
"Check for a valid list configurations response"
)
config = configs[0]
self.assertEqual(
(config.value).lower(),
'false',
"'allow.user.create.projects' should be true"
)
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
with self.assertRaises(Exception):
project = Project.create(
self.apiclient,
self.services["project"],
account=self.user.name,
domainid=self.user.domainid
)
self.debug("Project creation with domain user: %s failed" %
self.user.name)
return
class TestProjectInviteRequired(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectInviteRequired, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
# Verify 'project.invite.required' is set to false
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("The 'project.invite.required' is not found in global configs")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be false")
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["user"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns"], required_hardware="false")
def test_add_user_to_project(self):
"""Add user to project when 'project.invite.required' is false"""
# Validate the following:
# 1. Create a Project
# 2. Add users to the project. Verify user is added to project
# as regular user
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.user[0].email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
return
class TestProjectInviteRequiredTrue(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectInviteRequiredTrue, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
# Verify 'project.invite.required' is set to true
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("The 'project.invite.required' is not found in global configs")
elif (configs[0].value).lower() != 'true':
raise unittest.SkipTest("'project.invite.required' should be true")
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["user"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(configuration = "project.invite.required")
@attr(tags=["advanced", "basic", "sg", "eip", "advancedns"], required_hardware="false")
def test_add_user_to_project(self):
"""Add user to project when 'project.invite.required' is true"""
# Validate the following:
# 1. Create a Project
# 2. Add users to the project. verify user is shown in pending state
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.user[0].email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
return
class TestProjectInviteTimeout(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestProjectInviteTimeout, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(cls.api_client)
# Verify 'project.invite.required' is set to true
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("The 'project.invite.required' is not found in global configs")
elif (configs[0].value).lower() != 'true':
raise unittest.SkipTest("'project.invite.required' should be true")
# Verify 'project.invite.timeout' is set to 300
configs = Configurations.list(
cls.api_client,
name='project.invite.timeout'
)
if not isinstance(configs, list):
raise unittest.SkipTest("The 'project.invite.timeout' is not found in global configs")
elif int(configs[0].value) != cls.services["configs"]["project.invite.timeout"]:
raise unittest.SkipTest("'project.invite.timeout' should be: %s " %
cls.services["configs"]["project.invite.timeout"])
cls.config = configs[0]
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["user"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(configuration = "project.invite.timeout")
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_01_invitation_timeout(self):
"""Test global config project invitation timeout"""
# Validate the following:
# 1. Set configuration to 5 mins
# 2. Create a Project
# 3. Add users to the project
# 4. As a user accept invitation within 5 mins. Verify invitation is
# accepted and user become regular user of project
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.user[0].email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
# Accept the invite
ProjectInvitation.update(
self.apiclient,
projectid=project.id,
accept=True,
account=self.user.name
)
self.debug(
"Accepting project invitation for project: %s user: %s" % (
project.name,
self.user.name
))
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
return
@attr(configuration = "project.invite.timeout")
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_02_invitation_timeout_after_expiry(self):
"""Test global config project invitation timeout"""
# Validate the following:
# 1. Set configuration to 5 mins
# 2. Create a Project
# 3. Add users to the project
# 4. As a user accept invitation after 5 mins. Verify invitation is
# not accepted and is shown as expired
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
# sleep for 'project.invite.timeout' * 2 interval to wait for invite
# to expire
time.sleep(int(self.config.value) * 2)
with self.assertRaises(Exception):
# Accept the invite
ProjectInvitation.update(
self.apiclient,
projectid=project.id,
accept=True,
account=self.user.name
)
self.debug(
"Accepting invitation after expiry project: %s user: %s" % (
project.name,
self.user.name
))
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Expired',
"Newly added user is not added as a regular user"
)
return
@attr(configuration = "project.invite.timeout")
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_03_invite_after_expiry(self):
"""Test global config project invitation timeout"""
# Validate the following:
# 1. Set configuration to 5 mins
# 2. Create a Project
# 3. Add users to the project
# 4. As a user accept invitation after 5 mins.
# 5. Resend the invitation
# 6. Verify invitation is sent again
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
# sleep for 'project.invite.timeout' * 2 interval to wait for invite
# to expire
time.sleep(int(self.config.value) * 2)
self.debug("Adding %s user again to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
return
@attr(configuration = "project.invite.timeout")
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_04_decline_invitation(self):
"""Test decline invitation"""
# Validate the following:
# 1. Set configuration to 5 mins
# 2. Create a Project
# 3. Add users to the project
# 4. As a user decline invitation within 5 mins.
# 5. Verify invitation is rejected and user doesn't become regular
# user.
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.name,
self.user.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = ProjectInvitation.list(
self.apiclient,
state='Pending',
account=self.user.name,
domainid=self.user.domainid
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.state,
'Pending',
"Newly added user is not added as a regular user"
)
# Accept the invite
ProjectInvitation.update(
self.apiclient,
projectid=project.id,
accept=False,
account=self.user.name
)
self.debug(
"Declining invitation for project: %s user: %s" % (
project.name,
self.user.name
))
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.name,
)
self.assertEqual(
accounts_reponse,
None,
"Check for a valid list accounts response"
)
return
def test_09_invite_to_project_by_email(self):
"""Test invite user to project by email"""
# Validate the following:
# 1. Verify 'project.invite.timeout' is set to the expected value
# 2. Create a Project
# 3. Invite a user to the project by email address
# 4. Verify the invitation mail is sent to that email address
configs = Configurations.list(
self.apiclient,
name='project.invite.timeout'
)
self.assertEqual(
isinstance(configs, list),
True,
"Check for a valid list configurations response"
)
config = configs[0]
self.assertEqual(
int(config.value),
self.services["configs"]["project.invite.timeout"],
"'project.invite.timeout' should be %s" %
self.services["configs"]["project.invite.timeout"]
)
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding user with email: %s to project: %s" % (
self.user.email,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
email=self.user.user[0].email
)
# Fetch the latest mail sent to user
mail_content = fetch_latest_mail(
self.services["mail_account"],
from_mail=self.user.user[0].email
)
return
|
py
|
1a560d5d2a1cb518a780daec1fd9f4b99b5fd59d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from time import localtime, strftime
import argparse
from argparse import RawTextHelpFormatter
from gdcmdtools.rm import GDRm
from gdcmdtools.base import BASE_INFO
from gdcmdtools.base import DEBUG_LEVEL
from gdcmdtools.perm import help_permission_text
import csv
import pprint
__THIS_APP = 'gdrm'
__THIS_DESCRIPTION = 'Tool to remove file or folder on Google Drive'
__THIS_VERSION = BASE_INFO["version"]
import logging
logger = logging.getLogger(__THIS_APP)
def test():
assert True
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='%s v%s - %s - %s (%s)' %
(__THIS_APP,
__THIS_VERSION,
__THIS_DESCRIPTION,
BASE_INFO["app"],
BASE_INFO["description"]),
formatter_class=RawTextHelpFormatter)
arg_parser.add_argument(
'-d',
'--delete',
action='store_true',
help='Permanently deletes the file instead of trashing it')
arg_parser.add_argument(
'file_id',
help='The file id or drive link for the file you\'re going to remove')
arg_parser.add_argument('--debug',
choices=DEBUG_LEVEL,
default=DEBUG_LEVEL[-1],
help='define the debug level')
args = arg_parser.parse_args()
# set debug level
logger.setLevel(getattr(logging, args.debug.upper()))
logger.debug(args)
rm = GDRm(args)
try:
response = rm.run()
except:
raise
logger.debug(pprint.pformat(response))
sys.exit(0)
|
py
|
1a560dc75c825556d13abe410263ede5657ca683
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import numba
import numba.core.typing
import numba.core.typing.ctypes_utils
import awkward as ak
numpy = ak.nplike.Numpy.instance()
dynamic_addrs = {}
def globalstring(context, builder, pyvalue):
import llvmlite.ir.types
if pyvalue not in dynamic_addrs:
buf = dynamic_addrs[pyvalue] = numpy.array(pyvalue.encode("utf-8") + b"\x00")
context.add_dynamic_addr(
builder, buf.ctypes.data, info="str({0})".format(repr(pyvalue))
)
ptr = context.get_constant(numba.types.uintp, dynamic_addrs[pyvalue].ctypes.data)
return builder.inttoptr(
ptr, llvmlite.llvmpy.core.Type.pointer(llvmlite.llvmpy.core.Type.int(8))
)
class ArrayBuilderType(numba.types.Type):
def __init__(self, behavior):
super(ArrayBuilderType, self).__init__(
name="ak.ArrayBuilderType({0})".format(
ak._connect._numba.repr_behavior(behavior)
)
)
self.behavior = behavior
@numba.extending.register_model(ArrayBuilderType)
class ArrayBuilderModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [("rawptr", numba.types.voidptr), ("pyptr", numba.types.pyobject)]
super(ArrayBuilderModel, self).__init__(dmm, fe_type, members)
@numba.core.imputils.lower_constant(ArrayBuilderType)
def lower_const_ArrayBuilder(context, builder, arraybuildertype, arraybuilder):
layout = arraybuilder._layout
rawptr = context.get_constant(numba.intp, arraybuilder._layout._ptr)
proxyout = context.make_helper(builder, arraybuildertype)
proxyout.rawptr = builder.inttoptr(
rawptr, context.get_value_type(numba.types.voidptr)
)
proxyout.pyptr = context.add_dynamic_addr(
builder, id(layout), info=str(type(layout))
)
return proxyout._getvalue()
@numba.extending.unbox(ArrayBuilderType)
def unbox_ArrayBuilder(arraybuildertype, arraybuilderobj, c):
inner_obj = c.pyapi.object_getattr_string(arraybuilderobj, "_layout")
rawptr_obj = c.pyapi.object_getattr_string(inner_obj, "_ptr")
proxyout = c.context.make_helper(c.builder, arraybuildertype)
proxyout.rawptr = c.pyapi.long_as_voidptr(rawptr_obj)
proxyout.pyptr = inner_obj
c.pyapi.decref(inner_obj)
c.pyapi.decref(rawptr_obj)
is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(proxyout._getvalue(), is_error)
@numba.extending.box(ArrayBuilderType)
def box_ArrayBuilder(arraybuildertype, arraybuilderval, c):
ArrayBuilder_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(ak.highlevel.ArrayBuilder)
)
behavior_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(arraybuildertype.behavior)
)
proxyin = c.context.make_helper(c.builder, arraybuildertype, arraybuilderval)
c.pyapi.incref(proxyin.pyptr)
out = c.pyapi.call_method(ArrayBuilder_obj, "_wrap", (proxyin.pyptr, behavior_obj))
c.pyapi.decref(ArrayBuilder_obj)
c.pyapi.decref(behavior_obj)
c.pyapi.decref(proxyin.pyptr)
return out
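# Helper (comment added for clarity): wraps the ctypes function *fcn* as a
# native function pointer, invokes it with *args*, and raises ValueError at
# runtime if the returned error code is non-zero.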
def call(context, builder, fcn, args):
numbatype = numba.core.typing.ctypes_utils.make_function_type(fcn)
fcntype = context.get_function_pointer_type(numbatype)
fcnval = context.add_dynamic_addr(
builder, numbatype.get_pointer(fcn), info=fcn.name
)
fcnptr = builder.bitcast(fcnval, fcntype)
err = context.call_function_pointer(builder, fcnptr, args)
with builder.if_then(
builder.icmp_unsigned("!=", err, context.get_constant(numba.uint8, 0)),
likely=False,
):
context.call_conv.return_user_exc(builder, ValueError, (fcn.name + " failed",))
@numba.core.typing.templates.infer_global(len)
class type_len(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], ArrayBuilderType)
):
return numba.intp(args[0])
@numba.extending.lower_builtin(len, ArrayBuilderType)
def lower_len(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
result = numba.core.cgutils.alloca_once(
builder, context.get_value_type(numba.int64)
)
call(
context, builder, ak._libawkward.ArrayBuilder_length, (proxyin.rawptr, result),
)
return ak._connect._numba.castint(
context, builder, numba.int64, numba.intp, builder.load(result)
)
@numba.core.typing.templates.infer_getattr
class type_methods(numba.core.typing.templates.AttributeTemplate):
key = ArrayBuilderType
@numba.core.typing.templates.bound_function("clear")
def resolve_clear(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.clear"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("null")
def resolve_null(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.null"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("boolean")
def resolve_boolean(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.Boolean)
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.boolean"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("integer")
def resolve_integer(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.Integer)
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.integer"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("real")
def resolve_real(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (numba.types.Integer, numba.types.Float))
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.real"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("begin_list")
def resolve_begin_list(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.begin_list"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("end_list")
def resolve_end_list(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.end_list"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("begin_tuple")
def resolve_begin_tuple(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.Integer)
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.begin_tuple"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("index")
def resolve_index(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.Integer)
):
return arraybuildertype(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.index"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("end_tuple")
def resolve_end_tuple(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.end_tuple"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("begin_record")
def resolve_begin_record(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
elif (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.StringLiteral)
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.begin_record"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("field")
def resolve_field(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.StringLiteral)
):
return arraybuildertype(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.field"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("end_record")
def resolve_end_record(self, arraybuildertype, args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return numba.types.none()
else:
raise TypeError(
"wrong number of arguments for ArrayBuilder.end_record"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("append")
def resolve_append(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(
args[0],
(
ak._connect._numba.arrayview.ArrayViewType,
ak._connect._numba.arrayview.RecordViewType,
numba.types.Boolean,
numba.types.Integer,
numba.types.Float,
),
)
):
return numba.types.none(args[0])
elif (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.Optional)
and isinstance(
args[0].type,
(numba.types.Boolean, numba.types.Integer, numba.types.Float),
)
):
return numba.types.none(args[0])
elif (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], numba.types.NoneType)
):
return numba.types.none(args[0])
elif (
len(args) == 2
and len(kwargs) == 0
and isinstance(args[0], ak._connect._numba.arrayview.ArrayViewType)
and isinstance(args[1], numba.types.Integer)
):
return numba.types.none(args[0], args[1])
else:
if len(args) == 1 and arraybuildertype.behavior is not None:
for key, lower in arraybuildertype.behavior.items():
if (
isinstance(key, tuple)
and len(key) == 3
and key[0] == "__numba_lower__"
and key[1] == ak.highlevel.ArrayBuilder.append
and (
args[0] == key[2]
or (
isinstance(key[2], type) and isinstance(args[0], key[2])
)
)
):
numba.extending.lower_builtin(
"append", ArrayBuilderType, args[0]
)(lower)
return numba.types.none(args[0])
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.append"
+ ak._util.exception_suffix(__file__)
)
@numba.core.typing.templates.bound_function("extend")
def resolve_extend(self, arraybuildertype, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], ak._connect._numba.arrayview.ArrayViewType)
):
return numba.types.none(args[0])
else:
raise TypeError(
"wrong number or types of arguments for ArrayBuilder.extend"
+ ak._util.exception_suffix(__file__)
)
@numba.extending.lower_builtin("clear", ArrayBuilderType)
def lower_clear(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_clear, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin("null", ArrayBuilderType)
def lower_null(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_null, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin("boolean", ArrayBuilderType, numba.types.Boolean)
def lower_boolean(context, builder, sig, args):
arraybuildertype, xtype = sig.args
arraybuilderval, xval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
x = builder.zext(xval, context.get_value_type(numba.uint8))
call(context, builder, ak._libawkward.ArrayBuilder_boolean, (proxyin.rawptr, x))
return context.get_dummy_value()
@numba.extending.lower_builtin("integer", ArrayBuilderType, numba.types.Integer)
def lower_integer(context, builder, sig, args):
arraybuildertype, xtype = sig.args
arraybuilderval, xval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
x = ak._connect._numba.castint(context, builder, xtype, numba.int64, xval)
call(context, builder, ak._libawkward.ArrayBuilder_integer, (proxyin.rawptr, x))
return context.get_dummy_value()
@numba.extending.lower_builtin("real", ArrayBuilderType, numba.types.Integer)
@numba.extending.lower_builtin("real", ArrayBuilderType, numba.types.Float)
def lower_real(context, builder, sig, args):
arraybuildertype, xtype = sig.args
arraybuilderval, xval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
if isinstance(xtype, numba.types.Integer) and xtype.signed:
x = builder.sitofp(xval, context.get_value_type(numba.types.float64))
elif isinstance(xtype, numba.types.Integer):
x = builder.uitofp(xval, context.get_value_type(numba.types.float64))
elif xtype.bitwidth < 64:
x = builder.fpext(xval, context.get_value_type(numba.types.float64))
elif xtype.bitwidth > 64:
x = builder.fptrunc(xval, context.get_value_type(numba.types.float64))
else:
x = xval
call(context, builder, ak._libawkward.ArrayBuilder_real, (proxyin.rawptr, x))
return context.get_dummy_value()
@numba.extending.lower_builtin("begin_list", ArrayBuilderType)
def lower_beginlist(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_beginlist, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin("end_list", ArrayBuilderType)
def lower_endlist(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_endlist, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin("begin_tuple", ArrayBuilderType, numba.types.Integer)
def lower_begintuple(context, builder, sig, args):
arraybuildertype, numfieldstype = sig.args
arraybuilderval, numfieldsval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
numfields = ak._connect._numba.castint(
context, builder, numfieldstype, numba.int64, numfieldsval
)
call(
context,
builder,
ak._libawkward.ArrayBuilder_begintuple,
(proxyin.rawptr, numfields),
)
return context.get_dummy_value()
@numba.extending.lower_builtin("index", ArrayBuilderType, numba.types.Integer)
def lower_index(context, builder, sig, args):
arraybuildertype, indextype = sig.args
arraybuilderval, indexval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
index = ak._connect._numba.castint(
context, builder, indextype, numba.int64, indexval
)
call(
context, builder, ak._libawkward.ArrayBuilder_index, (proxyin.rawptr, index),
)
return arraybuilderval
@numba.extending.lower_builtin("end_tuple", ArrayBuilderType)
def lower_endtuple(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_endtuple, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin("begin_record", ArrayBuilderType)
def lower_beginrecord(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(
context, builder, ak._libawkward.ArrayBuilder_beginrecord, (proxyin.rawptr,),
)
return context.get_dummy_value()
@numba.extending.lower_builtin(
"begin_record", ArrayBuilderType, numba.types.StringLiteral
)
def lower_beginrecord_field(context, builder, sig, args):
arraybuildertype, nametype = sig.args
arraybuilderval, nameval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
name = globalstring(context, builder, nametype.literal_value)
call(
context,
builder,
ak._libawkward.ArrayBuilder_beginrecord_fast,
(proxyin.rawptr, name),
)
return context.get_dummy_value()
@numba.extending.lower_builtin("field", ArrayBuilderType, numba.types.StringLiteral)
def lower_field(context, builder, sig, args):
arraybuildertype, keytype = sig.args
arraybuilderval, keyval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
key = globalstring(context, builder, keytype.literal_value)
call(
context, builder, ak._libawkward.ArrayBuilder_field_fast, (proxyin.rawptr, key),
)
return arraybuilderval
@numba.extending.lower_builtin("end_record", ArrayBuilderType)
def lower_endrecord(context, builder, sig, args):
(arraybuildertype,) = sig.args
(arraybuilderval,) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_endrecord, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin(
"append",
ArrayBuilderType,
ak._connect._numba.arrayview.ArrayViewType,
numba.types.Integer,
)
def lower_append_array_at(context, builder, sig, args):
arraybuildertype, viewtype, attype = sig.args
arraybuilderval, viewval, atval = args
viewproxy = context.make_helper(builder, viewtype, viewval)
atval = ak._connect._numba.layout.regularize_atval(
context, builder, viewproxy, attype, atval, True, True
)
atval = ak._connect._numba.castint(context, builder, numba.intp, numba.int64, atval)
sharedptr = ak._connect._numba.layout.getat(
context, builder, viewproxy.sharedptrs, viewproxy.pos
)
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(
context,
builder,
ak._libawkward.ArrayBuilder_append_nowrap,
(
proxyin.rawptr,
builder.inttoptr(sharedptr, context.get_value_type(numba.types.voidptr)),
atval,
),
)
return context.get_dummy_value()
@numba.extending.lower_builtin(
"append", ArrayBuilderType, ak._connect._numba.arrayview.ArrayViewType
)
def lower_append_array(context, builder, sig, args):
arraybuildertype, viewtype = sig.args
arraybuilderval, viewval = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(context, builder, ak._libawkward.ArrayBuilder_beginlist, (proxyin.rawptr,))
lower_extend_array(context, builder, sig, args)
call(context, builder, ak._libawkward.ArrayBuilder_endlist, (proxyin.rawptr,))
return context.get_dummy_value()
@numba.extending.lower_builtin(
"append", ArrayBuilderType, ak._connect._numba.arrayview.RecordViewType
)
def lower_append_record(context, builder, sig, args):
arraybuildertype, recordviewtype = sig.args
arraybuilderval, recordviewval = args
recordviewproxy = context.make_helper(builder, recordviewtype, recordviewval)
arrayviewproxy = context.make_helper(
builder, recordviewtype.arrayviewtype, recordviewproxy.arrayview
)
atval = ak._connect._numba.castint(
context, builder, numba.intp, numba.int64, recordviewproxy.at
)
sharedptr = ak._connect._numba.layout.getat(
context, builder, arrayviewproxy.sharedptrs, arrayviewproxy.pos
)
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
call(
context,
builder,
ak._libawkward.ArrayBuilder_append_nowrap,
(
proxyin.rawptr,
builder.inttoptr(sharedptr, context.get_value_type(numba.types.voidptr)),
atval,
),
)
return context.get_dummy_value()
@numba.extending.lower_builtin("append", ArrayBuilderType, numba.types.Boolean)
def lower_append_bool(context, builder, sig, args):
return lower_boolean(context, builder, sig, args)
@numba.extending.lower_builtin("append", ArrayBuilderType, numba.types.Integer)
def lower_append_int(context, builder, sig, args):
return lower_integer(context, builder, sig, args)
@numba.extending.lower_builtin("append", ArrayBuilderType, numba.types.Float)
def lower_append_float(context, builder, sig, args):
return lower_real(context, builder, sig, args)
@numba.extending.lower_builtin("append", ArrayBuilderType, numba.types.Optional)
def lower_append_optional(context, builder, sig, args):
arraybuildertype, opttype = sig.args
arraybuilderval, optval = args
optproxy = context.make_helper(builder, opttype, optval)
validbit = numba.core.cgutils.as_bool_bit(builder, optproxy.valid)
with builder.if_else(validbit) as (is_valid, is_not_valid):
with is_valid:
if isinstance(opttype.type, numba.types.Boolean):
lower_boolean(
context,
builder,
numba.types.none(arraybuildertype, opttype.type),
(arraybuilderval, optproxy.data),
)
elif isinstance(opttype.type, numba.types.Integer):
lower_integer(
context,
builder,
numba.types.none(arraybuildertype, opttype.type),
(arraybuilderval, optproxy.data),
)
elif isinstance(opttype.type, numba.types.Float):
lower_real(
context,
builder,
numba.types.none(arraybuildertype, opttype.type),
(arraybuilderval, optproxy.data),
)
else:
raise AssertionError(
repr(opttype.type) + ak._util.exception_suffix(__file__)
)
with is_not_valid:
lower_null(
context,
builder,
numba.types.none(arraybuildertype,),
(arraybuilderval,),
)
return context.get_dummy_value()
@numba.extending.lower_builtin("append", ArrayBuilderType, numba.types.NoneType)
def lower_append_none(context, builder, sig, args):
return lower_null(context, builder, sig.return_type(sig.args[0]), (args[0],))
@numba.extending.lower_builtin(
"extend", ArrayBuilderType, ak._connect._numba.arrayview.ArrayViewType
)
def lower_extend_array(context, builder, sig, args):
arraybuildertype, viewtype = sig.args
arraybuilderval, viewval = args
viewproxy = context.make_helper(builder, viewtype, viewval)
sharedptr = ak._connect._numba.layout.getat(
context, builder, viewproxy.sharedptrs, viewproxy.pos
)
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
with numba.core.cgutils.for_range(builder, viewproxy.stop, viewproxy.start) as loop:
atval = ak._connect._numba.castint(
context, builder, numba.intp, numba.int64, loop.index
)
call(
context,
builder,
ak._libawkward.ArrayBuilder_append_nowrap,
(
proxyin.rawptr,
builder.inttoptr(
sharedptr, context.get_value_type(numba.types.voidptr)
),
atval,
),
)
return context.get_dummy_value()
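# Illustrative usage sketch, not part of the library code above: the typing templates and
# lower_builtin registrations defined in this module are what allow ak.ArrayBuilder methods
# to be called from Numba-compiled functions. The function name and values below are
# hypothetical; the snapshot is taken outside the compiled code.
if __name__ == "__main__":

    @numba.njit
    def fill_example(builder, n):
        for i in range(n):
            builder.begin_list()   # typed by resolve_begin_list, lowered by lower_beginlist
            builder.integer(i)     # resolve_integer / lower_integer
            builder.real(i * 1.5)  # resolve_real / lower_real
            builder.end_list()

    example_builder = ak.highlevel.ArrayBuilder()
    fill_example(example_builder, 3)
    print(example_builder.snapshot())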
|
py
|
1a560e60dd913623d3752447126c246f3741513f
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: route53_zone
short_description: add or delete Route53 zones
description:
- Creates and deletes Route53 private and public zones
version_added: "2.0"
requirements: [ boto3 ]
options:
zone:
description:
- "The DNS zone record (eg: foo.com.)"
required: true
state:
description:
            - Whether or not the zone should exist.
default: present
choices: [ "present", "absent" ]
vpc_id:
description:
- The VPC ID the zone should be a part of (if this is going to be a private zone)
vpc_region:
description:
- The VPC Region the zone should be a part of (if this is going to be a private zone)
comment:
description:
- Comment associated with the zone
default: ''
hosted_zone_id:
description:
- The unique zone identifier you want to delete or "all" if there are many zones with the same domain name.
Required if there are multiple zones identified with the above options
version_added: 2.4
delegation_set_id:
description:
- The reusable delegation set ID to be associated with the zone.
Note that you can't associate a reusable delegation set with a private hosted zone.
version_added: 2.6
extends_documentation_fragment:
- aws
- ec2
author: "Christopher Troup (@minichate)"
'''
EXAMPLES = '''
- name: create a public zone
route53_zone:
zone: example.com
comment: this is an example
- name: delete a public zone
route53_zone:
zone: example.com
state: absent
- name: create a private zone
route53_zone:
zone: devel.example.com
vpc_id: '{{ myvpc_id }}'
vpc_region: us-west-2
comment: developer domain
- name: create a public zone associated with a specific reusable delegation set
route53_zone:
zone: example.com
comment: reusable delegation set example
delegation_set_id: A1BCDEF2GHIJKL
'''
RETURN = '''
comment:
description: optional hosted zone comment
returned: when hosted zone exists
type: str
sample: "Private zone"
name:
description: hosted zone name
returned: when hosted zone exists
type: str
sample: "private.local."
private_zone:
description: whether hosted zone is private or public
returned: when hosted zone exists
type: bool
sample: true
vpc_id:
description: id of vpc attached to private hosted zone
returned: for private hosted zone
type: str
sample: "vpc-1d36c84f"
vpc_region:
description: region of vpc attached to private hosted zone
returned: for private hosted zone
type: str
sample: "eu-west-1"
zone_id:
description: hosted zone id
returned: when hosted zone exists
type: str
sample: "Z6JQG9820BEFMW"
delegation_set_id:
description: id of the associated reusable delegation set
returned: for public hosted zones, if they have been associated with a reusable delegation set
type: str
sample: "A1BCDEF2GHIJKL"
'''
import time
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
def find_zones(module, client, zone_in, private_zone):
try:
paginator = client.get_paginator('list_hosted_zones')
results = paginator.paginate().build_full_result()
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not list current hosted zones")
zones = []
for r53zone in results['HostedZones']:
if r53zone['Name'] != zone_in:
continue
# only save zone names that match the public/private setting
if (r53zone['Config']['PrivateZone'] and private_zone) or \
(not r53zone['Config']['PrivateZone'] and not private_zone):
zones.append(r53zone)
return zones
def create(module, client, matching_zones):
zone_in = module.params.get('zone').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
comment = module.params.get('comment')
delegation_set_id = module.params.get('delegation_set_id')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
record = {
'private_zone': private_zone,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'comment': comment,
'name': zone_in,
'delegation_set_id': delegation_set_id,
}
if private_zone:
changed, result = create_or_update_private(module, client, matching_zones, record)
else:
changed, result = create_or_update_public(module, client, matching_zones, record)
return changed, result
def create_or_update_private(module, client, matching_zones, record):
for z in matching_zones:
try:
result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
zone_details = result['HostedZone']
vpc_details = result['VPCs']
current_vpc_id = None
current_vpc_region = None
if isinstance(vpc_details, dict):
if vpc_details['VPC']['VPCId'] == record['vpc_id']:
current_vpc_id = vpc_details['VPC']['VPCId']
current_vpc_region = vpc_details['VPC']['VPCRegion']
else:
if record['vpc_id'] in [v['VPCId'] for v in vpc_details]:
current_vpc_id = record['vpc_id']
if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]:
current_vpc_region = record['vpc_region']
if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region:
record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
if not module.check_mode:
try:
client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
return True, record
else:
record['msg'] = "There is already a private hosted zone in the same region with the same VPC \
you chose. Unable to create a new private hosted zone in the same name space."
return False, record
if not module.check_mode:
try:
result = client.create_hosted_zone(
Name=record['name'],
HostedZoneConfig={
'Comment': record['comment'] if record['comment'] is not None else "",
'PrivateZone': True,
},
VPC={
'VPCRegion': record['vpc_region'],
'VPCId': record['vpc_id'],
},
CallerReference="%s-%s" % (record['name'], time.time()),
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not create hosted zone")
hosted_zone = result['HostedZone']
zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
record['zone_id'] = zone_id
changed = True
return changed, record
def create_or_update_public(module, client, matching_zones, record):
zone_details, zone_delegation_set_details = None, {}
for matching_zone in matching_zones:
try:
zone = client.get_hosted_zone(Id=matching_zone['Id'])
zone_details = zone['HostedZone']
zone_delegation_set_details = zone.get('DelegationSet', {})
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id'])
if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
if not module.check_mode:
try:
client.update_hosted_zone_comment(
Id=zone_details['Id'],
Comment=record['comment']
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
changed = True
else:
changed = False
break
if zone_details is None:
if not module.check_mode:
try:
params = dict(
Name=record['name'],
HostedZoneConfig={
'Comment': record['comment'] if record['comment'] is not None else "",
'PrivateZone': False,
},
CallerReference="%s-%s" % (record['name'], time.time()),
)
if record.get('delegation_set_id') is not None:
params['DelegationSetId'] = record['delegation_set_id']
result = client.create_hosted_zone(**params)
zone_details = result['HostedZone']
zone_delegation_set_details = result.get('DelegationSet', {})
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not create hosted zone")
changed = True
if not module.check_mode:
record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
record['name'] = zone_details['Name']
record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '')
return changed, record
def delete_private(module, client, matching_zones, vpc_id, vpc_region):
for z in matching_zones:
try:
result = client.get_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
zone_details = result['HostedZone']
vpc_details = result['VPCs']
if isinstance(vpc_details, dict):
if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
return True, "Successfully deleted %s" % zone_details['Name']
else:
if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
return True, "Successfully deleted %s" % zone_details['Name']
return False, "The vpc_id and the vpc_region do not match a private hosted zone."
def delete_public(module, client, matching_zones):
if len(matching_zones) > 1:
changed = False
msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
else:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=matching_zones[0]['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id'])
changed = True
msg = "Successfully deleted %s" % matching_zones[0]['Id']
return changed, msg
def delete_hosted_id(module, client, hosted_zone_id, matching_zones):
if hosted_zone_id == "all":
deleted = []
for z in matching_zones:
deleted.append(z['Id'])
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
changed = True
msg = "Successfully deleted zones: %s" % deleted
elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=hosted_zone_id)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
changed = True
msg = "Successfully deleted zone: %s" % hosted_zone_id
else:
changed = False
msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
return changed, msg
def delete(module, client, matching_zones):
zone_in = module.params.get('zone').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
hosted_zone_id = module.params.get('hosted_zone_id')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
if zone_in in [z['Name'] for z in matching_zones]:
if hosted_zone_id:
changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones)
else:
if private_zone:
changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region)
else:
changed, result = delete_public(module, client, matching_zones)
else:
changed = False
result = "No zone to delete."
return changed, result
def main():
argument_spec = dict(
zone=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
vpc_id=dict(default=None),
vpc_region=dict(default=None),
comment=dict(default=''),
hosted_zone_id=dict(),
delegation_set_id=dict(),
)
mutually_exclusive = [
['delegation_set_id', 'vpc_id'],
['delegation_set_id', 'vpc_region'],
]
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
)
zone_in = module.params.get('zone').lower()
state = module.params.get('state').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
client = module.client('route53')
zones = find_zones(module, client, zone_in, private_zone)
if state == 'present':
changed, result = create(module, client, matching_zones=zones)
elif state == 'absent':
changed, result = delete(module, client, matching_zones=zones)
if isinstance(result, dict):
module.exit_json(changed=changed, result=result, **result)
else:
module.exit_json(changed=changed, result=result)
if __name__ == '__main__':
main()
|
py
|
1a5613d88649abc831660b7aa4cbbb18fa0d07e5
|
"""Support for MyChevy sensors."""
import logging
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN, SensorEntity
from homeassistant.const import PERCENTAGE
from homeassistant.core import callback
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util import slugify
from . import (
DOMAIN as MYCHEVY_DOMAIN,
ERROR_TOPIC,
MYCHEVY_ERROR,
MYCHEVY_SUCCESS,
UPDATE_TOPIC,
EVSensorConfig,
)
_LOGGER = logging.getLogger(__name__)
BATTERY_SENSOR = "batteryLevel"
SENSORS = [
EVSensorConfig("Mileage", "totalMiles", "miles", "mdi:speedometer"),
EVSensorConfig("Electric Range", "electricRange", "miles", "mdi:speedometer"),
EVSensorConfig("Charged By", "estimatedFullChargeBy"),
EVSensorConfig("Charge Mode", "chargeMode"),
EVSensorConfig(
"Battery Level", BATTERY_SENSOR, PERCENTAGE, "mdi:battery", ["charging"]
),
]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MyChevy sensors."""
if discovery_info is None:
return
hub = hass.data[MYCHEVY_DOMAIN]
sensors = [MyChevyStatus()]
for sconfig in SENSORS:
for car in hub.cars:
sensors.append(EVSensor(hub, sconfig, car.vid))
add_entities(sensors)
class MyChevyStatus(SensorEntity):
"""A string representing the charge mode."""
_name = "MyChevy Status"
_icon = "mdi:car-connected"
def __init__(self):
"""Initialize sensor with car connection."""
self._state = None
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.success
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
ERROR_TOPIC, self.error
)
)
@callback
def success(self):
"""Update state, trigger updates."""
if self._state != MYCHEVY_SUCCESS:
_LOGGER.debug("Successfully connected to mychevy website")
self._state = MYCHEVY_SUCCESS
self.async_write_ha_state()
@callback
def error(self):
"""Update state, trigger updates."""
_LOGGER.error(
"Connection to mychevy website failed. "
"This probably means the mychevy to OnStar link is down"
)
self._state = MYCHEVY_ERROR
self.async_write_ha_state()
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def name(self):
"""Return the name."""
return self._name
@property
def native_value(self):
"""Return the state."""
return self._state
@property
def should_poll(self):
"""Return the polling state."""
return False
class EVSensor(SensorEntity):
"""Base EVSensor class.
The only real difference between sensors is which units and what
attribute from the car object they are returning. All logic can be
built with just setting subclass attributes.
"""
def __init__(self, connection, config, car_vid):
"""Initialize sensor with car connection."""
self._conn = connection
self._name = config.name
self._attr = config.attr
self._extra_attrs = config.extra_attrs
self._unit_of_measurement = config.unit_of_measurement
self._icon = config.icon
self._state = None
self._state_attributes = {}
self._car_vid = car_vid
self.entity_id = f"{SENSOR_DOMAIN}.{MYCHEVY_DOMAIN}_{slugify(self._car.name)}_{slugify(self._name)}"
async def async_added_to_hass(self):
"""Register callbacks."""
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.async_update_callback
)
@property
def _car(self):
"""Return the car."""
return self._conn.get_car(self._car_vid)
@property
def icon(self):
"""Return the icon."""
if self._attr == BATTERY_SENSOR:
charging = self._state_attributes.get("charging", False)
return icon_for_battery_level(self.state, charging)
return self._icon
@property
def name(self):
"""Return the name."""
return self._name
@callback
def async_update_callback(self):
"""Update state."""
if self._car is not None:
self._state = getattr(self._car, self._attr, None)
if self._unit_of_measurement == "miles":
self._state = round(self._state)
for attr in self._extra_attrs:
self._state_attributes[attr] = getattr(self._car, attr)
self.async_write_ha_state()
@property
def native_value(self):
"""Return the state."""
return self._state
@property
def extra_state_attributes(self):
"""Return all the state attributes."""
return self._state_attributes
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement the state is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return False
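# Illustrative extension sketch, not part of the original platform. As the EVSensor
# docstring notes, sensors differ only in the car attribute, unit and icon they expose,
# so another sensor is just one more EVSensorConfig entry; the attribute name below is
# hypothetical and would have to exist on the mychevy car object.
#
#   SENSORS.append(
#       EVSensorConfig("Charge Percent Target", "chargePercentTarget", PERCENTAGE, "mdi:battery-charging")
#   )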
|
py
|
1a5616934602872685ad5db97a951af9283ea837
|
from flask import Blueprint
from flask_restful import Api
from digeiz_api.api.resources import Accounts, AccountsDetail, Malls, MallsDetail, Units, UnitsDetail
api_blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(api_blueprint)
api.add_resource(Accounts, '/accounts')
api.add_resource(AccountsDetail, '/accounts/<int:account_id>')
api.add_resource(Malls, '/malls')
api.add_resource(MallsDetail, '/malls/<int:mall_id>')
api.add_resource(Units, '/units')
api.add_resource(UnitsDetail, '/units/<int:unit_id>')
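# Illustrative wiring sketch, not part of the original module: the blueprint above only
# serves requests once registered on a Flask application. The real project presumably has
# its own app factory; the standalone app below is hypothetical.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(api_blueprint)  # exposes /api/accounts, /api/malls, /api/units, ...
    app.run(debug=True)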
|
py
|
1a56170367f1765238d9af38b716bea96822db84
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import sys
import random
import time
class QtumPrematureCoinstakeSpendTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def remove_from_staking_prevouts(self, remove_prevout):
for j in range(len(self.staking_prevouts)):
prevout = self.staking_prevouts[j]
if prevout[0].serialize() == remove_prevout.serialize():
self.staking_prevouts.pop(j)
break
def assert_spend_of_coinstake_at_height(self, height, should_accept):
spend_block = self.node.getblock(self.node.getblockhash(height))
spend_coinstake_txid = spend_block['tx'][1]
spend_coinstake_txout = self.node.gettxout(spend_coinstake_txid, 1)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(spend_coinstake_txid, 16), 1))]
tx.vout = [CTxOut(int(float(str(spend_coinstake_txout['value']))*COIN - 1000000), scriptPubKey=CScript([OP_TRUE]))]
tx = rpc_sign_transaction(self.node, tx)
if should_accept:
self.node.sendrawtransaction(bytes_to_hex_str(tx.serialize()))
else:
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase", self.node.sendrawtransaction, bytes_to_hex_str(tx.serialize()))
tip = self.node.getblock(self.node.getbestblockhash())
next_block_time = (tip['time'] + 0x30) & 0xfffffff0
self.node.setmocktime(next_block_time)
block, sig_key = create_unsigned_mpos_block(self.node, self.staking_prevouts, next_block_time, 1000000)
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.sign_block(sig_key)
blockcount = self.node.getblockcount()
self.node.submitblock(bytes_to_hex_str(block.serialize()))
#assert_equal(self.node.getblockcount(), blockcount + (1 if should_accept else 0))
self.remove_from_staking_prevouts(block.prevoutStake)
def run_test(self):
self.node = self.nodes[0]
self.node.setmocktime(int(time.time()) - 1000000)
self.node.generate(10 + COINBASE_MATURITY)
        # These are the privkeys that correspond to the pubkeys in the pos outputs
# These are used by default by create_pos_block
for i in range(0xff+1):
privkey = byte_to_base58(hash256(struct.pack('<I', i)), 239)
self.node.importprivkey(privkey)
generatedMpos = activate_mpos(self.node)
self.staking_prevouts = collect_prevouts(self.node)
last_height = self.node.getblock(self.node.getbestblockhash())['height']
self.log.info('last_height=%s' % (last_height))
self.assert_spend_of_coinstake_at_height(height=last_height, should_accept=False)
if generatedMpos > COINBASE_MATURITY:
self.assert_spend_of_coinstake_at_height(last_height - generatedMpos + 1, should_accept=True)
        # Invalidate the last block and make sure that the previously rejected premature coinstake spend still fails
self.node.invalidateblock(self.node.getbestblockhash())
assert_equal(last_height, self.node.getblock(self.node.getbestblockhash())['height'] + 1)
#self.log.info('updated last_height=%s' % (self.node.getblock(self.node.getbestblockhash())['height']))
#self.assert_spend_of_coinstake_at_height(height=last_height, should_accept=False)
if __name__ == '__main__':
QtumPrematureCoinstakeSpendTest().main()
|
py
|
1a5618119de97ff3e188223d03d67b404730757e
|
import getpass
import os
import re
import subprocess
import click
from .config import *
class JumpOutFuckingClick(Exception):
"""Just to break out the unkown loop"""
pass
class JumpOutFuckingClick2(Exception):
"""Just to break out the unkown loop2"""
pass
def ssl_file_gen(domain,usr,loc,email,key):
with open(SSL, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{DOMAIN}}', '.'.join(domain.split('.')[-2:]), fds)
fce = re.sub(r'{{EMAIL}}', email, fcd)
res = re.sub(r'{{KEY}}', key, fce)
with open(domain+"/"+domain+'.sh', 'w') as ssl_sh:
ssl_sh.write(res)
ssl_sh.close()
fh.close()
click.echo("-4- SSL script: {} create successfully".format(domain+"/"+domain+'.sh'))
def ssl_multi_gen(domain,usr,loc,op1,op2,dns_op):
with open(SSL, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{DOMAIN}}', '.'.join(domain.split('.')[-2:]), fds)
fce = re.sub(r'{{OP1}}', op1, fcd)
fcf = re.sub(r'{{OP2}}', op2, fce)
res = re.sub(r'{{DNS_OP}}', dns_op, fcf)
with open(domain+"/"+domain+'.sh', 'w') as ssl_sh:
ssl_sh.write(res)
ssl_sh.close()
fh.close()
click.echo("-4- SSL script: {} create successfully".format(domain+"/"+domain+'.sh'))
def docker_file_gen(domain,usr,loc):
with open(DOCKER, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{DOMAIN}}', domain, fds)
fcu = re.sub(r'{{usr}}', usr, fcd)
res = re.sub(r'{{passwd}}', loc+domain+usr, fcu)
with open(domain+"/"+domain+'.run', 'w') as docker_run:
docker_run.write(res)
docker_run.close()
fh.close()
click.echo("-3- Docker config script: {} create successfully".format(domain+"/"+domain+'.run'))
def uwsgi_file_gen(domain,usr,loc):
env = os.path.dirname(loc)
with open(uWSGI, 'r') as fh:
fds = fh.read()
fce = re.sub(r'{{env}}',env,fds)
fcu = re.sub(r'{{usr}}',usr,fce)
res = re.sub(r'{{loc}}',loc,fcu)
with open(domain+"/"+domain+'.ini', 'w') as uwsgi_ini:
uwsgi_ini.write(res)
uwsgi_ini.close()
fh.close()
click.echo("-0- uwsgi config file: {} create successfully".format(domain+"/"+domain+'.ini'))
#static
def nginx_file_gen(domain,usr,loc):
with open(NGINX, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{DOMAIN}}', domain, fds)
res = re.sub(r'{{loc}}', loc, fcd)
with open(domain+"/"+domain+'.conf', 'w') as nginx_conf:
nginx_conf.write(res)
nginx_conf.close()
fh.close()
click.echo("-1- Nginx config file: {} create successfully".format(domain+"/"+domain+'.conf'))
#static
def service_file_gen(domain,usr,loc):
with open(SERVICE, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{DOMAIN}}', domain, fds)
fcu = re.sub(r'{{usr}}', usr, fcd)
res = re.sub(r'{{loc}}', loc, fcu)
with open(domain+"/"+domain+'.service', 'w') as confservice:
confservice.write(res)
confservice.close()
fh.close()
click.echo("-2- Systemd service file : {} create successfully".format(domain+"/"+domain+'.service'))
def script_files_gen(domain, usr, loc):
cmd = []
files = loc+"/"+domain
c = None
if os.path.exists(files+'.sh'):
c = "sudo mkdir -p /etc/nginx/certs"
c1 = "sudo /bin/bash "+files+'.sh'
cmd.append(c)
cmd.append(c1)
if os.path.exists(files+'.run'):
c = "sudo "+files+'.run'
cmd.append(c)
if os.path.exists(files+'.conf'):
c = "sudo cp "+files+'.conf ' + NGINX_CONF1
c1 = "sudo cp "+files+'.conf ' + NGINX_CONF2
c2 = "sudo nginx -s reload"
cmd.append(c)
cmd.append(c1)
cmd.append(c2)
if os.path.exists(files+'.service'):
c = "sudo cp "+files+'.service ' + SYSTEMD_CONF
c1 = "sudo systemctl enable "+domain+'.service'
c2 = "sudo systemctl start "+domain+'.service'
cmd.append(c)
cmd.append(c1)
cmd.append(c2)
with open(loc+'/start.sh', 'w') as file:
for c in cmd:
file.write(c+"\n")
file.close()
click.echo("-5- One click script file : {} create successfully".format(domain+"/"+'start.sh'))
def script_files_run(domain, usr, loc):
subprocess.call(['sudo', '/bin/bash',loc+'/start.sh'])
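# Illustrative driver sketch, not part of the original module. The meaning of `usr` and
# `loc` is defined by the click CLI that calls these helpers; the values and call order
# below are hypothetical and only show how the generated files feed into the one-click
# start script.
if __name__ == "__main__":
    example_domain, example_usr = "example.com", "deploy"
    example_loc = os.path.abspath(example_domain)
    os.makedirs(example_domain, exist_ok=True)   # the generators write into ./<domain>/
    uwsgi_file_gen(example_domain, example_usr, example_loc)
    nginx_file_gen(example_domain, example_usr, example_loc)
    service_file_gen(example_domain, example_usr, example_loc)
    docker_file_gen(example_domain, example_usr, example_loc)
    script_files_gen(example_domain, example_usr, example_loc)  # collects them into start.sh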
|
py
|
1a5618411d4e7986026f9a600c4852de09514493
|
import timeit
from typing import *
from subseq import is_subseq_py, is_subseq_rs
seq = ['a', 'b', 'c'] * 100
subseq = ['dd', 'ee']
joined_seq = "," + ",".join(seq) + ","
joined_subseq = "," + ",".join(subseq) + ","
def find_loop(seq, subseq):
n = len(seq)
m = len(subseq)
for i in range(n - m + 1):
found = True
for j in range(m):
if seq[i + j] != subseq[j]:
found = False
break
        if found:
            return True
    return False
def is_subseq_str(seq, subseq):
return subseq in seq
is_subseq_py(seq, subseq)
n = 10000
timer = timeit.Timer("is_subseq(seq, subseq)", globals={"is_subseq": is_subseq_rs, "seq": seq, "subseq": subseq})
t = timer.timeit(number=n)
print(f"rust (rust): {t*10**9/n}")
timer = timeit.Timer("is_subseq(seq, subseq)", globals={"is_subseq": is_subseq_py, "seq": seq, "subseq": subseq})
t = timer.timeit(number=n)
print(f"rust (py): {t*10**9/n}")
timer = timeit.Timer("is_subseq(seq, subseq)", globals={"is_subseq": find_loop, "seq": seq, "subseq": subseq})
t = timer.timeit(number=n)
print(f"python: {t*10**9/n}")
timer = timeit.Timer("is_subseq(seq, subseq)", globals={"is_subseq": is_subseq_str, "seq": seq, "subseq": subseq})
t = timer.timeit(number=n)
print(f"python str: {t*10**9/n}")
|
py
|
1a561843179cc61a5f55c0327073f46365fb2f68
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import json
import os.path
import re
import resources
import subprocess
import sys
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Add internal libs
from bisect import insort
from collections import defaultdict
from functools import partial
from libs.canvas import Canvas
from libs.colorDialog import ColorDialog
from libs.constants import *
from libs.labelDialog import LabelDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.lib import struct, newAction, newIcon, addActions, fmtShortcut, generateColorByText
from libs.loginDialog import Login
from libs.pascal_voc_io import PascalVocReader, XML_EXT
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.toolBar import ToolBar
from libs.ustr import ustr
from libs.version import __version__
from libs.zoomWidget import ZoomWidget
import lmdb
__appname__ = 'vanno_ver'
server_path = "../vanno_server/env/"
dataset = 'jester'
# Utility functions and classes.
def have_qstring():
'''p3/qt5 get rid of QString wrapper as py3 has native unicode str type'''
return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
def util_qt_strlistclass():
return QStringList if have_qstring() else list
class WindowMixin(object):
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName(u'%sToolBar' % title)
# toolbar.setOrientation(Qt.Vertical)
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
self.addToolBar(Qt.LeftToolBarArea, toolbar)
return toolbar
# PyQt5: TypeError: unhashable type: 'QListWidgetItem'
class HashableQListWidgetItem(QListWidgetItem):
def __init__(self, *args):
super(HashableQListWidgetItem, self).__init__(*args)
def __hash__(self):
return hash(id(self))
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self,logged_id, defaultFilename=None, defaultPrefdefClassFile=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
# Load setting in the main thread
self.settings = Settings()
self.settings.load()
settings = self.settings
# Save as Pascal voc xml
self.defaultSaveDir = ""
self.defaultSaveDir_folder= ""
self.usingPascalVocFormat = True
# For loading all image under a directory
self.mImgList = []
self.mDirList = []
self.dirname = None
self.labelHist = []
self.lastOpenDir = None
self.old_Filepath=None
# self.proj_dir=None
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
self._beginner = True
self.screencastViewer = "firefox"
self.screencast = "https://youtu.be/p0nR2YsCY_U"
self.logged_id=logged_id
self.ids = []
# Load predefined classes to the list
if defaultPrefdefClassFile is not None:
self.loadPredefinedClasses(defaultPrefdefClassFile)
# Main widgets and related state.
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.itemsToShapes = {}
self.shapesToItems = {}
self.prevLabelText = ''
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
# Create a widget for using default label
self.useDefaultLabelCheckbox = QCheckBox(u'Use default label')
self.useDefaultLabelCheckbox.setChecked(False)
self.defaultLabelTextLine = QLineEdit()
useDefaultLabelQHBoxLayout = QHBoxLayout()
useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
useDefaultLabelContainer = QWidget()
useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
# Create a widget for edit and diffc button
self.diffcButton = QCheckBox(u'difficult')
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# self.saveButton = QToolButton()
# self.saveButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# self.saveButton.setText("Save checking")
# self.saveButton.clicked.connect(self.saveButtonClicked)
self.edit_label = QLabel()
self.save_label = QLabel()
self.anno_label = QLabel()
self.id_label = QLabel()
# Add some of widgets to listLayout
listLayout.addWidget(self.editButton)
listLayout.addWidget(self.diffcButton)
listLayout.addWidget(useDefaultLabelContainer)
# Create and add a widget for showing current label items
self.labelList = QListWidget()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
# Connect to itemChanged to detect checkbox changes.
self.labelList.itemChanged.connect(self.labelItemChanged)
listLayout.addWidget(self.labelList)
listLayout.addWidget(self.anno_label)
listLayout.addWidget(self.edit_label)
listLayout.addWidget(self.save_label)
self.dock = QDockWidget(self.logged_id, self)
self.dock.setObjectName(u'Labels')
self.dock.setWidget(labelListContainer)
self.folderListWidget = QListWidget()
self.folderListWidget.itemDoubleClicked.connect(self.diritemDoubleClicked)
self.folderListWidget.itemChanged.connect(self.diritemChanged)
folderlistLayout = QVBoxLayout()
folderlistLayout.setContentsMargins(0, 0, 0, 0)
# folderlistLayout.addWidget(self.saveButton)
###
self.savebtncnt_label = QLabel()
folderlistLayout.addWidget(self.savebtncnt_label)
# self.savebtn_label = QLabel()
# folderlistLayout.addWidget(self.savebtn_label)
folderlistLayout.addWidget(self.folderListWidget)
folderListContainer = QWidget()
folderListContainer.setLayout(folderlistLayout)
self.folderdock = QDockWidget(u'Folder List', self)
self.folderdock.setObjectName(u'Folders')
self.folderdock.setWidget(folderListContainer)
# Tzutalin 20160906 : Add file list and dock to move faster
self.fileListWidget = QListWidget()
self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(u'File List', self)
self.filedock.setObjectName(u'Files')
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
# Tzutalin 20160906 : Add file list and dock to move faster
self.addDockWidget(Qt.RightDockWidgetArea, self.folderdock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
# self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
# self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
# self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
###
self.foldercnt = 0
self.checkList = []
self.verJobList = []
file = QFile(server_path + '../ids.txt')
if file.open(QFile.ReadOnly | QFile.Text):
while not file.atEnd():
line = bytearray(file.readLine()).decode().strip()
insort(self.ids, line)
file.close()
for id in self.ids:
file = QFile(server_path + dataset + '/' + id + '.txt')
if file.open(QFile.ReadOnly | QFile.Text):
while not file.atEnd():
line = bytearray(file.readLine()).decode().strip()
insort(self.verJobList, line)
file.close()
# file = QFile(server_path + dataset + '/' + self.logged_id + '.txt')
# if file.open(QFile.ReadOnly | QFile.Text):
# while not file.atEnd():
# line = bytearray(file.readLine()).decode().strip()
# insort(self.checkList, line)
# file.close()
# Actions
action = partial(newAction, self)
quit = action('&Quit', self.close,
'Ctrl+Q', 'quit', u'Quit application')
open = action('&Open', self.openFile,
'Ctrl+O', 'open', u'Open image or label file')
opendir = action('&Open Dir', self.openDirDialog,
'u', 'open', u'Open Dir')
changeSavedir = action('&Change Save Dir', self.changeSavedirDialog,
'r', 'open', u'Change default saved Annotation dir')
openAnnotation = action('&Open Annotation', self.openAnnotationDialog,
'Ctrl+Shift+O', 'open', u'Open Annotation')
openNextImg = action('&Next Image', self.openNextImg,
'd', 'next', u'Open Next')
openPrevImg = action('&Prev Image', self.openPrevImg,
'a', 'prev', u'Open Prev')
verify = action('&Verify Image', self.verifyImg,
'space', 'verify', u'Verify Image')
save = action('&Save', self.saveFile,
's', 'save', u'Save labels to file', enabled=False)
saveAs = action('&Save As', self.saveFileAs,
'Ctrl+Shift+S', 'save-as', u'Save labels to a different file', enabled=False)
close = action('&Close', self.closeFile, 'Ctrl+W', 'close', u'Close current file')
resetAll = action('&ResetAll', self.resetAll, None, 'resetall', u'Reset all')
color1 = action('Box Line Color', self.chooseColor1,
'Ctrl+L', 'color_line', u'Choose Box line color')
createMode = action('Create\nRectBox', self.setCreateMode,
'w', 'new', u'Start drawing Boxs', enabled=False)
editMode = action('&Edit\nRectBox', self.setEditMode,
'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
create = action('Create\nRectBox', self.createShape,
'w', 'new', u'Draw a new Box', enabled=False)
delete = action('Delete\nRectBox', self.deleteSelectedShape,
'Delete', 'delete', u'Delete', enabled=False)
copy = action('&Duplicate\nRectBox', self.copySelectedShape,
'Ctrl+D', 'copy', u'Create a duplicate of the selected Box',
enabled=False)
advancedMode = action('&Advanced Mode', self.toggleAdvancedMode,
'Ctrl+Shift+A', 'expert', u'Switch to advanced mode',
checkable=True)
hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
'Ctrl+H', 'hide', u'Hide all Boxs',
enabled=False)
showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
'Ctrl+A', 'hide', u'Show all Boxs',
enabled=False)
help = action('&Tutorial', self.showTutorialDialog, None, 'help', u'Show demos')
showInfo = action('&Information', self.showInfoDialog, None, 'help', u'Information')
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action('Zoom &In', partial(self.addZoom, 10),
'Ctrl++', 'zoom-in', u'Increase zoom level', enabled=False)
zoomOut = action('&Zoom Out', partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out', u'Decrease zoom level', enabled=False)
zoomOrg = action('&Original size', partial(self.setZoom, 100),
'Ctrl+=', 'zoom', u'Zoom to original size', enabled=False)
fitWindow = action('&Fit Window', self.setFitWindow,
'Ctrl+F', 'fit-window', u'Zoom follows window size',
checkable=True, enabled=False)
fitWidth = action('Fit &Width', self.setFitWidth,
'Ctrl+Shift+F', 'fit-width', u'Zoom follows window width',
checkable=True, enabled=False)
# Group zoom controls into a list for easier toggling.
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action('&Edit Label', self.editLabel,
'Ctrl+E', 'edit', u'Modify the label of the selected Box',
enabled=False)
self.editButton.setDefaultAction(edit)
shapeLineColor = action('Shape &Line Color', self.chshapeLineColor,
icon='color_line', tip=u'Change the line color for this specific shape',
enabled=False)
shapeFillColor = action('Shape &Fill Color', self.chshapeFillColor,
icon='color', tip=u'Change the fill color for this specific shape',
enabled=False)
labels = self.dock.toggleViewAction()
labels.setText('Show/Hide Label Panel')
labels.setShortcut('Ctrl+Shift+L')
        # Label list context menu.
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu)
# Store actions for further handling.
self.actions = struct(save=save, saveAs=saveAs, open=open, close=close, resetAll = resetAll,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
createMode=createMode, editMode=editMode, advancedMode=advancedMode,
shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions,
fileMenuActions=(
open, opendir, save, saveAs, close, resetAll, quit),
beginner=(), advanced=(),
editMenu=(edit, copy, delete,
None, color1),
beginnerContext=(create, edit, copy, delete),
advancedContext=(createMode, editMode, edit, copy,
delete, shapeLineColor, shapeFillColor),
onLoadActive=(
close, create, createMode, editMode),
onShapesPresent=(saveAs, hideAll, showAll))
self.menus = struct(
file=self.menu('&File'),
edit=self.menu('&Edit'),
view=self.menu('&View'),
help=self.menu('&Help'),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
# Auto saving : Enable auto saving if pressing next
self.autoSaving = QAction("Auto Saving", self)
self.autoSaving.setCheckable(True)
self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
# Sync single class mode from PR#106
self.singleClassMode = QAction("Single Class Mode", self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel = None
addActions(self.menus.file,
(open, opendir, changeSavedir, openAnnotation, self.menus.recentFiles, save, saveAs, close, resetAll, quit))
addActions(self.menus.help, (help, showInfo))
addActions(self.menus.view, (
self.autoSaving,
self.singleClassMode,
labels, advancedMode, None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
self.tools = self.toolbar('Tools')
self.actions.beginner = (
open, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, None, create, copy, delete, None,
zoomIn, zoom, zoomOut, fitWindow, fitWidth)
self.actions.advanced = (
open, opendir, changeSavedir, openNextImg, openPrevImg, save, None,
createMode, editMode, None,
hideAll, showAll)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
# Application state.
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
# Add Chris
self.difficult = False
        ## Fix the compatibility issue for qt4 and qt5. Convert the QStringList to a python list
if settings.get(SETTING_RECENT_FILES):
if have_qstring():
recentFileQStringList = settings.get(SETTING_RECENT_FILES)
self.recentFiles = [ustr(i) for i in recentFileQStringList]
else:
self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
position = settings.get(SETTING_WIN_POSE, QPoint(0, 0))
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
if saveDir is not None and os.path.exists(saveDir):
self.defaultSaveDir = saveDir
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
# self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
# Add chris
Shape.difficult = self.difficult
def xbool(x):
if isinstance(x, QVariant):
return x.toBool()
return bool(x)
if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
self.actions.advancedMode.setChecked(True)
self.toggleAdvancedMode()
# Populate the File menu dynamically.
self.updateFileMenu()
# Since loading the file may take some time, make sure it runs in the background.
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
# Display cursor coordinates at the right of status bar
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Open the directory if the default file path is a directory
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath)
self.save_label.setText("Save DIR: " + self.defaultSaveDir)
## Support Functions ##
def noShapes(self):
return not self.itemsToShapes
def toggleAdvancedMode(self, value=True):
self._beginner = not value
self.canvas.setEditing(True)
self.populateModeActions()
self.editButton.setVisible(not value)
if value:
self.actions.createMode.setEnabled(True)
self.actions.editMode.setEnabled(False)
self.dock.setFeatures(self.dock.features() | self.dockFeatures)
else:
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
def populateModeActions(self):
if self.beginner():
tool, menu = self.actions.beginner, self.actions.beginnerContext
else:
tool, menu = self.actions.advanced, self.actions.advancedContext
self.tools.clear()
addActions(self.tools, tool)
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], menu)
self.menus.edit.clear()
actions = (self.actions.create,) if self.beginner()\
else (self.actions.createMode, self.actions.editMode)
addActions(self.menus.edit, actions + self.actions.editMenu)
def setBeginner(self):
self.tools.clear()
addActions(self.tools, self.actions.beginner)
def setAdvanced(self):
self.tools.clear()
addActions(self.tools, self.actions.advanced)
def setDirty(self):
self.dirty = True
self.actions.save.setEnabled(True)
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
def queueEvent(self, function):
QTimer.singleShot(0, function)
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def resetState(self):
self.itemsToShapes.clear()
self.shapesToItems.clear()
self.labelList.clear()
self.filePath = None
#self.old_Filepath = None
self.imageData = None
self.labelFile = None
self.canvas.resetState()
self.labelCoordinates.clear()
self.canvas.itemsToShapes.clear()
self.canvas.shapesToItems.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
## Callbacks ##
def showTutorialDialog(self):
subprocess.Popen([self.screencastViewer, self.screencast])
def showInfoDialog(self):
msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
QMessageBox.information(self, u'Information', msg)
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
self.actions.create.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditing(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
def toggleDrawMode(self, edit=True):
self.canvas.setEditing(edit)
self.actions.createMode.setEnabled(edit)
self.actions.editMode.setEnabled(not edit)
def setCreateMode(self):
assert self.advanced()
self.toggleDrawMode(False)
def setEditMode(self):
assert self.advanced()
self.toggleDrawMode(True)
self.labelSelectionChanged()
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
def popLabelListMenu(self, point):
self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
if not self.canvas.editing():
return
item = self.currentItem()
text = self.labelDialog.popUp(item.text())
if text is not None:
item.setText(text)
item.setBackground(generateColorByText(text))
self.setDirty()
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
currIndex = self.mImgList.index(ustr(item.text()))
if currIndex < len(self.mImgList):
filename = self.mImgList[currIndex]
if filename:
self.loadFile(filename)
def diritemDoubleClicked(self, item=None):
currIndex = self.mDirList.index(ustr(item.text()))
if currIndex < len(self.mDirList):
foldername = self.mDirList[currIndex]
if foldername:
self.defaultSaveDir_folder = os.path.join(self.defaultSaveDir, foldername)
self.importDirImages(os.path.join(self.lastOpenDir,foldername))
self.save_label.setText("Save DIR: " + self.defaultSaveDir_folder)
self.fileListWidget.setFocus(True)
# self.fileListWidget.setSelected(0)
###
def diritemChanged(self, item=None):
# QMessageBox.warning(self, u'changed', msg, yes | no)
# self.savebtn_label.setText('Not saved')
# self.savebtn_label.setStyleSheet('color: red')
# if item.text() in self.checkList:
# self.checkList.remove(item.text())
# else:
# insort(self.checkList, item.text())
# self.savebtncnt_label.setText('{0}/{1}'.format(len(self.checkList), self.foldercnt))
###
with self.lmdb.begin(write=True) as txn:
flag = txn.put(item.text().encode('ascii'), "1".encode('ascii'), overwrite=False)
if flag:
self.checknum += 1
else:
# QMessageBox.warning(self, u'Duplicate', "Already checked")
txn.delete(item.text().encode('ascii'))
self.checknum -= 1
print("put: ",flag)
self.savebtncnt_label.setText('{0}/{1}'.format(self.checknum, self.foldercnt))
###
# def saveButtonClicked(self):
# self.savebtn_label.setText('')
# file = QFile(server_path + dataset + '/'+ self.logged_id + '.txt')
# if file.open(QFile.WriteOnly | QFile.Text):
# for check in self.checkList:
# file.write(bytearray(check + '\n', 'utf8'))
# file.close()
# print('saved')
# Add chris
def btnstate(self, item= None):
""" Function to handle difficult examples
Update on each object """
if not self.canvas.editing():
return
item = self.currentItem()
        if not item:  # If no item is selected, take the last one
item = self.labelList.item(self.labelList.count()-1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except:
pass
# Checked and Update
try:
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
except:
pass
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape:
self.shapesToItems[shape].setSelected(True)
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
self.actions.edit.setEnabled(selected)
self.actions.shapeLineColor.setEnabled(selected)
self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
item.setBackground(generateColorByText(shape.label))
self.itemsToShapes[item] = shape
self.shapesToItems[shape] = item
self.labelList.addItem(item)
self.canvas.itemsToShapes[item] = shape
self.canvas.shapesToItems[shape] = item
for action in self.actions.onShapesPresent:
action.setEnabled(True)
def remLabel(self, shape):
if shape is None:
# print('rm empty label')
return
item = self.shapesToItems[shape]
self.labelList.takeItem(self.labelList.row(item))
del self.shapesToItems[shape]
del self.itemsToShapes[item]
del self.canvas.shapesToItems[shape]
del self.canvas.itemsToShapes[item]
def loadLabels(self, shapes):
s = []
for label, points, line_color, fill_color, difficult in shapes:
shape = Shape(label=label)
for x, y in points:
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
shape.close()
s.append(shape)
if line_color:
shape.line_color = QColor(*line_color)
else:
shape.line_color = generateColorByText(label)
if fill_color:
shape.fill_color = QColor(*fill_color)
else:
shape.fill_color = generateColorByText(label)
self.addLabel(shape)
self.canvas.loadShapes(s)
def saveLabels(self, annotationFilePath):
annotationFilePath = ustr(annotationFilePath)
if self.labelFile is None:
self.labelFile = LabelFile()
self.labelFile.verified = self.canvas.verified
def format_shape(s):
return dict(label=s.label,
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points],
# add chris
difficult = s.difficult)
shapes = [format_shape(shape) for shape in self.canvas.shapes]
        # Can add different annotation formats here
try:
if self.usingPascalVocFormat is True:
print ('Img: ' + self.filePath + ' -> Its xml: ' + annotationFilePath)
self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
else:
self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
return True
except LabelFileError as e:
self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def labelSelectionChanged(self):
item = self.currentItem()
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
shape = self.itemsToShapes[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
shape = self.itemsToShapes[item]
label = item.text()
if label != shape.label:
shape.label = item.text()
shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
"""
if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
if len(self.labelHist) > 0:
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
else:
text = self.defaultLabelTextLine.text()
# Add Chris
self.diffcButton.setChecked(False)
if text is not None:
self.prevLabelText = text
generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, generate_color, generate_color)
self.addLabel(shape)
if self.beginner(): # Switch to edit mode.
self.canvas.setEditing(True)
self.actions.create.setEnabled(True)
else:
self.actions.editMode.setEnabled(True)
self.setDirty()
if text not in self.labelHist:
self.labelHist.append(text)
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
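        # Worked example (illustrative numbers, not taken from the code above): with
        # margin = 0.1 and a 1000 px wide scroll area, a cursor at x = 100 gives
        # move_x = (100 - 100) / 800 = 0 (scroll fully left), x = 500 gives 0.5,
        # and x = 900 gives 1.0 (fully right); values outside the padded band are
        # clamped to [0, 1] before being applied to the scrollbar deltas.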
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
filePath = str(filePath)
unicodeFilePath = ustr(filePath)
# Tzutalin 20160906 : Add file list and dock to move faster
# Highlight the file item
if unicodeFilePath and self.fileListWidget.count() > 0:
index = self.mImgList.index(unicodeFilePath)
fileWidgetItem = self.fileListWidget.item(index)
fileWidgetItem.setSelected(True)
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
else:
# Load image:
# read data first and store for saving into label file.
self.imageData = read(unicodeFilePath, None)
self.labelFile = None
image = QImage.fromData(self.imageData)
if image.isNull():
self.errorMessage(u'Error opening file',
u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
self.status("Error reading %s" % unicodeFilePath)
return False
self.status("Loaded %s" % os.path.basename(unicodeFilePath))
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
if self.labelFile:
self.loadLabels(self.labelFile.shapes)
self.setClean()
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
bsucces = True
        # Load the label XML file and show bounding boxes according to its filename
if self.usingPascalVocFormat is True:
if self.defaultSaveDir_folder is not None:
basename = os.path.basename(
os.path.splitext(self.filePath)[0]) + XML_EXT
xmlPath = os.path.join(self.defaultSaveDir_folder, basename)
bsucces = self.loadPascalXMLByFilename(xmlPath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
if os.path.isfile(xmlPath):
bsucces = self.loadPascalXMLByFilename(xmlPath)
if bsucces is False:
self.anno_label.setText("")
self.diffcButton.setChecked(False)
self.old_Filepath = str(self.old_Filepath)
self.old_Filepath = ustr(self.old_Filepath)
# print("old: ",self.old_Filepath)
basename_old = os.path.basename(
os.path.splitext(self.old_Filepath)[0]) + XML_EXT
xmlPath_old = os.path.join(self.defaultSaveDir_folder, basename_old)
bsucces = self.loadPascalXMLByFilename(xmlPath_old, False)
self.diffcButton.setChecked(False)
if bsucces is True:
self.actions.save.setEnabled(True)
else:
self.anno_label.setText(xmlPath)
self.anno_label.setStyleSheet('color: red')
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
if self.labelList.count():
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
self.labelList.item(self.labelList.count()-1).setSelected(True)
self.canvas.setFocus(True)
return True
return False
def resizeEvent(self, event):
if self.canvas and not self.image.isNull()\
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
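        # Worked example (illustrative numbers): for a 400x400 central widget
        # (a1 = 1.0) and an 800x600 pixmap (a2 ~= 1.33), a2 >= a1, so the scale is
        # roughly w1 / w2 = 400 / 800 = 0.5 (ignoring the 2 px epsilon), giving a
        # 400x300 image that fits without scrollbars; otherwise h1 / h2 is used.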
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
        # If it loads images from dir, don't load it at the beginning
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ""
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ""
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings.save()
## User Dialogs ##
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
extensions = ['.jpeg', '.jpg', '.png', '.bmp']
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
images.sort(key=lambda x: x.lower())
return images
def scanAllDirs(self, folderPath):
pre_dirs = os.listdir(folderPath)
# for root, dirs, files in os.walk(folderPath):
# for file in files:
# if file.lower().endswith(tuple(extensions)):
# relativePath = os.path.join(root, file)
# path = ustr(os.path.abspath(relativePath))
# images.append(path)
pre_dirs.sort(key=lambda x: x.lower())
return pre_dirs
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
self.save_label.setText("Save DIR: " + dirpath)
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
self.statusBar().showMessage('%s . Annotation will be saved to %s' %
('Change saved folder', self.defaultSaveDir))
self.statusBar().show()
def openAnnotationDialog(self, _value=False):
if self.filePath is None:
self.statusBar().showMessage('Please select image first')
self.statusBar().show()
return
path = os.path.dirname(ustr(self.filePath))\
if self.filePath else '.'
if self.usingPascalVocFormat:
filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose a xml file' % __appname__, path, filters))
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
self.importDirs(targetDirPath)
def importJobs(self, envpath):
envpath = server_path + envpath.split("/")[-1]
self.lmdb=lmdb.open(os.path.join(envpath,self.logged_id))
return json.load(open(os.path.join(envpath,"job_assign.json")))
def importDirs(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
# self.dirname = dirpath
# self.filePath = None
job_dict = self.importJobs(dirpath)
# print(job_dict)
###
# job_list = list(chain(job_list))
# job_list = job_dict.values()
# job_list = [k for j in job_list for k in j]
# print(job_list)
job_list = self.verJobList
with self.lmdb.begin() as txn:
cursor = txn.cursor()
for key, value in cursor:
# print(key.decode('ascii'), value.decode('ascii'))
insort(self.checkList, key.decode('ascii'))
self.checknum = len(self.checkList)
self.folderListWidget.clear()
self.mDirList = self.scanAllDirs(dirpath)
# self.openNextImg()
###
for dirPath in self.mDirList:
if dirPath in job_list:
self.foldercnt += 1
item = QListWidgetItem(dirPath)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
if item.text() in self.checkList:
item.setCheckState(Qt.Checked)
else:
item.setCheckState(Qt.Unchecked)
self.folderListWidget.addItem(item)
self.savebtncnt_label.setText('{0}/{1}'.format(len(self.checkList), self.foldercnt))
self.edit_label.setText("Edit DIR: " + dirpath)
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
# self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.openNextImg()
self.fileListWidget.setFocus(True)
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
self.edit_label.setText("Edit DIR: " + dirpath)
def verifyImg(self, _value=False):
        # Toggle the verified flag of the current image's annotation and save it
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
                # If the labelling file does not exist yet, create it and
# re-save it with the verified attribute.
self.saveFile()
self.labelFile.toggleVerify()
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
        # Proceed to the previous image, saving without a dialog if auto-saving is enabled
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.dirty is True:
self.saveFile()
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
if filename:
self.loadFile(filename)
def openNextImg(self, _value=False):
        # Proceed to the next image, saving without a dialog if auto-saving is enabled
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.dirty is True:
self.saveFile()
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
# print("now ", self.filePath)
self.old_Filepath=self.filePath
filename = None
if self.filePath is None:
filename = self.mImgList[0]
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
if filename:
self.loadFile(filename)
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def saveFile(self, _value=False):
if self.defaultSaveDir_folder is not None and len(ustr(self.defaultSaveDir_folder)):
if self.filePath:
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0] + XML_EXT
savedPath = os.path.join(ustr(self.defaultSaveDir_folder), savedFileName)
self._saveFile(savedPath)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0] + XML_EXT
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog())
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
return dlg.selectedFiles()[0]
return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
return not (self.dirty and not self.discardChangesDialog())
def discardChangesDialog(self):
yes, no = QMessageBox.Yes, QMessageBox.No
msg = u'You have unsaved changes, proceed anyway?'
return yes == QMessageBox.warning(self, u'Attention', msg, yes | no)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def chooseColor1(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.lineColor = color
Shape.line_color = color
self.canvas.setDrawingColor(color)
self.canvas.update()
self.setDirty()
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def chshapeLineColor(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.canvas.selectedShape.line_color = color
self.canvas.update()
self.setDirty()
def chshapeFillColor(self):
color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
default=DEFAULT_FILL_COLOR)
if color:
self.canvas.selectedShape.fill_color = color
self.canvas.update()
self.setDirty()
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile) is True:
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def loadPascalXMLByFilename(self, xmlPath, current=True):
if self.filePath is None:
return False
if os.path.isfile(xmlPath) is False:
return False
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self.loadLabels(shapes)
if current:
self.canvas.verified = tVocParseReader.verified
else:
self.canvas.verified = False
return True
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
except:
return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("app"))
login = Login()
    # Tzutalin 201705+: Accept extra arguments to change predefined class file
# Usage : labelImg.py image predefClassFile
if login.exec_() == QDialog.Accepted:
# win = MainWindow(login.logged_id,argv[1] if len(argv) >= 2 else None,
# argv[2] if len(argv) >= 3 else os.path.join(
# os.path.dirname(sys.argv[0]),
# 'data', 'predefined_classes.txt'))
win = MainWindow(login.logged_id)
# win.logged_id=login.logged_id
win.show()
return app, win
else:
sys.exit()
def main(argv=[]):
'''construct main app and run it'''
app, _win = get_main_app(argv)
return app.exec_()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
py
|
1a56189017ded847ad4b01e086df6ccd4dc9d74e
|
#!/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Usage: python bpacking_avx512_codegen.py > bpacking_avx512_generated.h
def print_unpack_bit_func(bit):
shift = 0
shifts = []
in_index = 0
inls = []
mask = (1 << bit) - 1
bracket = "{"
print(
f"inline static const uint32_t* unpack{bit}_32_avx512(const uint32_t* in, uint32_t* out) {bracket}")
print(" uint32_t mask = 0x%x;" % mask)
print(" __m512i reg_shifts, reg_inls, reg_masks;")
print(" __m512i results;")
print("")
for i in range(32):
if shift + bit == 32:
shifts.append(shift)
inls.append(f"in[{in_index}]")
in_index += 1
shift = 0
elif shift + bit > 32: # cross the boundary
inls.append(
f"in[{in_index}] >> {shift} | in[{in_index + 1}] << {32 - shift}")
in_index += 1
shift = bit - (32 - shift)
shifts.append(0) # zero shift
else:
shifts.append(shift)
inls.append(f"in[{in_index}]")
shift += bit
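    # Illustrative trace (not emitted into the generated header): for bit = 3 the
    # loop produces shifts 0, 3, 6, ..., 27, then the 32-bit word boundary is
    # crossed and inls gets "in[0] >> 30 | in[1] << 2" with a zero shift, so each
    # output lane selects exactly one 3-bit field in the vectorized shift-and-mask.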
print(" reg_masks = _mm512_set1_epi32(mask);")
print("")
print(" // shift the first 16 outs")
print(
f" reg_shifts = _mm512_set_epi32({shifts[15]}, {shifts[14]}, {shifts[13]}, {shifts[12]},")
print(
f" {shifts[11]}, {shifts[10]}, {shifts[9]}, {shifts[8]},")
print(
f" {shifts[7]}, {shifts[6]}, {shifts[5]}, {shifts[4]},")
print(
f" {shifts[3]}, {shifts[2]}, {shifts[1]}, {shifts[0]});")
print(f" reg_inls = _mm512_set_epi32({inls[15]}, {inls[14]},")
print(f" {inls[13]}, {inls[12]},")
print(f" {inls[11]}, {inls[10]},")
print(f" {inls[9]}, {inls[8]},")
print(f" {inls[7]}, {inls[6]},")
print(f" {inls[5]}, {inls[4]},")
print(f" {inls[3]}, {inls[2]},")
print(f" {inls[1]}, {inls[0]});")
print(
" results = _mm512_and_epi32(_mm512_srlv_epi32(reg_inls, reg_shifts), reg_masks);")
print(" _mm512_storeu_si512(out, results);")
print(" out += 16;")
print("")
print(" // shift the second 16 outs")
print(
f" reg_shifts = _mm512_set_epi32({shifts[31]}, {shifts[30]}, {shifts[29]}, {shifts[28]},")
print(
f" {shifts[27]}, {shifts[26]}, {shifts[25]}, {shifts[24]},")
print(
f" {shifts[23]}, {shifts[22]}, {shifts[21]}, {shifts[20]},")
print(
f" {shifts[19]}, {shifts[18]}, {shifts[17]}, {shifts[16]});")
print(f" reg_inls = _mm512_set_epi32({inls[31]}, {inls[30]},")
print(f" {inls[29]}, {inls[28]},")
print(f" {inls[27]}, {inls[26]},")
print(f" {inls[25]}, {inls[24]},")
print(f" {inls[23]}, {inls[22]},")
print(f" {inls[21]}, {inls[20]},")
print(f" {inls[19]}, {inls[18]},")
print(f" {inls[17]}, {inls[16]});")
print(
" results = _mm512_and_epi32(_mm512_srlv_epi32(reg_inls, reg_shifts), reg_masks);")
print(" _mm512_storeu_si512(out, results);")
print(" out += 16;")
print("")
print(f" in += {bit};")
print("")
print(" return in;")
print("}")
def print_unpack_bit0_func():
print(
"inline static const uint32_t* unpack0_32_avx512(const uint32_t* in, uint32_t* out) {")
print(" memset(out, 0x0, 32 * sizeof(*out));")
print(" out += 32;")
print("")
print(" return in;")
print("}")
def print_unpack_bit32_func():
print(
"inline static const uint32_t* unpack32_32_avx512(const uint32_t* in, uint32_t* out) {")
print(" memcpy(out, in, 32 * sizeof(*out));")
print(" in += 32;")
print(" out += 32;")
print("")
print(" return in;")
print("}")
def print_copyright():
print(
"""// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.""")
def print_note():
print("//")
print("// Automatically generated file; DO NOT EDIT.")
def main():
print_copyright()
print_note()
print("")
print("#pragma once")
print("")
print("#include <stdint.h>")
print("#include <string.h>")
print("")
print("#ifdef _MSC_VER")
print("#include <intrin.h>")
print("#else")
print("#include <immintrin.h>")
print("#endif")
print("")
print("namespace arrow {")
print("namespace internal {")
print("")
print_unpack_bit0_func()
print("")
for i in range(1, 32):
print_unpack_bit_func(i)
print("")
print_unpack_bit32_func()
print("")
print("} // namespace internal")
print("} // namespace arrow")
if __name__ == '__main__':
main()
|
py
|
1a561920500bf5e69f9c05af4871eb4c5964e33b
|
# -*- coding: utf-8 -*-
import serial
import time
ser = serial.Serial("/dev/ttyS0", 115200)
def getTFminiData():
while True:
#time.sleep(0.1)
count = ser.in_waiting
if count > 8:
recv = ser.read(9)
ser.reset_input_buffer()
# type(recv), 'str' in python2(recv[0] = 'Y'), 'bytes' in python3(recv[0] = 89)
# type(recv[0]), 'str' in python2, 'int' in python3
if recv[0] == 0x59 and recv[1] == 0x59: #python3
distance = recv[2] + recv[3] * 256
strength = recv[4] + recv[5] * 256
print('(', distance, ',', strength, ')')
ser.reset_input_buffer()
if recv[0] == 'Y' and recv[1] == 'Y': #python2
lowD = int(recv[2].encode('hex'), 16)
highD = int(recv[3].encode('hex'), 16)
lowS = int(recv[4].encode('hex'), 16)
highS = int(recv[5].encode('hex'), 16)
distance = lowD + highD * 256
strength = lowS + highS * 256
print(distance, strength)
# you can also distinguish python2 and python3:
#import sys
#sys.version[0] == '2' #True, python2
#sys.version[0] == '3' #True, python3
if __name__ == '__main__':
try:
if ser.is_open == False:
ser.open()
getTFminiData()
except KeyboardInterrupt: # Ctrl+C
if ser != None:
ser.close()
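# Frame layout note (illustrative, based on the common TFmini data sheet rather
# than anything guaranteed by this script): each 9-byte frame is
#   0x59 0x59 Dist_L Dist_H Strength_L Strength_H <reserved/temp> <reserved/temp> Checksum
# so distance = Dist_L + Dist_H * 256 (cm) and strength = Strength_L + Strength_H * 256,
# matching the arithmetic in getTFminiData() above. The checksum byte is not verified here.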
|
py
|
1a56196f7fb385e1575ab1fe3271c835b71b81b9
|
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
class Agents(BaseModel):
name: str
integration: str
id: str
class CampaignTasksOut(BaseModel):
name: str
scheduled_date: str
start_date: str = None
end_date: str = None
agents: List[Agents]
dependencies: List[str]
state: str
@validator('scheduled_date', pre=True, always=True)
def _get_scheduled_date(cls, v):
return str(datetime.fromtimestamp(v['$date']/1000))
@validator('start_date', pre=True, always=True)
def _get_start_date(cls, v):
return None if not v else str(datetime.fromtimestamp(v['$date']/1000))
@validator('end_date', pre=True, always=True)
def _get_end_date(cls, v):
return None if not v else str(datetime.fromtimestamp(v['$date']/1000))
class CampaignsOut(BaseModel):
id: str = Field(None, alias='_id')
group_id: str
name: str
saved_date: str
tasks: List[CampaignTasksOut]
@validator('id', pre=True, always=True)
def _get_id(cls, v):
return v['$oid']
@validator('saved_date', pre=True, always=True)
def _get_saved_date(cls, v):
return str(datetime.fromtimestamp(v['$date']/1000))
# Data input
class CommandIn(BaseModel):
reference: str = None
reference_name: str = None
technique_name: str = None
kill_chain_phase: str = None
technique_id: str = None
category: str
integration: str
module: str
input: Dict
sleep: str = 1
class DependencyIn(BaseModel):
source: str
destination: str
class CampaignTaskIn(BaseModel):
name: str
sleep: int
scheduled_date: datetime = None
commands: List[CommandIn]
agents: List[Agents]
@validator('scheduled_date', pre=True, always=True)
def _set_date(cls, v):
return v or datetime.now()
class CampaignIn(BaseModel):
name: str
tasks: List[CampaignTaskIn]
dependencies: List[DependencyIn]
# Filtered values for denormalization
class CampaignTaskDenomIn(BaseModel):
name: str
scheduled_date: datetime = None
agents: List[Agents]
@validator('scheduled_date', pre=True, always=True)
def _set_date(cls, v):
return v or datetime.now()
class CampaignDenomIn(BaseModel):
name: str
group_id: str
tasks: List[CampaignTaskDenomIn]
dependencies: List[DependencyIn]
class CreateCampaignTasksOut(BaseModel):
task: str
class ScheduledTasksOut(BaseModel):
agent: str
queue: List[str]
class CreateCampaignOut(BaseModel):
campaign: str
group_id: str
scheduled_tasks: List[ScheduledTasksOut]
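# A minimal usage sketch (assumption: pydantic v1-style API, consistent with the
# validators above). It parses a made-up MongoDB extended-JSON document, letting
# the validators unwrap the '$oid' and '$date' wrappers.
if __name__ == "__main__":
    raw_campaign = {
        "_id": {"$oid": "64b0c0ffee0000000000abcd"},  # hypothetical ObjectId
        "group_id": "group-1",
        "name": "demo-campaign",
        "saved_date": {"$date": 1609459200000},  # milliseconds since the epoch
        "tasks": [
            {
                "name": "task-1",
                "scheduled_date": {"$date": 1609459200000},
                "start_date": None,
                "end_date": None,
                "agents": [{"name": "agent-1", "integration": "ssh", "id": "a1"}],
                "dependencies": [],
                "state": "queued",
            }
        ],
    }
    print(CampaignsOut.parse_obj(raw_campaign).json(indent=2))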
|
py
|
1a561a712b4911c8aeaee1745420e300409062e2
|
import os
import datetime
import sys
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("Test")
logging.debug("moretest")
logging.error("uhoh")
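# Note (illustrative output): because basicConfig sets level=INFO, the
# logging.debug call above is filtered out; only the "Test" and "uhoh" lines are
# emitted, e.g.
#   2021-01-01 00:00:00 INFO     Test
#   2021-01-01 00:00:00 ERROR    uhoh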
|
py
|
1a561c0dc8f999781317132f612a97b71307e360
|
__version__ = "0.0.4"
from .camloop import camloop
|
py
|
1a561c2b94171426dde122a212544a14cf71cd5c
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPolicyDefinitionResult',
'AwaitableGetPolicyDefinitionResult',
'get_policy_definition',
]
@pulumi.output_type
class GetPolicyDefinitionResult:
"""
The policy definition.
"""
def __init__(__self__, description=None, display_name=None, id=None, metadata=None, mode=None, name=None, parameters=None, policy_rule=None, policy_type=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
pulumi.set(__self__, "metadata", metadata)
if mode and not isinstance(mode, str):
raise TypeError("Expected argument 'mode' to be a str")
pulumi.set(__self__, "mode", mode)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parameters and not isinstance(parameters, dict):
raise TypeError("Expected argument 'parameters' to be a dict")
pulumi.set(__self__, "parameters", parameters)
if policy_rule and not isinstance(policy_rule, dict):
raise TypeError("Expected argument 'policy_rule' to be a dict")
pulumi.set(__self__, "policy_rule", policy_rule)
if policy_type and not isinstance(policy_type, str):
raise TypeError("Expected argument 'policy_type' to be a str")
pulumi.set(__self__, "policy_type", policy_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The policy definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The display name of the policy definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the policy definition.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy definition metadata. Metadata is an open ended object and is typically a collection of key value pairs.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def mode(self) -> Optional[str]:
"""
The policy definition mode. Some examples are All, Indexed, Microsoft.KeyVault.Data.
"""
return pulumi.get(self, "mode")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the policy definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Mapping[str, 'outputs.ParameterDefinitionsValueResponse']]:
"""
The parameter definitions for parameters used in the policy rule. The keys are the parameter names.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyRule")
def policy_rule(self) -> Optional[Any]:
"""
The policy rule.
"""
return pulumi.get(self, "policy_rule")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[str]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, Custom, and Static.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource (Microsoft.Authorization/policyDefinitions).
"""
return pulumi.get(self, "type")
class AwaitableGetPolicyDefinitionResult(GetPolicyDefinitionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPolicyDefinitionResult(
description=self.description,
display_name=self.display_name,
id=self.id,
metadata=self.metadata,
mode=self.mode,
name=self.name,
parameters=self.parameters,
policy_rule=self.policy_rule,
policy_type=self.policy_type,
type=self.type)
def get_policy_definition(policy_definition_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyDefinitionResult:
"""
The policy definition.
:param str policy_definition_name: The name of the policy definition to get.
"""
__args__ = dict()
__args__['policyDefinitionName'] = policy_definition_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:authorization/v20200901:getPolicyDefinition', __args__, opts=opts, typ=GetPolicyDefinitionResult).value
return AwaitableGetPolicyDefinitionResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
metadata=__ret__.metadata,
mode=__ret__.mode,
name=__ret__.name,
parameters=__ret__.parameters,
policy_rule=__ret__.policy_rule,
policy_type=__ret__.policy_type,
type=__ret__.type)
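# Illustrative use inside a Pulumi program (the definition name below is made up):
#
#   result = get_policy_definition(policy_definition_name="allowed-locations")
#   pulumi.export("policyMode", result.mode)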
|
py
|
1a561cab0049b96932a574f2596b62caa2687831
|
import os
from flask import Config as FlaskConfig
class Config(FlaskConfig):
pass
def get_config():
retval = Config(os.path.abspath(__file__))
retval.from_object(os.environ["DRAINBOW_MCC_CONFIG"])
return retval
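# Illustrative usage (the settings path below is hypothetical):
#
#   export DRAINBOW_MCC_CONFIG="myapp.settings.ProductionConfig"
#
# get_config() then builds the Config and loads that object's attributes via from_object().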
|
py
|
1a561dca3c2cc4167e86ab5ce59d56d7363d7516
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import time
import torch
import torch.utils.data
import torch.optim as optim
import numpy as np
import math
import random
import os
import datetime
from optimization.training import train, evaluate
from utils.load_data import load_dataset
parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows')
parser.add_argument('-d', '--dataset', type=str, default='cifar10',
choices=['cifar10', 'imagenet32', 'imagenet64'],
metavar='DATASET',
help='Dataset choice.')
parser.add_argument('-nc', '--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument('-li', '--log_interval', type=int, default=20, metavar='LOG_INTERVAL',
help='how many batches to wait before logging training status')
parser.add_argument('--evaluate_interval_epochs', type=int, default=25,
help='Evaluate per how many epochs')
parser.add_argument('-od', '--out_dir', type=str, default='snapshots', metavar='OUT_DIR',
help='output directory for model snapshots etc.')
fp = parser.add_mutually_exclusive_group(required=False)
fp.add_argument('-te', '--testing', action='store_true', dest='testing',
help='evaluate on test set after training')
fp.add_argument('-va', '--validation', action='store_false', dest='testing',
help='only evaluate on validation set')
parser.set_defaults(testing=True)
# optimization settings
parser.add_argument('-e', '--epochs', type=int, default=2000, metavar='EPOCHS',
help='number of epochs to train (default: 2000)')
parser.add_argument('-es', '--early_stopping_epochs', type=int, default=300, metavar='EARLY_STOPPING',
help='number of early stopping epochs')
parser.add_argument('-bs', '--batch_size', type=int, default=96, metavar='BATCH_SIZE',
help='input batch size for training (default: 100)')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001, metavar='LEARNING_RATE',
help='learning rate')
parser.add_argument('--warmup', type=int, default=10,
help='number of warmup epochs')
parser.add_argument('--data_augmentation_level', type=int, default=2,
help='data augmentation level')
parser.add_argument('--variable_type', type=str, default='discrete',
help='variable type of data distribution: discrete/continuous',
choices=['discrete', 'continuous'])
parser.add_argument('--distribution_type', type=str, default='logistic',
choices=['logistic', 'normal', 'steplogistic'],
help='distribution type: logistic/normal')
parser.add_argument('--n_flows', type=int, default=8,
help='number of flows per level')
parser.add_argument('--n_levels', type=int, default=3,
help='number of levels')
parser.add_argument('--n_bits', type=int, default=8,
help='')
# ---------------- SETTINGS CONCERNING NETWORKS -------------
parser.add_argument('--densenet_depth', type=int, default=8,
help='Depth of densenets')
parser.add_argument('--n_channels', type=int, default=512,
help='number of channels in coupling and splitprior')
# ---------------- ----------------------------- -------------
# ---------------- SETTINGS CONCERNING COUPLING LAYERS -------------
parser.add_argument('--coupling_type', type=str, default='shallow',
choices=['shallow', 'resnet', 'densenet'],
help='Type of coupling layer')
parser.add_argument('--splitfactor', default=0, type=int,
help='Split factor for coupling layers.')
parser.add_argument('--split_quarter', dest='split_quarter', action='store_true',
help='Split coupling layer on quarter')
parser.add_argument('--no_split_quarter', dest='split_quarter', action='store_false')
parser.set_defaults(split_quarter=True)
# ---------------- ----------------------------------- -------------
# ---------------- SETTINGS CONCERNING SPLITPRIORS -------------
parser.add_argument('--splitprior_type', type=str, default='shallow',
choices=['none', 'shallow', 'resnet', 'densenet'],
help='Type of splitprior. Use \'none\' for no splitprior')
# ---------------- ------------------------------- -------------
# ---------------- SETTINGS CONCERNING PRIORS -------------
parser.add_argument('--n_mixtures', type=int, default=1,
help='number of mixtures')
# ---------------- ------------------------------- -------------
parser.add_argument('--hard_round', dest='hard_round', action='store_true',
help='Rounding of translation in discrete models. Weird '
'probabilistic implications, only for experimental phase')
parser.add_argument('--no_hard_round', dest='hard_round', action='store_false')
parser.set_defaults(hard_round=True)
parser.add_argument('--round_approx', type=str, default='smooth',
choices=['smooth', 'stochastic'])
parser.add_argument('--lr_decay', default=0.999, type=float,
help='Learning rate')
parser.add_argument('--temperature', default=1.0, type=float,
help='Temperature used for BackRound. It is used in '
                         'the SmoothRound module. '
                         '(default=1.0)')
# gpu/cpu
parser.add_argument('--gpu_num', type=int, default=0, metavar='GPU',
help='choose GPU to run on.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
def run(args, kwargs):
print('\nMODEL SETTINGS: \n', args, '\n')
print("Random Seed: ", args.manual_seed)
if 'imagenet' in args.dataset and args.evaluate_interval_epochs > 5:
args.evaluate_interval_epochs = 5
# ==================================================================================================================
# SNAPSHOTS
# ==================================================================================================================
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, args.variable_type + '_' + args.distribution_type + args.dataset)
snap_dir = snapshots_path
snap_dir += '_' + 'flows_' + str(args.n_flows) + '_levels_' + str(args.n_levels)
snap_dir = snap_dir + '__' + args.model_signature + '/'
args.snap_dir = snap_dir
if not os.path.exists(snap_dir):
os.makedirs(snap_dir)
with open(snap_dir + 'log.txt', 'a') as ff:
print('\nMODEL SETTINGS: \n', args, '\n', file=ff)
# SAVING
torch.save(args, snap_dir + '.config')
# ==================================================================================================================
# LOAD DATA
# ==================================================================================================================
train_loader, val_loader, test_loader, args = load_dataset(args, **kwargs)
# ==================================================================================================================
# SELECT MODEL
# ==================================================================================================================
# flow parameters and architecture choice are passed on to model through args
print(args.input_size)
import models.Model as Model
model = Model.Model(args)
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.set_temperature(args.temperature)
model.enable_hard_round(args.hard_round)
model_sample = model
# ====================================
# INIT
# ====================================
    # data-dependent initialization on CPU
for batch_idx, (data, _) in enumerate(train_loader):
model(data)
break
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.DataParallel(model, dim=0)
model.to(args.device)
def lr_lambda(epoch):
return min(1., (epoch+1) / args.warmup) * np.power(args.lr_decay, epoch)
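    # Illustrative schedule values: with warmup=10 and lr_decay=0.999 the
    # multiplier is min(1, (epoch+1)/10) * 0.999**epoch, i.e. ~0.1 at epoch 0,
    # ~0.99 at epoch 9 (end of warmup), then a slow exponential decay.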
optimizer = optim.Adamax(model.parameters(), lr=args.learning_rate, eps=1.e-7)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1)
# ==================================================================================================================
# TRAINING
# ==================================================================================================================
train_bpd = []
val_bpd = []
# for early stopping
best_val_bpd = np.inf
best_train_bpd = np.inf
epoch = 0
train_times = []
model.eval()
model.train()
for epoch in range(1, args.epochs + 1):
t_start = time.time()
scheduler.step()
tr_loss, tr_bpd = train(epoch, train_loader, model, optimizer, args)
train_bpd.append(tr_bpd)
train_times.append(time.time()-t_start)
print('One training epoch took %.2f seconds' % (time.time()-t_start))
if epoch < 25 or epoch % args.evaluate_interval_epochs == 0:
v_loss, v_bpd = evaluate(
train_loader, val_loader, model, model_sample, args,
epoch=epoch, file=snap_dir + 'log.txt')
val_bpd.append(v_bpd)
            # Model save based on TRAIN performance (heavily correlated with validation performance)
if np.mean(tr_bpd) < best_train_bpd:
best_train_bpd = np.mean(tr_bpd)
best_val_bpd = v_bpd
torch.save(model, snap_dir + 'a.model')
torch.save(optimizer, snap_dir + 'a.optimizer')
print('->model saved<-')
print('(BEST: train bpd {:.4f}, test bpd {:.4f})\n'.format(
best_train_bpd, best_val_bpd))
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_bpd = np.hstack(train_bpd)
val_bpd = np.array(val_bpd)
# training time per epoch
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
print('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
# ==================================================================================================================
# EVALUATION
# ==================================================================================================================
final_model = torch.load(snap_dir + 'a.model')
test_loss, test_bpd = evaluate(
train_loader, test_loader, final_model, final_model, args,
epoch=epoch, file=snap_dir + 'test_log.txt')
print('Test loss / bpd: %.2f / %.2f' % (test_loss, test_bpd))
if __name__ == "__main__":
run(args, kwargs)
|
py
|
1a561ee1c164c26c1613f2d6c97f44e16550bbad
|
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer
app = FastAPI()
oauth2_schema = OAuth2PasswordBearer(tokenUrl="token")
@app.get("/items/")
async def read_items(token: str = Depends(oauth2_schema)):
return {"token": token}
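# Illustrative usage note (not part of the original app; assumes the module is
# served with e.g. `uvicorn main:app`): a request that carries a bearer token,
#   curl -H "Authorization: Bearer abc123" http://127.0.0.1:8000/items/
# returns {"token": "abc123"}, while a request without the header is rejected
# by OAuth2PasswordBearer with a 401 response.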
|
py
|
1a561f6fe82f4b673885cfd21bb42d99a8c44d29
|
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import sendgrid
import os
from sendgrid.helpers.mail import *
def notify_by_email(user, email):
sg = sendgrid.SendGridAPIClient(apikey=os.environ['SENDGRID_API_KEY'])
from_email = Email('[email protected]')
to_email = Email(email)
subject = 'You have not solved any problem on Leetcode for a day!'
content = Content('text/plain', open('email.txt').read().format(user=user))
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print("Sent email to %s\'s email %s. Status code: %d." % (user, email, response.status_code))
return response
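# Hedged usage sketch (illustrative only): calling the helper assumes a
# SENDGRID_API_KEY environment variable and an email.txt template in the
# working directory containing a "{user}" placeholder, e.g.
#   notify_by_email('alice', '[email protected]')
# which sends the rendered template and returns the SendGrid API response.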
|
py
|
1a561f87ceada80dd75b90a6a6ed6239f51af937
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_cert_manager_v1_certificate_status_conditions import IoCertManagerV1CertificateStatusConditions # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoCertManagerV1CertificateStatusConditions(unittest.TestCase):
"""IoCertManagerV1CertificateStatusConditions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IoCertManagerV1CertificateStatusConditions
            include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.io_cert_manager_v1_certificate_status_conditions.IoCertManagerV1CertificateStatusConditions() # noqa: E501
if include_optional :
return IoCertManagerV1CertificateStatusConditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
observed_generation = 56,
reason = '0',
status = 'True',
type = '0'
)
else :
return IoCertManagerV1CertificateStatusConditions(
status = 'True',
type = '0',
)
def testIoCertManagerV1CertificateStatusConditions(self):
"""Test IoCertManagerV1CertificateStatusConditions"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py
|
1a561fe1f0c14832461189b0a173850a4010a3a7
|
from ..context import Context
from .base import BaseTag, tag_registry
class Compose(BaseTag):
'''
arguments: |
`value`: The value to apply tags on
`tags`: A list of tag names to apply, latest first
example: |
`!Base64,Var foo`
description: |
Used internally to implement tag composition.
Usually not used in the spelt-out form.
See _Tag composition_ below.
'''
value_types = (dict,)
def enrich(self, context: Context):
value = self.data.get('value')
for tag_name in reversed(self.data['tags']):
tag_class = tag_registry[tag_name]
value = tag_class(value)
return context.enrich(value)
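# Illustrative sketch (comment only, assumed loader behaviour): a composed tag
# such as `!Base64,Var foo` is expected to reach this class as
#   {'value': 'foo', 'tags': ['Base64', 'Var']}
# enrich() walks the tag names in reverse, building Base64(Var('foo')), and
# hands the wrapped value back to context.enrich().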
|
py
|
1a562056fc68ca3997363f2bf147f287d140c4b0
|
import numpy
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
import chainer.functions
from chainer.graph_optimizations import static_code
from chainer.utils import type_check
class LinearFunction(function_node.FunctionNode):
_config_use_ideep = None
_supports_static_optimizations = True
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.argname((x_type, w_type), ('x', 'W'))
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 2,
w_type.ndim == 2,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.argname((b_type,), ('b',))
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
@static_code
def static_linear_no_bias(self, xp, optimized, inputs, outputs):
x, W = inputs
y = outputs[0]
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (isinstance(x, numpy.ndarray) and
not (x.flags.c_contiguous or x.flags.f_contiguous) and
1 in x.shape):
x = numpy.ascontiguousarray(x)
if optimized:
# Note: We can only call this function when both x and W
# have the same dtype. Otherwise, the output type (for y)
# may not be as expected (i.e., not the same dtype as x).
xp.dot(x, W.T, out=y)
else:
y[:] = x.dot(W.T).astype(x.dtype, copy=False)
@static_code
def static_add_bias(self, inputs, outputs):
bias = inputs[0]
y = outputs[0]
y += bias
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
if len(inputs) == 3:
x, W, b = inputs
else:
(x, W), b = inputs, None
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (isinstance(x, numpy.ndarray) and
not (x.flags.c_contiguous or x.flags.f_contiguous) and
1 in x.shape):
x = numpy.ascontiguousarray(x)
# In order to be compatible with the "static graph" feature, it is
# required that all output arrays of this forward
# function be allocated explicitly:
xp = cuda.get_array_module(x)
y = xp.empty((x.shape[0], W.shape[0])).astype(x.dtype)
# This is required because all of the "static_*()" functions
# use the convention that any output arrays are supplied
# as input arguments to the function. That is because it is
# not allowed for a "static_*()" function to return anything
# other than `None`. The reason is to prevent dynamic allocation
# of output arrays during execution of the static schedule
# because it would break the model.
self.static_linear_no_bias(xp, x.dtype == W.dtype, inputs=[x, W],
outputs=[y])
if len(inputs) == 3:
self.static_add_bias(inputs=[b], outputs=[y])
self.retain_inputs((0, 1)) # b is not retained
return y,
def _forward_ideep(self, inputs):
if len(inputs) == 3:
x, W, b = inputs
else:
(x, W), b = inputs, None
y = intel64.ideep.linear.Forward(
intel64.ideep.array(x),
intel64.ideep.array(W),
intel64.ideep.array(b) if b is not None else None)
self.retain_inputs((0, 1))
return y,
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gx, = LinearGradData().apply((W, gy))
ret.append(chainer.functions.cast(gx, x.dtype))
if 1 in indexes:
gW, = LinearGradWeight(W.dtype).apply((x, gy))
ret.append(chainer.functions.cast(gW, W.dtype))
if 2 in indexes:
gb = chainer.functions.sum(gy, axis=0)
ret.append(gb)
return ret
class LinearGradData(function_node.FunctionNode):
_config_use_ideep = None
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
self.retain_inputs((0, 1))
W, gy = inputs
if (isinstance(gy, numpy.ndarray) and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
gx = gy.dot(W).astype(gy.dtype, copy=False)
return gx,
def _forward_ideep(self, inputs):
self.retain_inputs((0, 1))
W, gy = inputs
gx = intel64.ideep.linear.BackwardData(
intel64.ideep.array(W),
intel64.ideep.array(gy))
return gx,
def backward(self, indexes, grad_outputs):
W, gy = self.get_retained_inputs()
ggx, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gw, = LinearGradWeight(W.dtype).apply((ggx, gy))
ret.append(chainer.functions.cast(gw, W.dtype))
if 1 in indexes:
ggy = linear(ggx, W)
ret.append(chainer.functions.cast(ggy, gy.dtype))
return ret
class LinearGradWeight(function_node.FunctionNode):
_config_use_ideep = None
def __init__(self, w_dtype):
self._w_dtype = w_dtype
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and self._w_dtype == numpy.float32
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
self.retain_inputs((0, 1))
x, gy = inputs
if (isinstance(gy, numpy.ndarray) and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
gW = gy.T.dot(x).astype(self._w_dtype, copy=False)
return gW,
def _forward_ideep(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gW = intel64.ideep.linear.BackwardWeights(
intel64.ideep.array(x),
intel64.ideep.array(gy))
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gx, = LinearGradData().apply((ggW, gy))
ret.append(chainer.functions.cast(gx, x.dtype))
if 1 in indexes:
ggy = linear(x, ggW)
ret.append(chainer.functions.cast(ggy, gy.dtype))
return ret
def linear(x, W, b=None, n_batch_axes=1):
"""Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: Y = xW^\\top + b.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which is a :math:`(s_1, s_2, \
..., s_n)`-shaped float array. Its first ``n_batch_axes``
dimensions are handled as *minibatch dimensions*. The
other dimensions are handled as concatenated one dimension whose
size must be :math:`(s_{\\rm n\\_batch\\_axes} * ... * s_n = N)`.
W (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Bias variable (optional) of shape
:math:`(M,)`.
n_batch_axes (int): The number of batch axes. The default is 1. The
input variable is reshaped into
(:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
This should be greater than 0.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
.. seealso:: :class:`~chainer.links.Linear`
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (3, 4)).astype(np.float32)
>>> W = np.random.uniform(0, 1, (5, 4)).astype(np.float32)
>>> b = np.random.uniform(0, 1, (5,)).astype(np.float32)
>>> y = F.linear(x, W, b)
>>> y.shape
(3, 5)
"""
if n_batch_axes <= 0:
raise ValueError('n_batch_axes should be greater than 0.')
if n_batch_axes > 1:
batch_shape = x.shape[:n_batch_axes]
batch_size = numpy.prod(batch_shape)
x = x.reshape(batch_size, -1)
elif x.ndim > 2:
x = x.reshape(x.shape[0], -1)
if b is None:
args = x, W
else:
args = x, W, b
y, = LinearFunction().apply(args)
if n_batch_axes > 1:
y = y.reshape(batch_shape + (-1,))
return y
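# Hedged shape example (comment only): with n_batch_axes=2 the first two axes
# are treated as minibatch dimensions, so
#   x = np.random.rand(2, 3, 4).astype(np.float32)   # flattened to (6, 4)
#   W = np.random.rand(5, 4).astype(np.float32)
#   y = linear(x, W, n_batch_axes=2)                 # y.shape == (2, 3, 5)
# i.e. the output is reshaped back to batch_shape + (-1,) after the matmul.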
|
py
|
1a5620a10ac60c963bad355eda7ef06fa1387911
|
"""Stud.IP file synchronization tool.
A command line tool that keeps track of new files on Stud.IP and downloads them to your computer.
"""
__license__ = "Unlicense"
__version__ = "2.0.0"
__author__ = __maintainer__ = "lenke182"
def _get_config_path():
import os
prefix = os.environ.get("XDG_CONFIG_HOME") or "~/.config"
path = os.path.join(prefix, "studip-sync/")
return os.path.expanduser(path)
def get_config_file():
import os
from studip_sync.arg_parser import ARGS
from studip_sync.constants import CONFIG_FILENAME
if ARGS.config:
return ARGS.config
else:
return os.path.join(CONFIG_PATH, CONFIG_FILENAME)
CONFIG_PATH = _get_config_path()
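# Illustrative note (comment only): with XDG_CONFIG_HOME unset, CONFIG_PATH
# expands to ~/.config/studip-sync/ and get_config_file() returns
# os.path.join(CONFIG_PATH, CONFIG_FILENAME) unless ARGS.config is set, in
# which case that path is used instead.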
|
py
|
1a5620b6e72721856b90cc4946ec935a75b3dd73
|
from idm.objects import dp, Event
from idm.api_utils import get_msg_id
@dp.event_register('banGetReason')
def ban_get_reason(event: Event) -> str:
reply = {}
if event.obj['local_id'] != 0:
reply['reply_to'] = get_msg_id(
event.api, event.chat.peer_id, event.obj['local_id']
)
event.api.msg_op(1, event.chat.peer_id, event.obj['message'], **reply)
return "ok"
|
py
|
1a56211cd167c90d7cb96a3f1202329db1323ecf
|
from django.apps import AppConfig
class AwwardConfig(AppConfig):
name = 'award'
|
py
|
1a562190224e80db10b9b87ed57c768dcaee88f7
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from jax.abstract_arrays import ShapedArray
from jax.core import Primitive
from jax.interpreters import xla
from ..interpreters import ad
from ..interpreters import batching
def fft(x, fft_type, fft_lengths=None):
if fft_lengths is None:
fft_lengths = x.shape
elif len(fft_lengths) == 0:
# XLA FFT doesn't support 0-rank.
return x
else:
fft_lengths = tuple(fft_lengths)
return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths)
def fft_impl(x, fft_type, fft_lengths):
return xla.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths)
def fft_abstract_eval(x, fft_type, fft_lengths):
return ShapedArray(x.shape, x.dtype)
def fft_translation_rule(c, x, fft_type, fft_lengths):
return c.Fft(x, fft_type, fft_lengths)
def fft_transpose_rule(t, fft_type, fft_lengths):
return fft(t, fft_type, fft_lengths),
def fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):
x, = batched_args
bd, = batch_dims
x = batching.bdim_at_front(x, bd)
return fft(x, fft_type, fft_lengths), 0
fft_p = Primitive('fft')
fft_p.def_impl(fft_impl)
fft_p.def_abstract_eval(fft_abstract_eval)
xla.translations[fft_p] = fft_translation_rule
ad.deflinear(fft_p, fft_transpose_rule)
batching.primitive_batchers[fft_p] = fft_batching_rule
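# Illustrative note (comment only): fft() is a thin wrapper around fft_p, so a
# call such as
#   y = fft(x, fft_type, fft_lengths=x.shape[-1:])
# binds the primitive with a tuple of lengths; fft_lengths=None falls back to
# the full shape of x, and an empty fft_lengths returns x unchanged because
# XLA's FFT does not support rank-0 transforms.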
|
py
|
1a5621cd1e8fec022451a99ae8e6730e82878ef0
|
"""
Gaussian Kernel Expansion Diagram
---------------------------------
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
plt.figure(figsize=(5, 3.75), facecolor='w')
ax = plt.axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])
ax.add_patch(plt.Rectangle((-0.5, -0.25), 0.8, 0.4,
fc='none', ec='k', lw=2))
ax.add_patch(plt.Rectangle((-1.75, 0.1), 0.8, 0.4,
fc='none', ec='k', lw=2, linestyle='dashed'))
ax.add_patch(plt.Rectangle((0.8, -0.55), 0.8, 0.4,
fc='none', ec='k', lw=2, linestyle='dashed'))
ax.add_patch(plt.Rectangle((-1.3, -0.95), 0.8, 0.4,
fc='none', ec='k', lw=2, linestyle='dashed'))
red_pts = np.array([[-0.163, 0.093],
[-0.123, -0.22],
[0.194, 0.035],
[0.146, -0.178],
[-0.387, -0.143]])
blue_pts = np.array([[-1.51, 0.17],
[-1.17, 0.36],
[-1.23, -0.68],
[-0.80, -0.83],
[1.28, -0.45],
[1.41, -0.26]])
x0 = -0.5 + 0.4
y0 = -0.25 + 0.2
ax.scatter(red_pts[:, 0], red_pts[:, 1], c='r')
ax.scatter(blue_pts[:, 0], blue_pts[:, 1], c='b')
ax.scatter([x0], [y0], c='gray')
for pt in blue_pts:
ax.annotate("", pt, (x0, y0), arrowprops=dict(arrowstyle='->',
linestyle='dashed'))
for i, pt in enumerate(red_pts):
ax.annotate("", pt, (x0, y0), arrowprops=dict(arrowstyle='<-'))
ax.text(pt[0] + 0.03, pt[1] + 0.03, '$r_{j%i}$' % (i + 1),
bbox=dict(boxstyle='round', ec='k', fc='w', alpha=0.7))
ax.annotate("R.c", (x0, y0), (0.2, 0.2),
arrowprops=dict(arrowstyle='-', color='gray'),
bbox=dict(boxstyle='round', ec='k', fc='w'))
ax.set_xlim(-1.9, 1.9)
ax.set_ylim(-1.2, 0.8)
plt.show()
|
py
|
1a562265382b20be8ac3c615bf6f244c1acead48
|
"""
Lower level of visualization framework which does three main things:
- associate visualizations with objects
- create urls to visualizations based on some target object(s)
- unpack a query string into the desired objects needed for rendering
"""
import os
import weakref
from galaxy.web import url_for
import galaxy.exceptions
from galaxy.web.base import pluginframework
from galaxy.visualization.plugins import config_parser
from galaxy.visualization.plugins import plugin as vis_plugins
from galaxy.visualization.plugins import utils as vis_utils
import logging
log = logging.getLogger(__name__)
# -------------------------------------------------------------------
class VisualizationsRegistry(pluginframework.PageServingPluginManager):
"""
Main responsibilities are:
- discovering visualization plugins in the filesystem
- testing if an object has a visualization that can be applied to it
- generating a link to controllers.visualization.render with
the appropriate params
- validating and parsing params into resources (based on a context)
used in the visualization template
"""
NAMED_ROUTE = 'visualization_plugin'
DEFAULT_BASE_URL = 'visualizations'
# these should be handled somewhat differently - and be passed onto their resp. methods in ctrl.visualization
# TODO: change/remove if/when they can be updated to use this system
#: any built in visualizations that have their own render method in ctrls/visualization
BUILT_IN_VISUALIZATIONS = [
'trackster',
'circster',
'sweepster',
'phyloviz'
]
def __str__(self):
return self.__class__.__name__
def __init__(self, app, skip_bad_plugins=True, **kwargs):
self.app = weakref.ref(app)
self.config_parser = config_parser.VisualizationsConfigParser()
super(VisualizationsRegistry, self).__init__(app, skip_bad_plugins=skip_bad_plugins, **kwargs)
def is_plugin(self, plugin_path):
"""
Determines whether the given filesystem path contains a plugin.
In this base class, all sub-directories are considered plugins.
:type plugin_path: string
:param plugin_path: relative or absolute filesystem path to the
potential plugin
:rtype: bool
:returns: True if the path contains a plugin
"""
# plugin_path must be a directory, have a config dir, and a config file matching the plugin dir name
if not os.path.isdir(plugin_path):
# super won't work here - different criteria
return False
if 'config' not in os.listdir(plugin_path):
return False
expected_config_filename = '%s.xml' % (os.path.split(plugin_path)[1])
if not os.path.isfile(os.path.join(plugin_path, 'config', expected_config_filename)):
return False
return True
def load_plugin(self, plugin_path):
"""
Create the visualization plugin object, parse its configuration file,
and return it.
:type plugin_path: string
:param plugin_path: relative or absolute filesystem path to the plugin
:rtype: ``VisualizationPlugin``
:returns: the loaded plugin
"""
plugin_name = os.path.split(plugin_path)[1]
# TODO: this is the standard/older way to config
config_file = os.path.join(plugin_path, 'config', (plugin_name + '.xml'))
config = self.config_parser.parse_file(config_file)
# config file is required, otherwise skip this visualization
if not config:
return None
plugin = self._build_plugin(plugin_name, plugin_path, config)
return plugin
def _build_plugin(self, plugin_name, plugin_path, config):
# TODO: as builder not factory
# default class
plugin_class = vis_plugins.VisualizationPlugin
# jupyter, etc
if config['plugin_type'] == 'interactive_environment':
plugin_class = vis_plugins.InteractiveEnvironmentPlugin
# js only
elif config['entry_point']['type'] == 'script':
plugin_class = vis_plugins.ScriptVisualizationPlugin
# from a static file (html, etc)
elif config['entry_point']['type'] == 'html':
plugin_class = vis_plugins.StaticFileVisualizationPlugin
plugin = plugin_class(self.app(), plugin_path, plugin_name, config, context=dict(
base_url=self.base_url,
template_cache_dir=self.template_cache_dir,
additional_template_paths=self.additional_template_paths
))
return plugin
def get_plugin(self, key):
"""
Wrap to throw error if plugin not in registry.
"""
if key not in self.plugins:
raise galaxy.exceptions.ObjectNotFound('Unknown or invalid visualization: ' + key)
return self.plugins[key]
# -- building links to visualizations from objects --
def get_visualizations(self, trans, target_object):
"""
Get the names of visualizations usable on the `target_object` and
the urls to call in order to render the visualizations.
"""
# TODO:?? a list of objects? YAGNI?
applicable_visualizations = []
for vis_name in self.plugins:
url_data = self.get_visualization(trans, vis_name, target_object)
if url_data:
applicable_visualizations.append(url_data)
return applicable_visualizations
def get_visualization(self, trans, visualization_name, target_object):
"""
Return data to build a url to the visualization with the given
`visualization_name` if it's applicable to `target_object` or
`None` if it's not.
"""
# log.debug( 'VisReg.get_visualization: %s, %s', visualization_name, target_object )
visualization = self.plugins.get(visualization_name, None)
if not visualization:
return None
data_sources = visualization.config['data_sources']
for data_source in data_sources:
# log.debug( 'data_source: %s', data_source )
# currently a model class is required
model_class = data_source['model_class']
# log.debug( '\t model_class: %s', model_class )
if not isinstance(target_object, model_class):
continue
# log.debug( '\t passed model_class' )
# TODO: not true: must have test currently
tests = data_source['tests']
if tests and not self.is_object_applicable(trans, target_object, tests):
continue
# log.debug( '\t passed tests' )
param_data = data_source['to_params']
url = self.get_visualization_url(trans, target_object, visualization, param_data)
display_name = visualization.config.get('name', None)
render_target = visualization.config.get('render_target', 'galaxy_main')
embeddable = visualization.config.get('embeddable', False)
# remap some of these vars for direct use in ui.js, PopupMenu (e.g. text->html)
return {
'href' : url,
'html' : display_name,
'target' : render_target,
'embeddable': embeddable
}
return None
def is_object_applicable(self, trans, target_object, data_source_tests):
"""
Run a visualization's data_source tests to find out if
it can be applied to the target_object.
"""
# log.debug( 'is_object_applicable( self, trans, %s, %s )', target_object, data_source_tests )
for test in data_source_tests:
test_type = test['type']
result_type = test['result_type']
test_result = test['result']
test_fn = test['fn']
# log.debug( '%s %s: %s, %s, %s, %s', str( target_object ), 'is_object_applicable',
# test_type, result_type, test_result, test_fn )
if test_type == 'isinstance':
# parse test_result based on result_type (curr: only datatype has to do this)
if result_type == 'datatype':
# convert datatypes to their actual classes (for use with isinstance)
datatype_class_name = test_result
test_result = trans.app.datatypes_registry.get_datatype_class_by_name(datatype_class_name)
if not test_result:
# but continue (with other tests) if can't find class by that name
# if self.debug:
# log.warning( 'visualizations_registry cannot find class (%s)' +
# ' for applicability test on: %s, id: %s', datatype_class_name,
# target_object, getattr( target_object, 'id', '' ) )
continue
# NOTE: tests are OR'd, if any test passes - the visualization can be applied
if test_fn(target_object, test_result):
# log.debug( '\t test passed' )
return True
return False
def get_visualization_url(self, trans, target_object, visualization, param_data):
"""
Generates a url for the visualization with `visualization`
for use with the given `target_object` with a query string built
from the configuration data in `param_data`.
"""
# precondition: the target_object should be usable by the visualization (accrd. to data_sources)
# convert params using vis.data_source.to_params
params = self.get_url_params(trans, target_object, param_data)
# we want existing visualizations to work as normal but still be part of the registry (without mod'ing)
# so generate their urls differently
url = None
if visualization.name in self.BUILT_IN_VISUALIZATIONS:
url = url_for(controller='visualization', action=visualization.name, **params)
# TODO: needs to be split off as it's own registry
elif isinstance(visualization, vis_plugins.InteractiveEnvironmentPlugin):
url = url_for('interactive_environment_plugin', visualization_name=visualization.name, **params)
else:
url = url_for(self.NAMED_ROUTE, visualization_name=visualization.name, **params)
# TODO:?? not sure if embedded would fit/used here? or added in client...
return url
def get_url_params(self, trans, target_object, param_data):
"""
Convert the applicable objects and assoc. data into a param dict
for a url query string to add to the url that loads the visualization.
"""
params = {}
for to_param_name, to_param_data in param_data.items():
# TODO??: look into params as well? what is required, etc.
target_attr = to_param_data.get('param_attr', None)
assign = to_param_data.get('assign', None)
# one or the other is needed
# assign takes precedence (goes last, overwrites)?
# NOTE this is only one level
if target_attr and vis_utils.hasattr_recursive(target_object, target_attr):
params[to_param_name] = vis_utils.getattr_recursive(target_object, target_attr)
if assign:
params[to_param_name] = assign
# NOTE!: don't expose raw ids: encode id, _id
# TODO: double encodes if from config
if params:
params = trans.security.encode_dict_ids(params)
return params
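    # Illustrative sketch (comment only, assumed config shape): a data source
    # whose to_params looks like
    #   {'dataset_id': {'param_attr': 'id'}, 'hda_ldda': {'assign': 'hda'}}
    # would yield params such as {'dataset_id': <encoded id>, 'hda_ldda': 'hda'},
    # which get_visualization_url() appends to the plugin route as a query string.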
|
py
|
1a5622d5ccbd50a54236ba4fa23a89738ce9e42b
|
from __future__ import print_function
#
# cfg file to unpack RAW L1 GT DAQ data
# the options set in "user choices" file
# L1Trigger/GlobalTriggerAnalyzer/python/UserOptions.py
# V M Ghete 2009-04-03
# V M Ghete 2011-02-09 use UserOptions.py
import FWCore.ParameterSet.Config as cms
import sys
process = cms.Process("TestL1GtUnpacker")
print('\n')
from L1Trigger.GlobalTriggerAnalyzer.UserOptions_cff import *
if errorUserOptions == True :
print('\nError returned by UserOptions_cff\n')
sys.exit()
# source according to data type
if dataType == 'StreamFile' :
process.source = cms.Source("NewEventStreamFileReader", fileNames=readFiles)
else :
process.source = cms.Source ('PoolSource',
fileNames=readFiles,
secondaryFileNames=secFiles,
eventsToProcess = selectedEvents
)
# number of events to be processed and source file
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(maxNumberEvents)
)
# load and configure modules via Global Tag
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = useGlobalTag
# L1 GT/GMT unpack
process.load("EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi")
# input tag for GT readout collection (before CMSSW_5_0_X)
# source = hardware record
#
#if useRelValSample == True :
# daqGtInputTag = 'rawDataCollector'
#else :
# daqGtInputTag = 'rawDataCollector'
daqGtInputTag = 'rawDataCollector'
process.l1GtUnpack.DaqGtInputTag = daqGtInputTag
#process.l1GtUnpack.DaqGtInputTag = 'l1GtTextToRaw'
# Active Boards Mask
# no board masked (default)
#process.l1GtUnpack.ActiveBoardsMask = 0xFFFF
# GTFE only in the record
#process.l1GtUnpack.ActiveBoardsMask = 0x0000
# GTFE + FDL
#process.l1GtUnpack.ActiveBoardsMask = 0x0001
# GTFE + GMT
#process.l1GtUnpack.ActiveBoardsMask = 0x0100
# GTFE + FDL + GMT
#process.l1GtUnpack.ActiveBoardsMask = 0x0101
# BxInEvent to be unpacked
# all available BxInEvent (default)
#process.l1GtUnpack.UnpackBxInEvent = -1
# BxInEvent = 0 (L1A)
#process.l1GtUnpack.UnpackBxInEvent = 1
# 3 BxInEvent (F, 0, 1)
#process.l1GtUnpack.UnpackBxInEvent = 3
# set it to verbose
process.l1GtUnpack.Verbosity = cms.untracked.int32(1)
#
# l1GtTrigReport module
#
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")
# boolean flag to select the input record
# if true, it will use L1GlobalTriggerRecord
#process.l1GtTrigReport.UseL1GlobalTriggerRecord = True
# input tag for GT record:
# GT emulator: gtDigis (DAQ record)
# GT unpacker: gtDigis (DAQ record)
# GT lite record: l1GtRecord
process.l1GtTrigReport.L1GtRecordInputTag = "l1GtUnpack"
#process.l1GtTrigReport.PrintVerbosity = 10
# print output: 0 = std::cout; 1 = LogTrace; 2 = LogVerbatim; 3 = LogInfo
#process.l1GtTrigReport.PrintOutput = 0
# path to be run
process.p = cms.Path(process.l1GtUnpack*process.l1GtTrigReport)
# Message Logger
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.debugModules = ['l1GtUnpack', 'l1GtTrigReport']
process.MessageLogger.cerr.enable = False
process.MessageLogger.files.L1GtUnpacker_errors = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR'),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
L1GlobalTriggerRawToDigi = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtUnpacker_warnings = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GlobalTriggerRawToDigi = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtUnpacker_info = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
INFO = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GtTrigReport = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtUnpacker = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
DEBUG = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
INFO = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GlobalTriggerRawToDigi = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
# summary
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# output
process.outputL1GtUnpack = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('L1GtUnpacker.root'),
# keep only unpacked data in the ROOT file
outputCommands = cms.untracked.vstring('drop *',
'keep *_l1GtUnpack_*_*')
)
process.outpath = cms.EndPath(process.outputL1GtUnpack)
|
py
|
1a5624023b821367c6575bb8d90af48218853895
|
# Train an agent from scratch with PPO2 and save package and learning graphs
# from OpenGL import GLU
import os
import glob
import time
import subprocess
import shutil
import gym
import wandb
import random
import logging
from collections import defaultdict
from gym_smartquad.envs import quad_env
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import customMonitor
import datetime
import imageio
from stable_baselines3.ppo import MlpPolicy
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv, VecNormalize, sync_envs_normalization
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.vec_env import VecFrameStack
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.results_plotter import load_results, ts2xy
from stable_baselines3.common.cmd_util import make_vec_env
from stable_baselines3.common import logger
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import json
class smartCurriculumCallback(BaseCallback):
"""
A custom callback that derives from ``BaseCallback``.
:param verbose: (int) Verbosity level 0: no output 1: info 2: debug
"""
def __init__(self, envFunct, refreshRate, betaRate, initCurriculum, endDomain, targetDomain, domainPowers, ADRMethod = 'ep_reward_mean',targetReliability=None, targetReward=None, renders = True, verbose=1):
super(smartCurriculumCallback, self).__init__(verbose)
self.refreshRate = refreshRate
self.evalRate = 100000
self.betaRate = betaRate
self.n_calls = 0
self.oldLoss = None
self.newLoss = None
self.oldStep = 0
self.oldEvalStep = 0
self.meanRew = 0
self.rewardScale = 700 #TO BE FULLY INTEGRATED
self.envFunct = envFunct
self.curriculum = initCurriculum
self.initCurriculum = initCurriculum
self.endDomain = endDomain
self.progress = 0
self.targetDomain = targetDomain
self.domainPowers = domainPowers
self.targetReliability = targetReliability
self.targetReward = targetReward
self.best_min = np.NINF
self.best_mean = np.NINF
self.ADRMethod = ADRMethod
self.n_eval_episodes = 15
self.evaluations_results = []
self.evaluations_mins = []
self.evaluations_timesteps = []
self.gif_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp_gif/")
self.models_tmp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models_tmp/")
self.renders = renders
# Those variables will be accessible in the callback
# The RL model
# self.model = None # type: BaseRLModel
# An alias for self.model.get_env(), the environment used for training
# self.training_env = None # type: Union[gym.Env, VecEnv, None]
# Number of time the callback was called
# self.n_calls = 0 # type: int
# self.num_timesteps = 0 # type: int
# local and global variables
# self.locals = None # type: Dict[str, Any]
# self.globals = None # type: Dict[str, Any]
self.logger_dir = None
#self.logger_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logger/")
#logger.make_output_format('csv', self.logger_dir, log_suffix = 'progresslog')
def _on_training_start(self) :
"""
This method is called before the first rollout starts.
"""
self.logger_dir = self.logger.get_dir() +'/'+ os.listdir(self.logger.get_dir())[0]
def _on_rollout_start(self) :
"""
A rollout is the collection of environment interaction
using the current policy.
This event is triggered before collecting new samples.
"""
pass
def _on_step(self) :
#Basic curriculum progression, every self.refreshRate timesteps
if self.num_timesteps-self.oldStep >= self.refreshRate :
self.oldStep = self.num_timesteps
print(self.num_timesteps)
#Loss based ADR
if self.ADRMethod == 'loss':
self.lossMethod()
if self.ADRMethod == 'ep_reward_mean':
self.rewardMethod()
#evaluation
if self.num_timesteps - self.oldEvalStep >= self.evalRate :
self.oldEvalStep = self.num_timesteps
evalEnv = self.envFunct(self.targetDomain)
#sync_envs_normalization(self.training_env, evalEnv)
episode_rewards, episode_lengths = evaluate_policy(self.model, evalEnv,
n_eval_episodes=self.n_eval_episodes,
return_episode_rewards=True)
print(episode_rewards)
self.evaluations_results.append(np.mean(episode_rewards))
self.evaluations_mins.append(np.min(episode_rewards))
self.evaluations_timesteps.append(self.num_timesteps)
#remembering the best results :
if np.mean(episode_rewards) == np.max(self.evaluations_results):
self.best_mean = np.mean(episode_rewards)
self.best_min = np.min(episode_rewards)
#wandb.log({"best_mean": self.best_mean, "best_min": self.best_min}, step=self.num_timesteps)
self.model.save(self.models_tmp_dir +"best_network"+".plk")
#TO IMPLEMENT : SAVE THE NETWORK
#External logging
wandb.log({"eval_reward": self.evaluations_results[-1]}, step=self.num_timesteps)
wandb.log({"best_mean": self.best_mean, "best_min": self.best_min}, step=self.num_timesteps)
print('average score in a real environment : '+str(self.evaluations_results[-1]))
print('minimum score in a real environment : '+str(self.evaluations_mins[-1]))
self.model.save(self.models_tmp_dir +"step_"+str(self.num_timesteps)+".plk")
if self.renders :
self.createGif(evalEnv)
else :
evalEnv.close()
#Not used yet
if self.targetReliability!=None and self.targetReward!=None:
goalReached = True
for i in range(self.n_eval_episodes):
                    if episode_rewards[i] < self.targetReward :
goalReached = False
if goalReached :
return False
return True
def rewardMethod(self):
summary_iterators = EventAccumulator(self.logger_dir).Reload()
tags = summary_iterators.Tags()['scalars']
out = defaultdict(list)
for tag in tags:
#steps = [e.step for e in summary_iterators.Scalars(tag)]
for events in summary_iterators.Scalars(tag):
out[tag].append([e for e in events])
out = np.array(out['rollout/ep_rew_mean'])
#print(out) #May help debugging in case anything happens
try :
self.meanRew = out[-1,2]
except : #if there is only one logged element
try :
self.meanRew = out[2]
except : #if nothing is logged yet
return True
print(self.curriculum)
for i in range(len(self.curriculum)):
self.progress = self.meanRew/self.rewardScale
if self.progress < 0 :
self.progress = 0
elif self.progress > 1 :
self.progress = 1
#For now, the only supported progression goes from the simplest to the most difficult
self.curriculum[i] = self.initCurriculum[i] + (self.endDomain[i]-self.initCurriculum[i])*self.progress**self.domainPowers[i]
#print(self.progress)
self.training_env.env_method('refresh',self.curriculum)
wandb.log({"domain_progress": self.progress}, step=self.num_timesteps)
def lossMethod(self):
summary_iterators = EventAccumulator(self.logger_dir).Reload()
tags = summary_iterators.Tags()['scalars']
out = defaultdict(list)
for tag in tags:
#steps = [e.step for e in summary_iterators.Scalars(tag)]
for events in summary_iterators.Scalars(tag):
out[tag].append([e for e in events])
out = np.array(out['train/loss'])
#print(out) #May help debugging in case anything happens
try :
meanLoss = out[:,2]
except : #if there is only one logged element
try :
meanLoss = out[2]
except : #if nothing is logged yet
return True
try :
meanLoss = np.mean(meanLoss[-5:]) #may be edited
except :
meanLoss = meanLoss[-1]
if self.oldLoss != None :
self.oldLoss = self.newLoss
self.newLoss = meanLoss
lossDiff = self.newLoss-self.oldLoss
#Updating the curriculum
if lossDiff > 0 :
print(self.curriculum)
for i in range(len(self.curriculum)):
progressStep = self.betaRate*lossDiff
#Clipping progress :
if progressStep > 0.05 :
progressStep = 0.05
self.progress += progressStep
#For now, the only supported progression goes from the simplest to the most difficult
if self.progress>1 :
self.progress=1
self.curriculum[i] = self.initCurriculum[i] + (self.endDomain[i]-self.initCurriculum[i])*self.progress**self.domainPowers[i]
#print(self.progress)
self.training_env.env_method('refresh',self.curriculum)
wandb.log({"domain_progress": self.progress, "loss_dif": lossDiff}, step=self.num_timesteps)
print(self.num_timesteps)
else :
self.newLoss = meanLoss
self.oldLoss = self.newLoss
def createGif(self,evalEnv):
gif_name = "PPO_"+str(self.num_timesteps)
save_str = self.gif_dir + gif_name + '.gif'
model = PPO.load(self.models_tmp_dir +"step_"+str(self.num_timesteps)+".plk", env=evalEnv)
images = []
obs = evalEnv.reset()
img = evalEnv.sim.render(
width=400, height=400, camera_name="isometric_view")
for _ in range(600):
action, _ = model.predict(obs)
obs, _, _, _ = evalEnv.step(action)
img = evalEnv.sim.render(
width=400, height=400, camera_name="isometric_view")
images.append(np.flipud(img))
#print("creating gif...")
imageio.mimsave(save_str, [np.array(img)
for i, img in enumerate(images) if i % 2 == 0], fps=29)
print("gif created...")
evalEnv.close()
def _on_rollout_end(self) :
"""
This event is triggered before updating the policy.
"""
pass
def _on_training_end(self) :
"""
This event is triggered before exiting the `learn()` method.
"""
pass
class PPOtraining():
def __init__(self, envFunct, trainingSteps, targetDomain, domainPowers, domainProgressRate, learningRate = 0.0003, batchSize = 256, ADRMethod ='loss', autoParameters = False, startDomain=None, endDomain=None, targetReliability=None, targetReward=None, initModelLoc=None, render=False, verbose = 1, tag="") :
"""Trains a model using PPO (baselines3, based on PyTorch).
Env :
Must be imported at the beginning of the file, and declared in the 'updateEnv' method. Please do check out the evaluation section of the Callback class. Currently supports Gym structure.
            WARNING : In order to support smart curriculum learning, the environment must incorporate a 'refresh' method, which updates domain parameters.
            Note that this additional method is different from a reset method, but can simply update values that will be used in the 'reset' method (especially true for geometrical parameters).
As for noise parameters, they can be directly used after being updated, even in the middle of an episode.
Args:
envFunct : function. See the aforementioned 'Env' section.
trainingSteps : int. Total number of steps for the training.
            autoParameters : bool. False by default. Automatically assesses the impact of domain variables on the performance of the neural network, and infers custom progress parameters for the ADR.
targetDomain : vector (1D) of domain parameters estimated as representative of the real environment. If possible, it is recommended to characterize such values by performing measurements of the sub-systems (sensors/actuators), or environmental parameters.
            These parameters can also be inferred from the system requirements. Set to a null vector to ignore Domain Randomization.
            domainPowers : same dimension as targetDomain. Vector of empirical parameters. Default should be np.ones(targetDomain.shape).
            1 means that this parameter will be increased more or less linearly throughout the learning. x<1 means that the parameter will mostly increase in the final phase of the learning. x>1 means that the parameter will mostly increase in the early phase of the learning.
Base function is parameters = (progress)**domainPowers, with progress belonging in [0,1]. Set to a 0.00001 vector to ignore Curriculum Learning.
            domainProgressRate : float < 1. Describes how fast the ADR is going to progress. Requires a bit of fine-tuning. Set it such that the domain_progress reaches 1 toward the end of the training. A uniform parameter sweep is probably the best way to go.
KwArgs :
startDomain : vector (1D) of domain parameters to begin the learning with. None by default, meaning that all of these parameters will be 0.
            endDomain : vector (1D) of domain parameters to end the learning with. Defaults to None, in which case it is automatically set equal to targetDomain.
targetReliability : float in [0,1]. Enables validation to be performed every now and then, and the learning process to be stopped when the model achieves a targetReliability rate of success (achieving targetReward with an environment defined with targetDomain)
targetReward : float.
initModelLoc : path to a stable_baselines model. Enables a previously trained model to be improved with domain randomization
render : bool. Default is 0. Renders Gifs of the target environment every 100000 steps.
verbose : bool. Default is 1. Display essential learning data in the shell.
            tag : str. Default is "". Puts a label on the best saved network.
"""
self.step_total = trainingSteps
self.verbose = verbose
self.env = None
self.envFunct = envFunct
self.n_cpu = 8
self.modelLoc = initModelLoc
self.model = None
self.batchSize = batchSize
self.learningRate = learningRate
self.tag = tag
self.createDirectories()
#Callbacks parameters
self.refreshRate = 30000
self.betaRate = domainProgressRate
self.targetDomain = targetDomain
self.domainPowers = domainPowers
if not isinstance(startDomain,np.ndarray) :
self.curriculum = np.zeros(targetDomain.shape)
else :
self.curriculum = startDomain
if not isinstance(endDomain,np.ndarray) :
self.endDomain = self.targetDomain
else :
self.endDomain = endDomain
self.renders = render
self.ADRMethod = ADRMethod
#External logging
self.updateEnv(self.curriculum)
self.updateModel()
self.train()
wandb.join()
def train(self):
start = time.time()
#evalEnv = self.envFunct(self.targetDomain)
#self.model.learn(total_timesteps=self.step_total, eval_env = evalEnv, eval_freq = 20000, n_eval_episodes= 15,log_interval=1, tb_log_name="PPO",callback=smartCurriculumCallback(self.refreshRate, self.betaRate, self.curriculum, self.targetDomain, self.domainPowers, targetReliability=None, targetReward=None, renders = False, verbose=1))
#Using callbacks to perform evaluations instead :
callbackFunction = smartCurriculumCallback(self.envFunct, self.refreshRate, self.betaRate,
self.curriculum, self.endDomain, self.targetDomain,
self.domainPowers, ADRMethod = self.ADRMethod, targetReliability=None, targetReward=None,
renders = self.renders , verbose=1)
self.model.learn(total_timesteps=self.step_total,log_interval=1, tb_log_name="PPO",callback=callbackFunction)
end = time.time()
training_time = end - start
t = time.localtime()
timestamp = time.strftime('%b-%d-%Y_%H%M', t)
src_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models_tmp/best_network.plk")
dst_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models/best_network"+wandb.run.name+self.tag+timestamp+".plk")
shutil.copy(src_dir,dst_dir)
#Performance summary is now handled by the validation log.
def updateEnv(self, initCurriculum):
if self.env != None :
self.env.close()
self.env = self.envFunct(initCurriculum)
self.env = customMonitor.Monitor(self.env, allow_early_resets=True)
self.env = DummyVecEnv( [lambda: self.env for i in range(self.n_cpu)] )
def updateModel(self):
if self.modelLoc==None:
self.model = PPO(MlpPolicy, self.env, tensorboard_log="./logger/",verbose=1, device='cuda',n_steps = 2048, n_epochs=10, batch_size= self.batchSize, learning_rate= self.learningRate)
self.modelLoc = self.models_dir
else :
pass
def createDirectories(self):
self.models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models/")
self.models_tmp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models_tmp/")
self.log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp")
self.gif_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp_gif/")
self.plt_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "plot")
self.logger_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logger/")
os.makedirs(self.log_dir, exist_ok=True)
os.makedirs(self.gif_dir, exist_ok=True)
os.makedirs(self.models_dir, exist_ok=True)
os.makedirs(self.models_tmp_dir, exist_ok=True)
os.makedirs(self.plt_dir, exist_ok=True)
os.makedirs(self.logger_dir, exist_ok=True)
if __name__ == '__main__':
for i in range(5):
params_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "paramsADR.json")
with open(params_path) as json_file:
params = json.load(json_file)
tag = params["tag"]
wandb.init(project="Smart_Quad_Friction", sync_tensorboard=True, allow_val_change=True, reinit=True, tags=[tag])
print(str(wandb.run.name))
wandb.config.progress_rate = params["progress_rate"]
wandb.config.domain_powers = None
wandb.config.learningRate = 0.002
wandb.config.batchSize = 1800
training = PPOtraining(quad_env.QuadEnv, 2000000, np.array(params["targetDomain"]), np.array(params["domainPowers"]), wandb.config.progress_rate , learningRate = wandb.config.learningRate, batchSize = wandb.config.batchSize, startDomain= np.array(params["startDomain"]), endDomain = np.array(params["endDomain"]), ADRMethod = 'loss', targetReliability=None, targetReward=None, initModelLoc=None, render = False, verbose = 1, tag = tag)
for i in range(5):
params_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "paramsDR.json")
with open(params_path) as json_file:
params = json.load(json_file)
tag = params["tag"]
wandb.init(project="Smart_Quad_Friction", sync_tensorboard=True, allow_val_change=True, reinit=True, tags=[tag])
wandb.config.progress_rate = params["progress_rate"]
wandb.config.domain_powers = None
wandb.config.learningRate = 0.002
wandb.config.batchSize = 1800
training = PPOtraining(quad_env.QuadEnv, 2000000, np.array(params["targetDomain"]), np.array(params["domainPowers"]), wandb.config.progress_rate , learningRate = wandb.config.learningRate, batchSize = wandb.config.batchSize, startDomain= np.array(params["startDomain"]), endDomain = np.array(params["endDomain"]), ADRMethod = 'loss', targetReliability=None, targetReward=None, initModelLoc=None, render = False, verbose = 1, tag = tag)
for i in range(5):
params_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "paramsNoDR.json")
with open(params_path) as json_file:
params = json.load(json_file)
tag = params["tag"]
wandb.init(project="Smart_Quad_Friction", sync_tensorboard=True, allow_val_change=True, reinit=True, tags=[tag])
wandb.config.progress_rate = params["progress_rate"]
wandb.config.domain_powers = None
wandb.config.learningRate = 0.002
wandb.config.batchSize = 1800
training = PPOtraining(quad_env.QuadEnv, 2000000, np.array(params["targetDomain"]), np.array(params["domainPowers"]), wandb.config.progress_rate , learningRate = wandb.config.learningRate, batchSize = wandb.config.batchSize, startDomain= np.array(params["startDomain"]), endDomain = np.array(params["endDomain"]), ADRMethod = 'loss', targetReliability=None, targetReward=None, initModelLoc=None, render = False, verbose = 1, tag = tag)
|
py
|
1a56260b15b0b89f463c378ed6b857486057765e
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about contributing code to gsutil."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(https://cla.developers.google.com/about/google-individual).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(https://cla.developers.google.com/about/google-corporate)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it
has already been reported by another user. From there you can also
subscribe to updates to the issue.
3. If a GitHub issue doesn't already exist, create one about your idea before
sending actual code. Often we can discuss the idea and help propose things
that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from the GitHub repository:
https://github.com/GoogleCloudPlatform/gsutil
To clone a read-only copy of the repository:
git clone git://github.com/GoogleCloudPlatform/gsutil.git
To push your own changes to GitHub, click the Fork button on the
repository page and clone the repository from your own fork.
7. The gsutil git repository uses git submodules to pull in external modules.
After checking out the repository, make sure to also pull the submodules
by entering into the gsutil top-level directory and run:
git submodule update --init --recursive
8. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made changes to boto, please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules. Change directories into the boto root
directory at third_party/boto and run:
pip install -r requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run the install command again.
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto's
tests directory and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
9. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
10. When it's time to send us code, please use the Rietveld code review tool
rather than simply sending us a code patch. Do this as follows:
- Check out the gsutil code from your fork of the gsutil repository and
apply your changes.
- Download the "upload.py" script from
https://github.com/rietveld-codereview/rietveld
- Run upload.py from your git directory with the changes.
- Click the codereview.appspot.com link it generates, click "Edit Issue",
and add [email protected] and [email protected] as reviewers, and
Cc [email protected].
- Click Publish+Mail Comments.
- Once your changes are accepted, submit a pull request on GitHub and we
will merge your commits.
""")
class CommandOptions(HelpProvider):
"""Additional help about contributing code to gsutil."""
# TODO: gsutil-beta: Add lint .rc file and linting instructions.
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='dev',
help_name_aliases=[
'development', 'developer', 'code', 'mods', 'software'],
help_type='additional_help',
help_one_line_summary='Contributing Code to gsutil',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
py
|
1a562805213fc53388a2243b39305b548daf188e
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
from collections import defaultdict
from datetime import datetime
from operator import attrgetter
from time import time
from typing import List, Optional, Tuple
from urllib.parse import quote
# Using `from elasticsearch import *` would break elasticsearch mocking used in unit test.
import elasticsearch
import pendulum
from elasticsearch_dsl import Search
from airflow.configuration import conf
from airflow.models import TaskInstance
from airflow.utils import timezone
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.json_formatter import JSONFormatter
from airflow.utils.log.logging_mixin import ExternalLoggingMixin, LoggingMixin
# Elasticsearch hosted log type
EsLogMsgType = List[Tuple[str, str]]
class ElasticsearchTaskHandler(FileTaskHandler, ExternalLoggingMixin, LoggingMixin):
"""
ElasticsearchTaskHandler is a python log handler that
reads logs from Elasticsearch. Note logs are not directly
indexed into Elasticsearch. Instead, it flushes logs
into local files. Additional software setup is required
to index the log into Elasticsearch, such as using
Filebeat and Logstash.
To efficiently query and sort Elasticsearch results, we assume each
log message has a field `log_id` consisting of the task instance's primary keys:
`log_id = {dag_id}-{task_id}-{execution_date}-{try_number}`
Log messages with a specific log_id are sorted based on `offset`,
which is a unique integer indicating the log message's order.
Timestamps here are unreliable because multiple log messages
might have the same timestamp.
"""
PAGE = 0
MAX_LINE_PER_PAGE = 1000
LOG_NAME = 'Elasticsearch'
def __init__(
self,
base_log_folder: str,
filename_template: str,
log_id_template: str,
end_of_log_mark: str,
write_stdout: bool,
json_format: bool,
json_fields: str,
host_field: str = "host",
offset_field: str = "offset",
host: str = "localhost:9200",
frontend: str = "localhost:5601",
es_kwargs: Optional[dict] = conf.getsection("elasticsearch_configs"),
):
"""
:param base_log_folder: base folder to store logs locally
:param log_id_template: log id template
:param host: Elasticsearch host name
"""
es_kwargs = es_kwargs or {}
super().__init__(base_log_folder, filename_template)
self.closed = False
self.client = elasticsearch.Elasticsearch([host], **es_kwargs)
self.log_id_template = log_id_template
self.frontend = frontend
self.mark_end_on_close = True
self.end_of_log_mark = end_of_log_mark
self.write_stdout = write_stdout
self.json_format = json_format
self.json_fields = [label.strip() for label in json_fields.split(",")]
self.host_field = host_field
self.offset_field = offset_field
self.handler = None
self.context_set = False
def _render_log_id(self, ti: TaskInstance, try_number: int) -> str:
if self.json_format:
execution_date = self._clean_execution_date(ti.execution_date)
else:
execution_date = ti.execution_date.isoformat()
return self.log_id_template.format(
dag_id=ti.dag_id, task_id=ti.task_id, execution_date=execution_date, try_number=try_number
)
@staticmethod
def _clean_execution_date(execution_date: datetime) -> str:
"""
Clean up an execution date so that it is safe to query in elasticsearch
by removing reserved characters.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
:param execution_date: execution date of the dag run.
"""
return execution_date.strftime("%Y_%m_%dT%H_%M_%S_%f")
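# For illustration (hypothetical input): datetime(2021, 1, 1, 12, 30, 45, 123456)
# becomes "2021_01_01T12_30_45_123456", which avoids reserved query-string
# characters such as ':' and '+'.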
def _group_logs_by_host(self, logs):
grouped_logs = defaultdict(list)
for log in logs:
key = getattr(log, self.host_field, 'default_host')
grouped_logs[key].append(log)
# return items sorted by timestamp.
result = sorted(grouped_logs.items(), key=lambda kv: getattr(kv[1][0], 'message', '_'))
return result
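# Sketch of the grouping (hypothetical hosts): documents with host="worker-1" and
# host="worker-2" come back as [("worker-1", [...]), ("worker-2", [...])];
# documents missing the configured host field are grouped under "default_host".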
def _read_grouped_logs(self):
return True
def _read(
self, ti: TaskInstance, try_number: int, metadata: Optional[dict] = None
) -> Tuple[EsLogMsgType, dict]:
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
can be used for streaming log reading and auto-tailing.
:return: a list of tuple with host and log documents, metadata.
"""
if not metadata:
metadata = {'offset': 0}
if 'offset' not in metadata:
metadata['offset'] = 0
offset = metadata['offset']
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset, metadata)
logs_by_host = self._group_logs_by_host(logs)
next_offset = offset if not logs else attrgetter(self.offset_field)(logs[-1])
# Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
# on the client. Sending as a string prevents this issue.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
metadata['offset'] = str(next_offset)
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
loading_hosts = [
item[0] for item in logs_by_host if item[-1][-1].message != self.end_of_log_mark.strip()
]
metadata['end_of_log'] = False if not logs else len(loading_hosts) == 0
cur_ts = pendulum.now()
# Assume end of log after not receiving new log for 5 min,
# as executor heartbeat is 1 min and there might be some
# delay before Elasticsearch makes the log available.
if 'last_log_timestamp' in metadata:
last_log_ts = timezone.parse(metadata['last_log_timestamp'])
if (
cur_ts.diff(last_log_ts).in_minutes() >= 5
or 'max_offset' in metadata
and int(offset) >= int(metadata['max_offset'])
):
metadata['end_of_log'] = True
if int(offset) != int(next_offset) or 'last_log_timestamp' not in metadata:
metadata['last_log_timestamp'] = str(cur_ts)
# If we hit the end of the log, remove the actual end_of_log message
# to prevent it from showing in the UI.
def concat_logs(lines):
log_range = (len(lines) - 1) if lines[-1].message == self.end_of_log_mark.strip() else len(lines)
return '\n'.join(self._format_msg(lines[i]) for i in range(log_range))
message = [(host, concat_logs(hosted_log)) for host, hosted_log in logs_by_host]
return message, metadata
def _format_msg(self, log_line):
"""Format ES Record to match settings.LOG_FORMAT when used with json_format"""
# Using formatter._style.format makes it future proof i.e.
# if we change the formatter style from '%' to '{' or '$', this will still work
if self.json_format:
try:
return self.formatter._style.format(_ESJsonLogFmt(self.json_fields, **log_line.to_dict()))
except Exception:
pass
# Just a safe-guard to preserve backwards-compatibility
return log_line.message
def es_read(self, log_id: str, offset: str, metadata: dict) -> list:
"""
Return the logs matching log_id in Elasticsearch and the next offset.
Returns an empty result if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
:param offset: the offset start to read log from.
:type offset: str
:param metadata: log metadata, used for streaming log download.
:type metadata: dict
"""
# Offset is the unique key for sorting logs given log_id.
search = Search(using=self.client).query('match_phrase', log_id=log_id).sort(self.offset_field)
search = search.filter('range', **{self.offset_field: {'gt': int(offset)}})
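# The query built above is roughly equivalent to this Elasticsearch body
# (sketch only; the exact DSL depends on elasticsearch_dsl's serialization):
# {"query": {"bool": {"must": [{"match_phrase": {"log_id": "<log_id>"}}],
#                     "filter": [{"range": {"offset": {"gt": <offset>}}}]}},
#  "sort": ["offset"]}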
max_log_line = search.count()
if 'download_logs' in metadata and metadata['download_logs'] and 'max_offset' not in metadata:
try:
if max_log_line > 0:
metadata['max_offset'] = attrgetter(self.offset_field)(
search[max_log_line - 1].execute()[-1]
)
else:
metadata['max_offset'] = 0
except Exception:
self.log.exception('Could not get current log size with log_id: %s', log_id)
logs = []
if max_log_line != 0:
try:
logs = search[self.MAX_LINE_PER_PAGE * self.PAGE : self.MAX_LINE_PER_PAGE].execute()
except Exception:
self.log.exception('Could not read log with log_id: %s', log_id)
return logs
def emit(self, record):
if self.handler:
record.offset = int(time() * (10 ** 9))
self.handler.emit(record)
def set_context(self, ti: TaskInstance) -> None:
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
"""
self.mark_end_on_close = not ti.raw
if self.json_format:
self.formatter = JSONFormatter(
fmt=self.formatter._fmt,
json_fields=self.json_fields + [self.offset_field],
extras={
'dag_id': str(ti.dag_id),
'task_id': str(ti.task_id),
'execution_date': self._clean_execution_date(ti.execution_date),
'try_number': str(ti.try_number),
'log_id': self._render_log_id(ti, ti.try_number),
},
)
if self.write_stdout:
if self.context_set:
# We don't want to re-set up the handler if this logger has
# already been initialized
return
self.handler = logging.StreamHandler(stream=sys.__stdout__) # type: ignore
self.handler.setLevel(self.level) # type: ignore
self.handler.setFormatter(self.formatter) # type: ignore
else:
super().set_context(ti)
self.context_set = True
def close(self) -> None:
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
if not self.mark_end_on_close:
self.closed = True
return
# Case which context of the handler was not set.
if self.handler is None:
self.closed = True
return
# Reopen the file stream, because FileHandler.close() would be called
# first in logging.shutdown() and the stream in it would be set to None.
if self.handler.stream is None or self.handler.stream.closed:
self.handler.stream = self.handler._open()
# Mark the end of file using end of log mark,
# so we know where to stop while auto-tailing.
self.handler.stream.write(self.end_of_log_mark)
if self.write_stdout:
self.handler.close()
sys.stdout = sys.__stdout__
super().close()
self.closed = True
@property
def log_name(self) -> str:
"""The log name"""
return self.LOG_NAME
def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str:
"""
Creates an address for an external log collecting service.
:param task_instance: task instance object
:type task_instance: TaskInstance
:param try_number: task instance try_number to read logs from.
:type try_number: Optional[int]
:return: URL to the external log collection service
:rtype: str
"""
log_id = self._render_log_id(task_instance, try_number)
scheme = '' if '://' in self.frontend else 'https://'
return scheme + self.frontend.format(log_id=quote(log_id))
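# Example (hypothetical frontend value): with frontend="kibana.example.com/app/logs/{log_id}"
# and a rendered log_id of "dag-task-2021_01_01T00_00_00_000000-1", this returns
# "https://kibana.example.com/app/logs/dag-task-2021_01_01T00_00_00_000000-1";
# the scheme is only prepended when the configured frontend lacks one.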
@property
def supports_external_link(self) -> bool:
"""Whether we can support external links"""
return bool(self.frontend)
class _ESJsonLogFmt:
"""Helper class to read ES Logs and re-format it to match settings.LOG_FORMAT"""
# A separate class is needed because 'self.formatter._style.format' uses '.__dict__'
def __init__(self, json_fields: List, **kwargs):
for field in json_fields:
self.__setattr__(field, '')
self.__dict__.update(kwargs)
|
py
|
1a56289098e99698d170631471bfb234884d0452
|
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="sectoolkit",
version="0.2.4",
description="Tools for working with Securities and Exchange Commission (SEC) indices, SGML header files, filing archives and individual filing documents.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/dlouton/sectoolkit",
author="Dave Louton",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=("tests",)),
include_package_data=True,
install_requires=["bs4", "numpy", "pandas", "xmltodict", "tqdm"],
# entry_points={
# "console_scripts": [
# "realpython=reader.__main__:main",
# ]
# },
)
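# Typical local workflow for a setuptools project like this (standard commands,
# not specific to sectoolkit): `python -m pip install .` to install from source,
# or `python -m build` to produce the sdist/wheel for distribution.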
|
py
|
1a562b8065e0e361000930a101c5855283085a9b
|
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for crackedcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# that stderr is empty if no errors are expected. However, bitcoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
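# For example, parse_output('{"a": 1}\n', 'json') yields {'a': 1}, and
# parse_output('00ff\n', 'hex') yields b'\x00\xff', so comparisons ignore
# whitespace and hex-case differences in the raw command output.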
if __name__ == '__main__':
main()
|
py
|
1a562c2a7c05d82f8e1a3c086d17284f51b19657
|
from matplotlib import colors, colorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Add colorbar to existing imshow
def imshow_add_color_bar(fig, ax, img):
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(img, cax=cax, orientation='vertical')
# Adds fake colorbar to any axis. That colorbar will linearly interpolate an existing colormap
def imshow_add_fake_color_bar(fig, ax, cmap, vmin=0, vmax=1):
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
norm = colors.Normalize(vmin=vmin, vmax=vmax)
cb1 = colorbar.ColorbarBase(cax, cmap=cmap, norm=norm, orientation='vertical')
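# Usage sketch (assumed data, not part of these helpers):
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   img = ax.imshow(np.random.rand(10, 10), cmap='viridis')
#   imshow_add_color_bar(fig, ax, img)  # colorbar driven by the image data
#   imshow_add_fake_color_bar(fig, ax, plt.get_cmap('viridis'), vmin=0, vmax=10)
#   plt.show()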
|
py
|
1a562dc09bf52e5ef1c8432a589327e267a6b68b
|
import asyncio
import pytest
import time
from hddcoin.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from hddcoin.protocols.full_node_protocol import RespondBlock
from hddcoin.server.server import HDDcoinServer
from hddcoin.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from hddcoin.types.peer_info import PeerInfo
from hddcoin.util.ints import uint16, uint32, uint64
from hddcoin.wallet.util.transaction_type import TransactionType
from hddcoin.wallet.transaction_record import TransactionRecord
from hddcoin.wallet.wallet_node import WalletNode
from hddcoin.wallet.wallet_state_manager import WalletStateManager
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert, time_out_assert_not_none
from tests.wallet.cc_wallet.test_cc_wallet import tx_in_pool
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestWalletSimulator:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_wallet_coinbase(self, wallet_node):
num_blocks = 10
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
server_1: HDDcoinServer = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_block(FarmNewBlockProtocol(ph))
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 2)
]
)
async def check_tx_are_pool_farm_rewards():
wsm: WalletStateManager = wallet_node.wallet_state_manager
all_txs = await wsm.get_all_transactions(1)
expected_count = (num_blocks + 1) * 2
if len(all_txs) != expected_count:
return False
pool_rewards = 0
farm_rewards = 0
for tx in all_txs:
if tx.type == TransactionType.COINBASE_REWARD:
pool_rewards += 1
elif tx.type == TransactionType.FEE_REWARD:
farm_rewards += 1
if pool_rewards != expected_count / 2:
return False
if farm_rewards != expected_count / 2:
return False
return True
await time_out_assert(10, check_tx_are_pool_farm_rewards, True)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_wallet_make_transaction(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
tx = await wallet.generate_signed_transaction(
10,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet.push_transaction(tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - 10)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, (2 * num_blocks))
]
)
await time_out_assert(5, wallet.get_confirmed_balance, new_funds - 10)
await time_out_assert(5, wallet.get_unconfirmed_balance, new_funds - 10)
@pytest.mark.asyncio
async def test_wallet_coinbase_reorg(self, wallet_node):
num_blocks = 5
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
fn_server = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(uint32(2), uint32(num_blocks + 6), 32 * b"0"))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 2)
]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_wallet_send_to_three_peers(self, three_sim_two_wallets):
num_blocks = 10
full_nodes, wallets = three_sim_two_wallets
wallet_0, wallet_server_0 = wallets[0]
full_node_api_0 = full_nodes[0]
full_node_api_1 = full_nodes[1]
full_node_api_2 = full_nodes[2]
full_node_0 = full_node_api_0.full_node
full_node_1 = full_node_api_1.full_node
full_node_2 = full_node_api_2.full_node
server_0 = full_node_0.server
server_1 = full_node_1.server
server_2 = full_node_2.server
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
# wallet0 <-> server0
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
for i in range(0, num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
all_blocks = await full_node_api_0.get_all_full_blocks()
for block in all_blocks:
await full_node_1.respond_block(RespondBlock(block))
await full_node_2.respond_block(RespondBlock(block))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, 32 * b"0", 0)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await time_out_assert_not_none(5, full_node_0.mempool_manager.get_spendbundle, tx.spend_bundle.name())
# wallet0 <-> server1
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), wallet_0.on_connect)
await time_out_assert_not_none(5, full_node_1.mempool_manager.get_spendbundle, tx.spend_bundle.name())
# wallet0 <-> server2
await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_2._port)), wallet_0.on_connect)
await time_out_assert_not_none(5, full_node_2.mempool_manager.get_spendbundle, tx.spend_bundle.name())
@pytest.mark.asyncio
async def test_wallet_make_transaction_hop(self, two_wallet_nodes_five_freeze):
num_blocks = 10
full_nodes, wallets = two_wallet_nodes_five_freeze
full_node_api_0 = full_nodes[0]
full_node_0 = full_node_api_0.full_node
server_0 = full_node_0.server
wallet_node_0, wallet_0_server = wallets[0]
wallet_node_1, wallet_1_server = wallets[1]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await wallet_0_server.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
await wallet_1_server.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
for i in range(0, num_blocks):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet_0.get_confirmed_balance, funds)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, funds)
assert await wallet_0.get_confirmed_balance() == funds
assert await wallet_0.get_unconfirmed_balance() == funds
tx = await wallet_0.generate_signed_transaction(
10,
await wallet_node_1.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet_0.push_transaction(tx)
# Full node height 11, wallet height 9
await time_out_assert(5, wallet_0.get_confirmed_balance, funds)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, funds - 10)
for i in range(0, 4):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# here it's num_blocks + 1 because our last reward is included in the first block that we just farmed
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
# Full node height 17, wallet height 15
await time_out_assert(5, wallet_0.get_confirmed_balance, new_funds - 10)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, new_funds - 10)
await time_out_assert(5, wallet_1.get_confirmed_balance, 10)
tx = await wallet_1.generate_signed_transaction(5, await wallet_0.get_new_puzzlehash(), 0)
await wallet_1.push_transaction(tx)
for i in range(0, 4):
await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await wallet_0.get_confirmed_balance()
await wallet_0.get_unconfirmed_balance()
await wallet_1.get_confirmed_balance()
await time_out_assert(5, wallet_0.get_confirmed_balance, new_funds - 5)
await time_out_assert(5, wallet_0.get_unconfirmed_balance, new_funds - 5)
await time_out_assert(5, wallet_1.get_confirmed_balance, 5)
# @pytest.mark.asyncio
# async def test_wallet_finds_full_node(self):
# node_iters = [
# setup_full_node(
# test_constants,
# "blockchain_test.db",
# 11234,
# introducer_port=11236,
# simulator=False,
# ),
# setup_wallet_node(
# 11235,
# test_constants,
# None,
# introducer_port=11236,
# ),
# setup_introducer(11236),
# ]
#
# full_node_api = await node_iters[0].__anext__()
# wallet, wallet_server = await node_iters[1].__anext__()
# introducer, introducer_server = await node_iters[2].__anext__()
#
# async def has_full_node():
# outbound: List[WSHDDcoinConnection] = wallet.server.get_outgoing_connections()
# for connection in outbound:
# if connection.connection_type is NodeType.FULL_NODE:
# return True
# return False
#
# await time_out_assert(
# 2 * 60,
# has_full_node,
# True,
# )
# await _teardown_nodes(node_iters)
@pytest.mark.asyncio
async def test_wallet_make_transaction_with_fee(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx_amount = 3200000000000
tx_fee = 10
tx = await wallet.generate_signed_transaction(
tx_amount,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
tx_fee,
)
fees = tx.spend_bundle.fees()
assert fees == tx_fee
await wallet.push_transaction(tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - tx_amount - tx_fee)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
new_funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
await time_out_assert(5, wallet.get_confirmed_balance, new_funds - tx_amount - tx_fee)
await time_out_assert(5, wallet.get_unconfirmed_balance, new_funds - tx_amount - tx_fee)
@pytest.mark.asyncio
async def test_wallet_create_hit_max_send_amount(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
primaries = []
for i in range(0, 600):
primaries.append({"puzzlehash": ph, "amount": 100000000 + i})
tx_split_coins = await wallet.generate_signed_transaction(1, ph, 0, primaries=primaries)
await wallet.push_transaction(tx_split_coins)
await time_out_assert(
15, tx_in_pool, True, full_node_1.full_node.mempool_manager, tx_split_coins.spend_bundle.name()
)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
await time_out_assert(90, wallet.get_confirmed_balance, funds)
max_sent_amount = await wallet.get_max_send_amount()
# 1) Generate transaction that is under the limit
under_limit_tx = None
try:
under_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount - 1,
ph,
0,
)
except ValueError:
assert ValueError
assert under_limit_tx is not None
# 2) Generate transaction that is equal to limit
at_limit_tx = None
try:
at_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount,
ph,
0,
)
except ValueError:
assert ValueError
assert at_limit_tx is not None
# 3) Generate transaction that is greater than limit
above_limit_tx = None
try:
above_limit_tx = await wallet.generate_signed_transaction(
max_sent_amount + 1,
ph,
0,
)
except ValueError:
pass
assert above_limit_tx is None
@pytest.mark.asyncio
async def test_wallet_prevent_fee_theft(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_1.full_node.server._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx_amount = 3200000000000
tx_fee = 300000000000
tx = await wallet.generate_signed_transaction(
tx_amount,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
tx_fee,
)
# extract coin_solution from generated spend_bundle
for cs in tx.spend_bundle.coin_solutions:
if cs.additions() == []:
stolen_cs = cs
# get a legit signature
stolen_sb = await wallet.sign_transaction([stolen_cs])
now = uint64(int(time.time()))
add_list = list(stolen_sb.additions())
rem_list = list(stolen_sb.removals())
name = stolen_sb.name()
stolen_tx = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=32 * b"0",
amount=0,
fee_amount=stolen_cs.coin.amount,
confirmed=False,
sent=uint32(0),
spend_bundle=stolen_sb,
additions=add_list,
removals=rem_list,
wallet_id=wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=name,
)
await wallet.push_transaction(stolen_tx)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - stolen_cs.coin.amount)
for i in range(0, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# Funds have not decreased because stolen_tx was rejected
outstanding_coinbase_rewards = 2000000000000
await time_out_assert(5, wallet.get_confirmed_balance, funds + outstanding_coinbase_rewards)
await time_out_assert(5, wallet.get_confirmed_balance, funds + outstanding_coinbase_rewards)
@pytest.mark.asyncio
async def test_wallet_tx_reorg(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
fn_server = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node: WalletNode = wallet_node
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
ph2 = await wallet_2.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
await server_3.start_client(PeerInfo(self_hostname, uint16(fn_server._port)), None)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
# Waits a few seconds to receive rewards
all_blocks = await full_node_api.get_all_full_blocks()
# Ensure that we use a coin that we will not reorg out
coin = list(all_blocks[-3].get_included_reward_coins())[0]
await asyncio.sleep(5)
tx = await wallet.generate_signed_transaction(1000, ph2, coins={coin})
await wallet.push_transaction(tx)
await full_node_api.full_node.respond_transaction(tx.spend_bundle, tx.name)
await time_out_assert(5, wallet.get_confirmed_balance, funds)
for i in range(0, 2):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(5, wallet_2.get_confirmed_balance, 1000)
await time_out_assert(5, wallet_node.wallet_state_manager.blockchain.get_peak_height, 7)
peak_height = full_node_api.full_node.blockchain.get_peak().height
print(peak_height)
# Perform a reorg, which will revert the transaction in the full node and wallet, and cause wallet to resubmit
await full_node_api.reorg_from_index_to_new_index(
ReorgProtocol(uint32(peak_height - 3), uint32(peak_height + 3), 32 * b"0")
)
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, peak_height - 2)
]
)
await time_out_assert(7, full_node_api.full_node.blockchain.get_peak_height, peak_height + 3)
await time_out_assert(7, wallet_node.wallet_state_manager.blockchain.get_peak_height, peak_height + 3)
# Farm a few blocks so we can confirm the resubmitted transaction
for i in range(0, num_blocks):
await asyncio.sleep(1)
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
# By this point, the transaction should be confirmed
print(await wallet.get_confirmed_balance())
await time_out_assert(15, wallet.get_confirmed_balance, funds - 1000)
unconfirmed = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(int(wallet.id()))
assert len(unconfirmed) == 0
tx_record = await wallet_node.wallet_state_manager.tx_store.get_transaction_record(tx.name)
removed = tx_record.removals[0]
added = tx_record.additions[0]
added_1 = tx_record.additions[1]
wallet_coin_record_rem = await wallet_node.wallet_state_manager.coin_store.get_coin_record(removed.name())
assert wallet_coin_record_rem.spent
coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(removed.name())
assert coin_record_full_node.spent
add_1_coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(added.name())
assert add_1_coin_record_full_node is not None
assert add_1_coin_record_full_node.confirmed_block_index > 0
add_2_coin_record_full_node = await full_node_api.full_node.coin_store.get_coin_record(added_1.name())
assert add_2_coin_record_full_node is not None
assert add_2_coin_record_full_node.confirmed_block_index > 0
|
py
|
1a562df387150f6f699b9af2b57bd1f131aaf1ef
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
import tensorflow_datasets as tfds
from beta.examples.tensorflow.common.logger import logger
from beta.examples.tensorflow.common.utils import set_hard_limit_num_open_files
class BaseDatasetBuilder(ABC):
"""Abstract dataset loader and input processing."""
def __init__(self, config, is_train, num_devices):
self._config = config
self._is_train = is_train
self._num_devices = num_devices
self._global_batch_size = config.batch_size
# Dataset params
self._dataset_dir = config.dataset_dir
self._dataset_name = config.get('dataset', None)
self._dataset_type = config.get('dataset_type', 'tfds')
self._as_supervised = False
# Dataset loader
self._dataset_loader = None
# TFDS params
self._skip_decoding = False
# Dict with TFRecordDatasets
self._tfrecord_datasets = {}
self._split = 'train' if self._is_train else 'validation'
@property
def is_train(self):
"""Returns a `bool` flag which specifies whether it is a training or evaluation dataset."""
return self._is_train
@property
def batch_size(self):
"""Returns per replica batch size."""
return self._global_batch_size // self._num_devices
@property
def global_batch_size(self):
"""Returns global batch size."""
return self.batch_size * self._num_devices
@property
def steps_per_epoch(self):
"""Returns steps per epoch"""
return self.num_examples // self.global_batch_size
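# Example with assumed numbers: a global batch size of 256 on 8 devices gives a
# per-replica batch size of 32; with 1,281,167 training examples (ImageNet-sized),
# steps_per_epoch = 1281167 // 256 = 5004.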
@property
@abstractmethod
def num_examples(self):
"""Returns number of examples in the current dataset."""
@property
@abstractmethod
def num_classes(self):
"""Returns number of classes in the current dataset."""
@abstractmethod
def _pipeline(self, dataset):
"""The pipeline which decodes and preprocesses the input data for model."""
def build(self):
dataset_builders = {
'tfds': self._load_tfds,
'tfrecords': self._load_tfrecords,
}
builder = dataset_builders.get(self._dataset_type, None)
if builder is None:
raise ValueError('Unknown dataset type {}'.format(self._dataset_type))
dataset = builder()
dataset = self._pipeline(dataset)
return dataset
def _load_tfds(self):
logger.info('Using TFDS to load data.')
set_hard_limit_num_open_files()
self._dataset_loader = tfds.builder(self._dataset_name,
data_dir=self._dataset_dir)
self._dataset_loader.download_and_prepare()
decoders = {'image': tfds.decode.SkipDecoding()} \
if self._skip_decoding else None
read_config = tfds.ReadConfig(
interleave_cycle_length=64,
interleave_block_length=1)
dataset = self._dataset_loader.as_dataset(
split=self._split,
as_supervised=self._as_supervised,
shuffle_files=True,
decoders=decoders,
read_config=read_config)
return dataset
def _load_tfrecords(self):
logger.info('Using TFRecords to load data')
dataset_key = self._dataset_name.replace('/', '')
if dataset_key in self._tfrecord_datasets:
self._dataset_loader = self._tfrecord_datasets[dataset_key](
config=self._config, is_train=self._is_train
)
else:
raise ValueError('Unknown dataset name: {}'.format(self._dataset_name))
dataset = self._dataset_loader.as_dataset()
return dataset
|
py
|
1a562e15a05591f41e85736bd516a335d99eb391
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test the analysis code for the model chat task.
"""
import glob
import os
import pytest
from pytest_regressions.file_regression import FileRegressionFixture
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.model_chat.analysis.compile_results import (
ModelChatResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
class TestCompileResults:
"""
Test the analysis code for the model chat task.
"""
@pytest.fixture(scope="module")
def setup_teardown(self):
"""
Call code to set up and tear down tests.
Run this only once because we'll be running all analysis code before
checking any results.
"""
outputs = {}
# Paths
analysis_samples_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'
)
analysis_outputs_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'test_model_chat_analysis'
)
outputs['expected_stdout_path'] = os.path.join(
analysis_outputs_folder, 'test_stdout.txt'
)
prefixes = ['results', 'worker_results']
with testing_utils.tempdir() as tmpdir:
# Run analysis
with testing_utils.capture_output() as output:
arg_string = f"""\
--results-folders {analysis_samples_folder}
--output-folder {tmpdir}
"""
parser_ = ModelChatResultsCompiler.setup_args()
args_ = parser_.parse_args(arg_string.split())
ModelChatResultsCompiler(vars(args_)).compile_and_save_results()
stdout = output.getvalue()
# Define output structure
filtered_stdout = '\n'.join(
[line for line in stdout.split('\n') if not line.endswith('.csv')]
)
# Don't track lines that record where a file was saved to, because filenames
# are timestamped
outputs['stdout'] = filtered_stdout
for prefix in prefixes:
results_path = list(glob.glob(os.path.join(tmpdir, f'{prefix}_*')))[
0
]
with open(results_path) as f:
outputs[prefix] = f.read()
yield outputs
# All code after this will be run upon teardown
def test_stdout(self, setup_teardown):
"""
Check the output against what it should be.
"""
outputs = setup_teardown
check_stdout(
actual_stdout=outputs['stdout'],
expected_stdout_path=outputs['expected_stdout_path'],
)
def test_results_file(
self, setup_teardown, file_regression: FileRegressionFixture
):
"""
Check the results file against what it should be.
We don't use DataFrameRegression fixture because the results might include
non-numeric data.
"""
prefix = 'results'
outputs = setup_teardown
file_regression.check(outputs[prefix], basename=prefix)
def test_worker_results_file(
self, setup_teardown, file_regression: FileRegressionFixture
):
"""
Check the worker_results file against what it should be.
We don't use DataFrameRegression fixture because the results might include
non-numeric data.
"""
prefix = 'worker_results'
outputs = setup_teardown
file_regression.check(outputs[prefix], basename=prefix)
except ImportError:
pass
|
py
|
1a5630ae99fec59a127cd7c9013aebf59ae7b0af
|
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
from test import test_support
try:
import mmap
except ImportError:
mmap = None
import uuid
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
test_support.gc_collect()
if os.path.exists(test_support.TESTFN):
os.unlink(test_support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
@unittest.skipIf(test_support.is_jython and os._name == "nt",
"Does not properly close files under Windows")
def test_closerange(self):
first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, "a")
@test_support.cpython_only
def test_rename(self):
path = unicode(test_support.TESTFN)
if not test_support.is_jython:
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
if not test_support.is_jython:
new = sys.getrefcount(path)
self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
def setUp(self):
self.files = []
os.mkdir(test_support.TESTFN)
def tearDown(self):
for name in self.files:
os.unlink(name)
os.rmdir(test_support.TESTFN)
def check_tempfile(self, name):
# make sure it doesn't already exist:
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
# make sure we can create the file
open(name, "w")
self.files.append(name)
def test_tempnam(self):
if not hasattr(os, "tempnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
self.check_tempfile(os.tempnam())
name = os.tempnam(test_support.TESTFN)
self.check_tempfile(name)
name = os.tempnam(test_support.TESTFN, "pfx")
self.assertTrue(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
def test_tmpfile(self):
if not hasattr(os, "tmpfile"):
return
# As with test_tmpnam() below, the Windows implementation of tmpfile()
# attempts to create a file in the root directory of the current drive.
# On Vista and Server 2008, this test will always fail for normal users
# as writing to the root directory requires elevated privileges. With
# XP and below, the semantics of tmpfile() are the same, but the user
# running the test is more likely to have administrative privileges on
# their account already. If that's the case, then os.tmpfile() should
# work. In order to make this test as useful as possible, rather than
# trying to detect Windows versions or whether or not the user has the
# right permissions, just try and create a file in the root directory
# and see if it raises a 'Permission denied' OSError. If it does, then
# test that a subsequent call to os.tmpfile() raises the same error. If
# it doesn't, assume we're on XP or below and the user running the test
# has administrative privileges, and proceed with the test as normal.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
if sys.platform == 'win32':
name = '\\python_test_os_test_tmpfile.txt'
if os.path.exists(name):
os.remove(name)
try:
fp = open(name, 'w')
except IOError, first:
# open() failed, assert tmpfile() fails in the same way.
# Although open() raises an IOError and os.tmpfile() raises an
# OSError(), 'args' will be (13, 'Permission denied') in both
# cases.
try:
fp = os.tmpfile()
except OSError, second:
self.assertEqual(first.args, second.args)
else:
self.fail("expected os.tmpfile() to raise OSError")
return
else:
# open() worked, therefore, tmpfile() should work. Close our
# dummy file and proceed with the test as normal.
fp.close()
os.remove(name)
fp = os.tmpfile()
fp.write("foobar")
fp.seek(0,0)
s = fp.read()
fp.close()
self.assertTrue(s == "foobar")
def test_tmpnam(self):
if not hasattr(os, "tmpnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)
name = os.tmpnam()
if sys.platform in ("win32",):
# The Windows tmpnam() seems useless. From the MS docs:
#
# The character string that tmpnam creates consists of
# the path prefix, defined by the entry P_tmpdir in the
# file STDIO.H, followed by a sequence consisting of the
# digit characters '0' through '9'; the numerical value
# of this string is in the range 1 - 65,535. Changing the
# definitions of L_tmpnam or P_tmpdir in STDIO.H does not
# change the operation of tmpnam.
#
# The really bizarre part is that, at least under MSVC6,
# P_tmpdir is "\\". That is, the path returned refers to
# the root of the current drive. That's a terrible place to
# put temp files, and, depending on privileges, the user
# may not even be able to open a file in the root directory.
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
else:
self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(test_support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(test_support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_string_buffer("", 100)
if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(test_support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_large_time(self):
t1 = 5000000000 # some day in 2128
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError, e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
def setUp(self):
self.__save = dict(os.environ)
os.environ.clear()
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
# Bug 1110478
def test_update2(self):
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# link/ a symlink to TESTFN.2
# TEST2/
# tmp4 a lone file
walk_path = join(test_support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(test_support.TESTFN, "TEST2")
tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = file(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if hasattr(os, "symlink"):
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if hasattr(os, "symlink"):
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(test_support.TESTFN)
class MakedirTests (unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
def test_makedir(self):
base = test_support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def tearDown(self):
path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != test_support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests (unittest.TestCase):
def test_devnull(self):
f = file(os.devnull, 'w')
f.write('hello')
f.close()
f = file(os.devnull, 'r')
self.assertEqual(f.read(), '')
f.close()
class URandomTests (unittest.TestCase):
def test_urandom(self):
try:
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
# see http://bugs.python.org/issue3708
self.assertRaises(TypeError, os.urandom, 0.9)
self.assertRaises(TypeError, os.urandom, 1.1)
self.assertRaises(TypeError, os.urandom, 2.0)
except NotImplementedError:
pass
@unittest.skipIf(test_support.is_jython,
"Jython does not support os.execvpe.")
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, test_support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)
def test_mkdir(self):
f = open(test_support.TESTFN, "w")
try:
self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
finally:
f.close()
os.unlink(test_support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)
def test_chmod(self):
self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
#We omit close because it doesn't raise an exception on some platforms
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
fd = test_support.make_bad_fd()
f(fd, *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
except ValueError:
self.assertTrue(test_support.is_jython)
else:
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = int(test_support.make_bad_fd()) # need to take an int for Jython, given this test
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.check(os.ftruncate, 0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, " ")
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
else:
class PosixUidGidTests(unittest.TestCase):
pass
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value)
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = '0'
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 20
while count < max and proc.poll() is None:
if m[0] == '1':
break
time.sleep(0.5)
count += 1
else:
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting CTRL+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle CTRL+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
@unittest.skipIf(mmap == None, "This test depends on mmap")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
def test_main():
test_support.run_unittest(
FileTests,
TemporaryFileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
Win32KillTests
)
if __name__ == "__main__":
test_main()
|
py
|
1a5631660e25d8c0726aff46ae428d83704d13c3
|
# coding:utf-8
'''
Temperature compensation automated test
'''
import os
import datetime
import re
import json
from shutil import copyfile
import time
import modbus_tk
import threading
import atexit
from asgiref.sync import async_to_sync
from .api.fsv import FSVCtrl
from .baseboard.handle_board import BT2KHandler
from .excel.common_excel import BoardExcel
from commoninterface.master import THDevice
from .ftp.ftp_client import MyFTP
from .api.file_process import FlatProcess, GainProcess
from commoninterface.utils import PropagatingThread
class TcompTEST(object):
def __init__(self,chl_name,chl_layer,log):
self.channel_name = chl_name
self.channel_layer = chl_layer
self.logger = log
self.fsv = FSVCtrl() # spectrum analyzer
self.bd = None
self.bdexl = BoardExcel() # Excel template
self.th_dev = None
self.process_flat = FlatProcess()
self.process_gain = GainProcess()
self.adjust_evt = threading.Event()
self.adjust_evt.clear()
self.wait_evt = threading.Event()
self.wait_evt.clear()
self.wait_thread = PropagatingThread(target=self.check_cpu_temp)
self.wait_thread.setDaemon(True)
self.wait_thread.start()
self.adjust_thread = None
atexit.register(self.clean)
def rpt_message(self, msg):
try:
if self.channel_layer and self.channel_name:
print('rpt_msg')
async_to_sync(self.channel_layer.send)(
self.channel_name,
{
"type": "send.message",
"message": msg
}
)
except Exception as e:
print('rpt_msg error:{}'.format(e))
def clean(self):
self.fsv.close_inst()
self.bdexl.close_file()
def init_all(self, fsvconf, bdconf, thconf):
try:
fsvip = fsvconf['IP']
exlpath = fsvconf['DIR']
self.bd = BT2KHandler(**bdconf)
bip = bdconf['IP'] # device IP
if thconf:
self.th_dev = THDevice() # thermal chamber initialization
ret = self.bd.read_bb_sn()
if ret is None:
raise RuntimeError('no serial number or productno')
bbver, sn, productno = ret # returns BB version, serial number, part number
excel_path = self.make_dirs(exlpath, sn, bbver, productno) # copy the Excel template
fsvoffset = self.read_offset(excel_path)
# for test
self.fsv.init_inst(fsvip)
self.fsv.set_offset(fsvoffset) # set the attenuation offset
self.fsv.reset_fsv()
self.fsv.close_inst()
params_lst = [productno, excel_path, bip, fsvip]
self.gd_test(*params_lst, **thconf)
return True,excel_path
except Exception as e:
self.logger.error('error.{}.'.format(e))
self.rpt_message('ERROR.{}.'.format(e))
return False
finally:
self.bdexl.close_file()
self.fsv.close_inst()
def make_dirs(self, exlpath, sn, bbver, pno):
'''
Copy a new Excel workbook from the Excel test template
:param exlpath:
:return:
'''
try:
today = datetime.date.today().strftime('%y-%m-%d') # returns a string
dirname = os.path.dirname(exlpath)
new_path = os.path.join(dirname, today)
if sn:
new_path = os.path.join(new_path, sn)
if not os.path.exists(new_path):
os.makedirs(new_path)
# newexl_name = str(sn) + '.xlsx'
newexl_name = '1.xlsx'
end_path = os.path.join(new_path, newexl_name)
if os.path.exists(end_path):
return end_path
else:
copyfile(exlpath, end_path)
if self.bdexl.open_excel(end_path):
# write the BB version and serial number
self.bdexl.write_bbver_sn(bbver, sn)
self.bdexl.write_productno(pno)
else:
return None
return end_path
except Exception as e:
self.logger.error(e)
finally:
self.bdexl.close_file()
def gd_test(self, *args, **kwargs):
try:
thconf = kwargs
# if not thconf:
# raise ModuleNotFoundError('no configuration for the thermal chamber')
port = thconf.get('PORT', None)
# for test
# self.do_test(*args)
if self.th_dev.connect_th(PORT='COM{}'.format(port)):
self.logger.info('thermal chamber connected**')
self.th_dev.set_fixed_mode()
self.th_dev.start_dev()
self.do_test(*args)
self.logger.debug('thermal chamber running at 20 degrees')
# self.th_dev.stop_dev() # stop the chamber
self.th_dev.set_temp_sv(int(20 * 10)) # set the chamber to 20 degrees
except modbus_tk.modbus.ModbusError as e:
self.logger.exception('{}'.format(e))
raise StopIteration('th_dev')
def do_test(self, *args):
productno, excel_path, bip, fsvip = args
if excel_path is None:
raise RuntimeError('excel does not exist!')
if not self.bd.do_set_bd():
raise RuntimeError('forced cell setup failed')
self.do_test_on_cellid(productno, excel_path, bip, fsvip)
self.bd.switch_reboot() # enable the switch that reboots the device when no acquisition is detected
def set_bd_rf(self, freqpoint_dict):
key_bands = freqpoint_dict.keys()
for cellband in key_bands:
if 'CELL0' in cellband.upper():
cellid = '0'
self.bd.set_rf('1', 0)
else:
cellid = '1'
self.bd.set_rf('0', 0)
freq_points = freqpoint_dict[str(cellband)]
self.conf_board_on_some_freq(cellid, freq_points[0])
def do_test_on_cellid(self, productno, excel_path, bip, fsvip):
freqpoint_dict, freq_dict = self.read_boardtype(excel_path) # returns the frequency points and frequencies for each band
self.logger.debug(freqpoint_dict)
self.logger.debug(freq_dict)
power_range, test_temp, centertemp = self.read_excel_txatt_norm(excel_path) # power spec [lower limit, nominal, upper limit]
self.logger.debug(power_range)
test_temp = [float(item) for item in test_temp]
centertemp = float(centertemp)
if not self.bd.do_compensation('0'):
return
# initialize the temperature and flatness compensation tables
self.init_flat_and_gain_comp(productno, freqpoint_dict, test_temp, centertemp, bip, excel_path)
# open the required RF switches and set the gear to level 0
self.set_bd_rf(freqpoint_dict)
self.set_cpu_temp(centertemp, 0,0)
if self.process_flat.read_and_set(freqpoint_dict, centertemp):
self.update_bb_flat_comp(productno, bip, excel_path)
if self.process_gain.read_and_set(freqpoint_dict):
self.update_bb_gain_comp(productno, bip, excel_path)
# for test
flg = self.repeat_flat_comp(centertemp, fsvip, freqpoint_dict, freq_dict, power_range, productno, bip,
excel_path)
if not flg:
self.rpt_message('reference temperature compensation failed')
raise RuntimeError('reference temperature compensation failed')
self.repeat_gain_comp(test_temp, centertemp, fsvip, power_range, freqpoint_dict,
freq_dict, productno, bip, excel_path)
def repeat_gain_comp(self, test_temp, centertemp, fsvip, power_range, freqpoint_dict, freq_dict,
productno, bip, excel_path):
'''
Temperature compensation
:return:
'''
key_bands = freqpoint_dict.keys()
target = power_range[1]
length = len(test_temp)
centeridx = test_temp.index(centertemp)
# measure the compensation values at the different temperatures
self.logger.debug('starting temperature compensation test')
self.rpt_message('starting temperature compensation test')
# temperatures [20,10,0,-10,-20,40,50,60,70]
if centeridx >= 1:
newrange = list(range(centeridx - 1, -1, -1)) + list(range(centeridx + 1, length)) # index list []
else:
newrange = list(range(centeridx + 1, length))
self.logger.debug(newrange)
for index, idx in enumerate(newrange): # cool from the reference temperature down to the minimum, then heat up
temp = test_temp[idx] # temperature under test
self.logger.debug('target test temperature {}'.format(temp))
self.rpt_message('target test temperature {}'.format(temp))
tempidx = self.process_gain.read_tempidx(temp)
# get the tempidx of the next temperature
nexttemp = None
nexttempidx = None
if temp < centertemp:
nexttemp = int(temp) - 10 # minus 10 degrees: 20,10,0,-10,-20,...
self.logger.debug('next copy temperature {}'.format(nexttemp))
nexttempidx = self.process_gain.read_tempidx(nexttemp)
else:
nexttemp = int(temp) + 10 # plus 10 degrees: 40,50,60,70
nexttempidx = self.process_gain.read_tempidx(nexttemp)
self.logger.debug('next copy temperature {}'.format(nexttemp))
if temp > 0:
self.set_cpu_temp(temp - 1, 1, 1) # 1 degree below target is enough, since the temperature rises while running
else:
self.set_cpu_temp(temp + 1, 1, -1) # at low temperature, better to stay 1 degree above target, since the temperature keeps dropping
# fg = self.set_temp_comp(fsvip, target, freqpoint_dict, freq_dict, tempidx, nexttempidx)
# if not fg:
# raise RuntimeError('temperature compensation failed')
# self.update_bb_gain_comp(productno, bip, excel_path)
self.logger.debug('re-measuring at {} degrees'.format(temp))
power_dict = dict() # {'B41':[power,power,power],'E':[power,power,power]}
d1 = dict()
try:
self.conf_device(fsvip)
for cellband in key_bands:
if 'CELL0' in cellband.upper():
cellid = '0'
self.bd.set_rf('1', 0)
else:
cellid = '1'
self.bd.set_rf('0', 0)
bandstr = cellband.split('_')[-1]
band = re.sub('\D', '', bandstr) # numeric part of the band, 1/3/41/38/39
freq_points = freqpoint_dict[str(cellband)]
freqs = freq_dict[str(cellband)]
power_dict.setdefault(cellband, [('', '')] * len(freq_points))
d1.setdefault(cellband, [''] * len(freq_points))
for ii, freq_point in enumerate(freq_points):
if not freq_point:
continue
freq = freqs[ii]
if not self.conf_board_on_some_freq(cellid, freq_point): # set the baseband class-1 parameters and return the PCI
self.logger.error('failed to set class-1 parameters')
continue
i = 0
while True:
i = i + 1
if i > 10:
self.logger.error('{}-{} temperature compensation failed'.format(temp, freq_point))
self.rpt_message('{}-{} temperature compensation failed'.format(temp, freq_point))
self.fsv.close_inst()
os.system('pause')
break
result = self.power_test_on_some_freq(cellid, fsvip, freq, power_range)
if result is None:
self.conf_board_on_some_freq(cellid, freq_point)
continue
if result[0]:
power_dict[cellband][ii] = result[1:]
break
else:
# if i > 7:
# self.logger.error('{}-{} temperature compensation failed'.format(temp, freq_point))
# self.rpt_message('{}-{} temperature compensation failed'.format(temp, freq_point))
# self.fsv.close_inst()
# os.system('pause')
# break
# for test
currenttemp = self.bd.repeat_get_temp() # read the baseband board temperature
self.logger.debug('re-compensation, current device temperature {}'.format(currenttemp))
self.rpt_message('re-compensation, current device temperature {}'.format(currenttemp))
if abs(currenttemp - temp) >= 2:
if temp > 0:
self.set_cpu_temp(temp - 1, 1, 1)
else:
self.set_cpu_temp(temp + 1, 1, -1)
result = self.power_test_on_some_freq(cellid, fsvip, freq, power_range)
if result is None:
self.conf_board_on_some_freq(cellid, freq_point)
continue
power = result[1] # read the measured power
self.logger.debug('fsv read power={}'.format(power))
self.rpt_message('fsv read power={}'.format(power))
value = float(power) - float(target)
# if i > 1:
# value = value * 0.6
if abs(value)<=0.4:
value=0.15 if value>0 else -0.15
self.logger.debug('power-target={}'.format(value))
self.rpt_message('power-target={}'.format(value))
self.process_gain.set_bandinfo(tempidx, nexttempidx, band, freq_point,
float('%6.2f' % value))
self.update_bb_gain_comp(productno, bip, excel_path)
d1[cellband][ii] = self.process_gain.read_bandinfo(tempidx, band, freq_point)
except Exception as e:
self.logger.error(e)
self.rpt_message('ERROR:{}'.format(e))
finally:
self.fsv.close_inst()
try:
eid = (list(range(-20, 80, 10))).index(temp)
if self.bdexl.open_excel(excel_path):
self.bdexl.write_cali(eid, **d1)
self.bdexl.write_power(eid, **power_dict)
except Exception as e:
self.logger.error(e)
finally:
self.bdexl.close_file()
# add compensation values for -30 and -40 degrees by copying the -20 degree values
self.process_gain.copy_30and40()
self.process_gain.copy_70()
self.update_bb_gain_comp(productno, bip, excel_path)
def update_bb_gain_comp(self, productno, bip, excel_path):
'''
Push the local temperature (gain) compensation table to the BB
:return:
'''
self.logger.debug('update_bb_gain_comp')
i = 0
while 1:
if i > 3:
raise RuntimeError('compensation file error')
try:
if self.bd.remove_gain_comp_json(): # delete the old gaincomp.json file
if self.write_gain_comp_json(productno, bip, excel_path): # write the file and upload it
self.bd.refresh_comp() # refresh
break
else:
self.reset_bd()
time.sleep(3)
i = i + 1
except Exception as e:
self.reset_bd()
i = i + 1
def update_bb_flat_comp(self, productno, bip, excel_path):
self.logger.debug('update_bb_flat_comp')
i = 0
while 1:
if i > 3:
raise RuntimeError('compensation file error')
try:
if self.bd.remove_flat_comp_json(): # delete the old flat-compensation json file
if self.write_flat_comp_json(productno, bip, excel_path): # write the file and upload it
self.bd.refresh_comp() # refresh
break
else:
self.reset_bd()
time.sleep(3)
i = i + 1
except Exception as e:
self.reset_bd()
i = i + 1
def init_flat_and_gain_comp(self, productno, freqpoint_dict, test_temp, centertemp, bip, excel_path):
'''
Initialize the temperature and flatness compensation tables in memory
:return:
'''
ret = self.bd.read_flat_and_gain_json(productno)
if ret is None:
self.process_flat.init_flat_comp(freqpoint_dict, test_temp, centertemp)
self.process_gain.init_gain_comp(freqpoint_dict, test_temp)
self.update_bb_flat_comp(productno, bip, excel_path)
self.update_bb_gain_comp(productno, bip, excel_path)
else:
fj, gj = ret
if fj is None:
self.process_flat.init_flat_comp(freqpoint_dict, test_temp, centertemp)
self.update_bb_flat_comp(productno, bip, excel_path)
else:
self.process_flat.init_comp_from_file(fj)
if gj is None:
self.process_gain.init_gain_comp(freqpoint_dict, test_temp)
self.update_bb_gain_comp(productno, bip, excel_path)
else:
self.process_gain.init_comp_from_file(gj)
def repeat_flat_comp(self, centertemp, fsvip, freqpoint_dict, freq_dict, power_range, productno, bip, excel_path):
'''
Repeat the flatness compensation until the spec is met
:return:
'''
target = float(power_range[1])
lower = float(power_range[0]) # lower limit
upper = float(power_range[2]) # upper limit
freq_keys = freqpoint_dict.keys()
freq_values = freqpoint_dict.values()
tempidx = list(range(-20, 80, 10)).index(centertemp)
# self.set_flat_comp(fsvip, target, freqpoint_dict, freq_dict, productno, bip, excel_path)
# the temperature compensation at the reference temperature defaults to 0
temp_cali_dict = dict(zip(freq_keys, [[0] * len(list(freq_values)[0])] * len(freq_keys)))
power_dict = dict() # {'B41':[power,power,power],'E':[power,power,power]}
try:
self.conf_device(fsvip)
for cellband in freq_keys:
self.logger.debug('cellband={}'.format(cellband))
self.rpt_message('cellband={}'.format(cellband))
if 'CELL0' in cellband.upper():
cellid = '0'
self.bd.set_rf('1', 0)
else:
cellid = '1'
self.bd.set_rf('0', 0)
bandstr = cellband.split('_')[-1]
band = re.sub('\D', '', bandstr) # numeric part of the band, 1/3/41/38/39
freq_points = freqpoint_dict[str(cellband)]
freqs = freq_dict[str(cellband)]
power_dict.setdefault(cellband, [('', '')] * len(freq_points))
for idx, point in enumerate(freq_points):
freq = freqs[idx]
self.conf_board_on_some_freq(cellid, point) # set the baseband board frequency point
i = 0
while 1:
i = i + 1
if i > 9:
return False
# re-measure
# for test
plst = self.get_fsv_power(fsvip, target, freq)
if plst is None:
time.sleep(10)
continue
power = float(plst) # power read from the spectrum analyzer
if lower <= power <= upper:
power_dict[cellband][idx] = power, 'PASS'
break
power_dict[cellband][idx] = power, 'FAIL'
delta = power - target
if abs(delta)<=0.4:
delta=0.15 if delta>0 else -0.15
self.logger.debug('flat delta={}'.format(delta))
cali = float('%.2f' % delta)
self.process_flat.set_bandinfo(band, point, cali)
# update the device flatness compensation table
self.update_bb_flat_comp(productno, bip, excel_path)
else:
return True
except Exception as e:
self.logger.error(e)
finally:
self.fsv.close_inst()
try:
if self.bdexl.open_excel(excel_path):
self.bdexl.write_power(tempidx, **power_dict)
self.bdexl.write_cali(tempidx, **temp_cali_dict)
except Exception:
pass
finally:
self.bdexl.close_file()
def power_test_on_some_freq(self, cellid, fsvip, freq, power_range):
'''
freq: frequency
power_range: power spec [lower limit, nominal, upper limit]
:return:
'''
lower = float(power_range[0]) # lower limit
upper = float(power_range[2]) # upper limit
# for test
plst = self.get_fsv_power(fsvip, float(power_range[1]), freq)
if plst is None:
return None
power = float(plst) # power read from the spectrum analyzer
# txatt = self.bd.read_txatt(cellid)
if power >= lower and power <= upper:
return True, power, 'PASS'
elif power > upper:
# self.bd.set_rf(cellid, 0) # turn off the RF
self.logger.error('power {} above the upper limit'.format(power))
else:
# self.bd.set_rf(cellid, 0) # turn off the RF
self.logger.error('power {} below the required level'.format(power))
return False, power, 'FAIL'
def conf_board_on_some_freq(self, cellid, freq):
'''
Configure the baseband board on a given frequency point
freq: frequency point
:return:
'''
self.logger.debug('conf_board_on_some_freq')
try:
flag = self.bd.conf_para(cellid, freq) # set the frequency point and enable the power amplifier
return flag
except Exception as e:
self.logger.error(e)
return False
def get_and_send_powercali(self, cellid, fsvip, band, freq_points, freqs, target):
'''
Iterate over the three frequency points of a band, compute the power compensation and send it to the BB
band:
freq_points: frequency points, sent to the baseband board
freqs: frequencies, used to configure the spectrum analyzer
target: nominal power in dBm
return [[int(freq), float(cali), temperature],,]
'''
temp = self.bd.repeat_get_temp() # read the baseband board temperature
if temp is None:
raise IOError('get temp failed')
self.logger.debug('current temp={}'.format(temp))
for idx, point in enumerate(freq_points):
freq = freqs[idx]
self.conf_board_on_some_freq(cellid, point) # set the baseband board frequency point
plst = self.get_fsv_power(fsvip, target, freq)
if plst is None:
raise IOError('read fsv power failed')
power = plst
self.logger.debug('fsv read power={}'.format(power))
value = float(power) - float(target)
cali = float('%.2f' % value)
self.process_flat.set_bandinfo(band, point, cali) # update the in-memory table
def get_fsv_power(self, fsvip, upper, freq):
'''
Read the output power from the spectrum analyzer
upper: output power upper limit, used to set the ref level (the code below uses ref level = upper + 7)
:return:
'''
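# Descriptive note on the retry logic below: the analyzer is polled up to 3 times; any
# reading below (upper - 25) dB is treated as a sign that the device rebooted, which
# triggers reset_bd() and aborts the read, otherwise the valid readings are averaged.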
i = 0
ref_level = float(upper) + 7
lowedge = float(upper) - 21 - 4
while 1:
try:
if i >= 3:
return None
i = i + 1
self.fsv.set_for_txatt(ref_level, freq)
time.sleep(1)
plst = []
j = 0
# sweep time 1s, read 5 times and take the average (the loop below currently takes a single reading)
# 12.23 spectrum analyzer averaged over 5 readings
while j < 1:
power = self.fsv.get_power(ref_level, freq) # read the spectrum analyzer power, returns a list
self.logger.debug('get_fsv_power={}'.format(power[0]))
if power is not None:
# plst.append(power)
# for test
if float(power[0]) > lowedge:
plst.append(power)
else:
self.logger.error('before reset_bd,power={}'.format(power[0]))
self.reset_bd() # the device may have rebooted, which drives the output below -19
return None
else:
break
j = j + 1
if plst:
plst = [float(item[0]) for item in plst]
self.logger.debug('power list={}'.format(plst))
return sum(plst) / len(plst)
time.sleep(3)
except Exception as e:
self.logger.error(e)
time.sleep(3)
self.fsv.close_inst()
self.conf_device(fsvip)
time.sleep(3)
continue
def reset_bd(self):
if not self.bd.do_set_bd():
raise RuntimeError('forced cell setup failed')
def read_excel_txatt_norm(self, excel_path):
'''
Read the uplink/downlink power, frequency points and other parameters from the Excel file
:param excel_path:
:return:
'''
try:
if self.bdexl.open_excel(excel_path):
normlist = self.bdexl.get_txatt_norm() # read the output power spec
templist = self.bdexl.read_cpu_temp() # read the list of temperatures to be tested
centertemp = self.bdexl.read_cpu_center_temp() # read the reference temperature
return normlist, templist, centertemp
except Exception as e:
raise RuntimeError(e)
finally:
self.bdexl.close_file()
def read_boardtype(self, excel_path):
'''
Read the board type and the master/slave chip frequency points and frequencies from the Excel file
:param excel_path:
:return:
'''
try:
if self.bdexl.open_excel(excel_path):
freqpoint_dict, freq_dict = self.bdexl.get_set_condition()
return freqpoint_dict, freq_dict
except Exception as e:
raise RuntimeError('read_boardtype ERROR:{}'.format(e))
finally:
self.bdexl.close_file()
def read_offset(self, excel_path):
try:
if self.bdexl.open_excel(excel_path):
self.bdexl.get_dl_rows()
offset = self.bdexl.get_offset()
return offset
except Exception as e:
raise RuntimeError('read_offset ERROR:{}'.format(e))
finally:
self.bdexl.close_file()
def conf_device(self, fsvip):
'''
Instrument initialization
:return:
'''
self.logger.debug('conf_fsv')
# for test
i = 0
while 1:
i = i + 1
if i >= 3:
self.logger.error('fsv error')
raise RuntimeError('fsv error')
try:
self.fsv.init_inst(fsvip)
time.sleep(1)
self.fsv.reset_fsv()
time.sleep(1)
except Exception as e:
self.logger.error(e)
time.sleep(10)
self.fsv.close_inst()
else:
break
def set_flat_comp(self, fsvip, target, freqpoint_dict, freq_dict, productno, bip, excel_path):
'''
Flatness compensation
:return:
'''
try:
self.logger.debug('reference flatness compensation')
key_bands = freqpoint_dict.keys()
self.conf_device(fsvip)
for cellband in key_bands:
self.logger.debug('cellband={}'.format(cellband))
if 'CELL0' in cellband.upper():
cellid = '0'
self.bd.set_rf('1', 0)
else:
cellid = '1'
self.bd.set_rf('0', 0)
bandstr = cellband.split('_')[-1]
band = re.sub('\D', '', bandstr) # numeric part of the band, 1/3/41/38/39
freq_points = freqpoint_dict[str(cellband)]
freqs = freq_dict[str(cellband)]
self.get_and_send_powercali(cellid, fsvip, band, freq_points, freqs,
target) # write the flatness compensation table
# update the device flatness compensation table
self.update_bb_flat_comp(productno, bip, excel_path)
except Exception as e:
self.logger.error(e)
raise RuntimeError(e)
finally:
self.fsv.close_inst()
def set_temp_comp(self, fsvip, target, freqpoint_dict, freq_dict, tempidx, nexttempidx):
'''
Temperature compensation
:return: compensation values {'':[],'':[]}
'''
i = 0
while 1:
if i > 3:
return False
try:
key_bands = freqpoint_dict.keys()
self.conf_device(fsvip)
for cellband in key_bands:
self.logger.debug('cellband={}'.format(cellband))
if 'CELL0' in cellband.upper():
cellid = '0'
self.bd.set_rf('1', 0)
else:
cellid = '1'
self.bd.set_rf('0', 0)
bandstr = cellband.split('_')[-1]
band = re.sub('\D', '', bandstr) # numeric part of the band, 1/3/41/38/39
# bandinfo=self.process_gain.read_bandinfo(int(band))
freq_points = freqpoint_dict[str(cellband)]
freqs = freq_dict[str(cellband)]
for idx, point in enumerate(freq_points):
freq = freqs[idx]
self.conf_board_on_some_freq(cellid, point) # set the baseband board frequency point
# for test
plst = self.get_fsv_power(fsvip, target, freq)
if plst is None:
raise IOError('read fsv power failed')
power = plst
self.logger.debug('fsv read power={}'.format(power))
value = float(power) - float(target)
self.process_gain.set_bandinfo(tempidx, nexttempidx, band, point, float('%6.2f' % value))
return True
except Exception as e:
self.logger.error(e)
i = i + 1
finally:
self.fsv.close_inst()
def set_cpu_temp(self, target, bias, direction):
'''
Drive the device to a given temperature
target: target temperature
bias: allowed deviation from the target temperature in degrees
:return:
'''
# for test
# logger.debug('wait for test...')
# time.sleep(10)
self.logger.debug('temperature setpoint target {}'.format(target))
self.rpt_message('temperature setpoint target {}'.format(target))
# time.sleep(3)
# for test
if not self.adjust_thread or not self.adjust_thread.is_alive():
self.adjust_thread = PropagatingThread(target=self.adjust_cpu_temp, args=(target, bias, direction))
self.adjust_evt.set()
self.adjust_thread.setDaemon(True)
self.adjust_thread.start()
self.adjust_thread.join()
def write_gain_comp_json(self, productno, bip, excel_path):
'''
Write the file and upload it to the T2K over FTP
:return:
'''
myftp = MyFTP(str(bip))
try:
js = self.process_gain.get_json()
dirname = os.path.dirname(excel_path)
pno = productno
remote_file = '/mnt/flash/scbs/{}_GainComp.json'.format(pno)
local_file = os.path.join(dirname, 'GainComp.json')
with open(local_file, 'wb') as f:
f.write(json.dumps(js, indent=4, ensure_ascii=False).encode('utf-8'))
if myftp.rpt_json(local_file, remote_file):
return True
except Exception as e:
self.logger.error('write_gain_comp_json ERROR:{}'.format(e))
return False
finally:
myftp.close()
def write_flat_comp_json(self, productno, bip, excel_path):
'''
Upload the local flatness compensation table to the BB
:param productno:
:param bip:
:param excel_path:
:return:
'''
myftp = MyFTP(str(bip))
try:
js = self.process_flat.get_json()
dirname = os.path.dirname(excel_path)
pno = productno
remote_file = '/mnt/flash/scbs/{}_FlatComp.json'.format(pno)
local_file = os.path.join(dirname, 'FlatComp.json')
with open(local_file, 'wb') as f:
f.write(json.dumps(js, indent=4, ensure_ascii=False).encode('utf-8'))
if myftp.rpt_json(local_file, remote_file):
return True
except Exception as e:
self.logger.error('write_flat_comp_json ERROR:{}'.format(e))
return False
finally:
myftp.close()
def check_cpu_temp(self):
'''
Read the device temperature
:return:
'''
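# Descriptive note on the loop below: the board temperature is sampled roughly every 50 s,
# and the temperature is declared settled once the oldest and newest of five successive
# readings differ by no more than 1 degree, which then releases adjust_evt.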
a, b, c, d, e = [-100] * 5
i = 0
MAX = 90
while True:
if i > MAX:
break
if self.wait_evt.is_set():
temp = self.bd.repeat_get_temp() # read the baseband board temperature
self.logger.debug('current cpu temp={}'.format(temp))
self.rpt_message('current cpu temp={}'.format(temp))
if temp is None:
i = i + 1
continue
f = temp
a, b, c, d, e = b, c, d, e, f
if a == e or abs(a - e) <= 1:
self.logger.debug('cpu hit {}'.format(e))
self.rpt_message('cpu hit {}'.format(e))
self.wait_evt.clear()
# self.adjust_flag = True
self.adjust_evt.set()
else:
time.sleep(50)
else:
self.logger.debug('wait evt')
self.wait_evt.wait()
i = 0
a, b, c, d, e = [-100] * 5
i = i + 1
def adjust_cpu_temp(self, target, bias, direction=1):
'''
:param target:
:param bias:
:param direction: 1 means the target temperature is positive, -1 means it is negative or zero
:return:
'''
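# Step-size coefficients for driving the chamber setpoint towards the target temperature:
# x is applied for large errors (|delta| >= 10), y for medium errors (|delta| >= 3),
# and z is a fixed fine-adjustment step used once the error is small.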
x = 0.7
y = 0.4
z = 0.7
i = 0
period = 1
oldt = None
if bias == 0:
trg = [0, 0]
else:
if direction > 0:
trg = [-2, 0]
else:
trg = [0, 2]
while True:
Tset = self.th_dev.get_temp_pv() / 10.0 # thermal chamber temperature
self.logger.debug('th temp={}'.format(Tset))
self.logger.debug('last th setvalue={}'.format(oldt))
if oldt is not None and abs(Tset - oldt) >= 0.3:
time.sleep(30)
self.logger.debug('wait th-dev hit setvalue')
continue
if oldt is not None and self.adjust_evt.is_set():
self.wait_evt.set()
self.adjust_evt.clear()
self.logger.debug('wait adjust_evt')
self.adjust_evt.wait()
try:
if self.adjust_evt.is_set():
Tact = self.bd.repeat_get_temp() # read the baseband board temperature
self.logger.debug('cpu temp={}'.format(Tact))
self.rpt_message('cpu temp={}'.format(Tact))
if Tact is None:
raise IOError('get temp failed')
delta = float(target) - float(Tact)
self.logger.debug('temp delta={}'.format(delta))
if trg[0] <= delta <= trg[1]:
i += 1
time.sleep(30)
elif abs(delta) >= 10:
i = 0
T = Tset + delta * x
oldt = T
self.logger.debug('SET T={}'.format(T))
self.th_dev.set_temp_sv(int(T * 10))
time.sleep(60 * 10)
elif abs(delta) >= 3:
i = 0
T = Tset + delta * y
oldt = T
self.logger.debug('SET T={}'.format(T))
self.th_dev.set_temp_sv(int(T * 10))
time.sleep(60 * int(period))
else:
i = 0
if delta > 0:
T = Tset + z
else:
T = Tset - z
oldt = T
self.th_dev.set_temp_sv(int(T * 10))
time.sleep(30 * 1) # wait 30 seconds
if i >= 1:
self.logger.debug('hit target')
break
except Exception as e:
self.logger.error(e)
self.reset_bd()
|
py
|
1a5631b814f95da20315dfaf3465eb392e50aebe
|
import argparse
import os
import data
import models
import visualize
def main():
parser = argparse.ArgumentParser(description='Yelp Rating Interpretation')
parser.add_argument('--n-estimators', type=int, default=100)
parser.add_argument('--criterion', type=str, default='gini',
choices=['gini', 'entropy'])
parser.add_argument('--max-depth', type=int, default=20)
parser.add_argument('--seed', type=int, default=23)
parser.add_argument('--top-n-features', type=int)
parser.add_argument('--train-datafile', type=str,
default='data/train.csv')
parser.add_argument('--test-datafile', type=str,
default='data/test.csv')
parser.add_argument('--model-path', type=str,
default='models/model.pkl')
parser.add_argument('--fig-path', type=str,
default='figure/importance.png')
args = parser.parse_args()
model = models.RatingInterpreter(n_estimators=args.n_estimators,
criterion=args.criterion,
max_depth=args.max_depth,
seed=args.seed,
top_n_features=args.top_n_features)
# if os.path.exists(args.model_path):
# model.load(args.model_path)
# else:
train_dataset = data.Dataset(args.train_datafile)
test_dataset = data.Dataset(args.test_datafile)
# acc, rmse = model.train(train_dataset, test_dataset)
acc = model.train(train_dataset, test_dataset)
model.save(args.model_path)
importances, std = model.get_importance()
# visualize.display(importances, std, acc, rmse, args.fig_path,
# top_n_features=args.top_n_features)
visualize.display(importances, std, acc, args.fig_path,
top_n_features=args.top_n_features)
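# Example invocation, assuming this module is run directly as a script; the file name and
# argument values below are illustrative only:
#   python main.py --n-estimators 200 --max-depth 15 --top-n-features 10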
if __name__ == '__main__':
main()
|
py
|
1a5631fe71f7c43f31d34cce61a6f64d4d84f245
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SISeguimiento.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py
|
1a56330b237c4d855e8e0a7bf495ce4e69cd350e
|
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import socket
from paasta_tools.smartstack_tools import get_multiple_backends
def get_replication_for_services(synapse_host, synapse_port, services):
"""Returns the replication level for the provided services
This check is intended to be used with an haproxy load balancer, and
relies on the implementation details of that choice.
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port number that this check should contact for replication information.
:param services: A list of strings that are the service names
that should be checked for replication.
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available
replicas
:returns None: If it cannot connect to the specified synapse host and port
"""
backends = get_multiple_backends(
services=services,
synapse_host=synapse_host,
synapse_port=synapse_port,
)
counter = collections.Counter([b['pxname'] for b in backends if backend_is_up(b)])
return dict((sn, counter[sn]) for sn in services)
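# A minimal usage sketch; the host, port and service names below are assumptions for
# illustration, not values taken from any real configuration:
#   >>> get_replication_for_services('localhost', 3212, ['service_a.main', 'service_b.canary'])
#   {'service_a.main': 3, 'service_b.canary': 0}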
def backend_is_up(backend):
"""Returns whether a server is receiving traffic in HAProxy.
:param backend: backend dict, like one of those returned by smartstack_tools.get_multiple_backends.
:returns is_up: Whether the backend is in a state that receives traffic.
"""
return str(backend['status']).startswith('UP')
def ip_port_hostname_from_svname(svname):
"""This parses the haproxy svname that smartstack creates, which is in the form ip:port_hostname.
:param svname: A string in the format ip:port_hostname
:returns ip_port_hostname: A tuple of ip, port, hostname.
"""
ip, port_hostname = svname.split(':', 1)
port, hostname = port_hostname.split('_', 1)
return ip, int(port), hostname
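# Illustrative example of the parse above (the svname value is hypothetical):
#   >>> ip_port_hostname_from_svname('10.40.5.6:31000_box4.example.com')
#   ('10.40.5.6', 31000, 'box4.example.com')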
def get_registered_marathon_tasks(
synapse_host,
synapse_port,
service,
marathon_tasks,
):
"""Returns the marathon tasks that are registered in haproxy under a given service (nerve_ns).
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port that this check should contact for replication information.
:param service: The name of the service (nerve_ns) whose backends should be checked.
:param marathon_tasks: A list of MarathonTask objects, whose tasks we will check for in the HAProxy status.
"""
backends = get_multiple_backends([service], synapse_host=synapse_host, synapse_port=synapse_port)
healthy_tasks = []
for backend, task in match_backends_and_tasks(backends, marathon_tasks):
if backend is not None and task is not None and backend['status'].startswith('UP'):
healthy_tasks.append(task)
return healthy_tasks
def match_backends_and_tasks(backends, tasks):
"""Returns tuples of matching (backend, task) pairs, as matched by IP and port. Each backend will be listed exactly
once, and each task will be listed once per port. If a backend does not match with a task, (backend, None) will
be included. If a task's port does not match with any backends, (None, task) will be included.
:param backends: An iterable of haproxy backend dictionaries, e.g. the list returned by
smartstack_tools.get_multiple_backends.
:param tasks: An iterable of MarathonTask objects.
"""
backends_by_ip_port = collections.defaultdict(list) # { (ip, port) : [backend1, backend2], ... }
backend_task_pairs = []
for backend in backends:
ip, port, _ = ip_port_hostname_from_svname(backend['svname'])
backends_by_ip_port[ip, port].append(backend)
for task in tasks:
ip = socket.gethostbyname(task.host)
for port in task.ports:
for backend in backends_by_ip_port.pop((ip, port), [None]):
backend_task_pairs.append((backend, task))
# we've been popping in the above loop, so anything left didn't match a marathon task.
for backends in backends_by_ip_port.values():
for backend in backends:
backend_task_pairs.append((backend, None))
return backend_task_pairs
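# Sketch of the pairing behaviour above (backend dicts and task hosts/ports are made up):
# a backend whose svname resolves to the (ip, port) of a task yields (backend, task),
# an unmatched backend yields (backend, None), and an unmatched task port yields (None, task).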
|
py
|
1a56339c906cc8bb6a2424e82a118cf2bf3d04fe
|
"""
WSGI config for BabbelLearn project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# babbel_learn directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "babbel_learn"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
py
|
1a56340b3b3dd1d87a3b71e0efab0faaa7576ad1
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
""" ISO metadata parser """
from __future__ import (absolute_import, division, print_function)
import warnings
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gfc","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
class MD_Metadata(object):
""" Process gmd:MD_Metadata """
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.locales = []
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.contentinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLAttribute(val, 'codeListValue')
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
self.locales = []
for i in md.findall(util.nspath_eval('gmd:locale/gmd:PT_Locale', namespaces)):
self.locales.append(PT_Locale(i))
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
# TODO: merge .identificationinfo into .identification
warnings.warn(
'the .identification and .serviceidentification properties will merge into '
'.identification being a list of properties. This is currently implemented '
'in .identificationinfo. '
'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
FutureWarning)
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
if len(idinfo) > 0:
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
self.contentinfo = []
for contentinfo in md.findall(util.nspath_eval('gmd:contentInfo/gmd:MD_FeatureCatalogueDescription', namespaces)):
self.contentinfo.append(MD_FeatureCatalogueDescription(contentinfo))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
def get_default_locale(self):
""" get default gmd:PT_Locale based on gmd:language """
for loc in self.locales:
if loc.languagecode == self.language:
return loc
return None
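# A minimal usage sketch, assuming an ISO 19139 record is available on disk (the path is
# hypothetical and only used for illustration):
#   from owslib.etree import etree
#   exml = etree.parse('tests/resources/iso_record.xml')
#   md = MD_Metadata(exml)
#   print(md.identifier, md.datestamp, [c.organization for c in md.contact])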
class PT_Locale(object):
""" process PT_Locale """
def __init__(self, md=None):
if md is None:
self.id = None
self.languagecode = None
self.charset = None
else:
self.id = md.attrib.get('id')
self.languagecode = md.find(util.nspath_eval('gmd:languageCode/gmd:LanguageCode', namespaces)).attrib.get('codeListValue')
self.charset = md.find(util.nspath_eval('gmd:characterEncoding/gmd:MD_CharacterSetCode', namespaces)).attrib.get('codeListValue')
class CI_Date(object):
""" process CI_Date """
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
""" process CI_ResponsibleParty """
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
self.phone = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
self.fax = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
self.address = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
self.city = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
self.region = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
self.postcode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
self.country = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
self.email = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
if val is not None:
self.onlineresource = CI_OnlineResource(val)
else:
self.onlineresource = None
self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_Keywords(object):
"""
Class for the metadata MD_Keywords element
"""
def __init__(self, md=None):
if md is None:
self.keywords = []
self.type = None
self.thesaurus = None
self.kwdtype_codeList = 'http://standards.iso.org/ittf/PubliclyAvailableStandards/ISO_19139_Schemas/resources/codelist/gmxCodelists.xml#MD_KeywordTypeCode'
else:
self.keywords = []
val = md.findall(util.nspath_eval('gmd:keyword/gco:CharacterString', namespaces))
for word in val:
self.keywords.append(util.testXMLValue(word))
self.type = None
val = md.find(util.nspath_eval('gmd:type/gmd:MD_KeywordTypeCode', namespaces))
self.type = util.testXMLAttribute(val, 'codeListValue')
self.thesaurus = None
val = md.find(util.nspath_eval('gmd:thesaurusName/gmd:CI_Citation', namespaces))
if val is not None:
self.thesaurus = {}
thesaurus = val.find(util.nspath_eval('gmd:title/gco:CharacterString', namespaces))
self.thesaurus['title'] = util.testXMLValue(thesaurus)
thesaurus = val.find(util.nspath_eval('gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
self.thesaurus['date'] = util.testXMLValue(thesaurus)
thesaurus = val.find(util.nspath_eval('gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.thesaurus['datetype'] = util.testXMLAttribute(thesaurus, 'codeListValue')
class MD_DataIdentification(object):
""" process MD_DataIdentification """
def __init__(self, md=None, identtype=None):
if md is None:
self.identtype = None
self.title = None
self.alternatetitle = None
self.aggregationinfo = None
self.uricode = []
self.uricodespace = []
self.date = []
self.datetype = []
self.uselimitation = []
self.uselimitation_url = []
self.accessconstraints = []
self.classification = []
self.otherconstraints = []
self.securityconstraints = []
self.useconstraints = []
self.denominators = []
self.distance = []
self.uom = []
self.resourcelanguage = []
self.resourcelanguagecode = []
self.creator = []
self.publisher = []
self.contributor = []
self.edition = None
self.abstract = None
self.abstract_url = None
self.purpose = None
self.status = None
self.contact = []
self.keywords = []
self.keywords2 = []
self.topiccategory = []
self.supplementalinformation = None
self.extent = None
self.bbox = None
self.temporalextent_start = None
self.temporalextent_end = None
self.spatialrepresentationtype = []
else:
self.identtype = identtype
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
self.alternatetitle = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)) + \
md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
self.uricodespace = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricodespace.append(val)
self.date = []
self.datetype = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
self.date.append(CI_Date(i))
self.uselimitation = []
self.uselimitation_url = []
for i in \
md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useLimitation/gco:CharacterString', namespaces)) + \
md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uselimitation.append(val)
for i in \
md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useLimitation/gmx:Anchor', namespaces)) + \
md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gmx:Anchor', namespaces)):
val = util.testXMLValue(i)
val1 = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
if val is not None:
self.uselimitation.append(val)
self.uselimitation_url.append(val1)
self.accessconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.accessconstraints.append(val)
self.classification = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.classification.append(val)
self.otherconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.otherconstraints.append(val)
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:classification/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.securityconstraints.append(val)
self.useconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.useconstraints.append(val)
self.denominators = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.denominators.append(val)
self.distance = []
self.uom = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.distance.append(val)
self.uom.append(i.get("uom"))
self.resourcelanguagecode = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.resourcelanguagecode.append(val)
self.resourcelanguage = []
for i in md.findall(util.nspath_eval('gmd:language/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.resourcelanguage.append(val)
self.creator = []
self.publisher = []
self.contributor = []
for val in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
role = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
if role is not None:
clv = _testCodeListValue(role)
rp = CI_ResponsibleParty(val)
if clv == 'originator':
self.creator.append(rp)
elif clv == 'publisher':
self.publisher.append(rp)
elif clv == 'author':
self.contributor.append(rp)
val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
self.edition = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gmx:Anchor', namespaces))
self.abstract_url = None
if val is not None:
self.abstract = util.testXMLValue(val)
self.abstract_url = val.attrib.get(util.nspath_eval('xlink:href', namespaces))
val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
self.purpose = util.testXMLValue(val)
self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
self.spatialrepresentationtype = []
for val in md.findall(util.nspath_eval('gmd:spatialRepresentationType/gmd:MD_SpatialRepresentationTypeCode', namespaces)):
val = util.testXMLAttribute(val, 'codeListValue')
if val:
self.spatialrepresentationtype.append(val)
warnings.warn(
'The .keywords and .keywords2 properties will merge into the '
'.keywords property in the future, with .keywords becoming a list '
'of MD_Keywords instances. This is currently implemented in .keywords2. '
'Please see https://github.com/geopython/OWSLib/issues/301 for more information',
FutureWarning)
self.keywords = []
for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
mdkw = {}
mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
mdkw['thesaurus'] = {}
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
mdkw['thesaurus']['title'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
mdkw['thesaurus']['datetype'] = util.testXMLAttribute(val, 'codeListValue')
mdkw['keywords'] = []
for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
if val is not None:
val2 = util.testXMLValue(val)
if val2 is not None:
mdkw['keywords'].append(val2)
self.keywords.append(mdkw)
self.keywords2 = []
for mdkw in md.findall(util.nspath_eval('gmd:descriptiveKeywords/gmd:MD_Keywords', namespaces)):
self.keywords2.append(MD_Keywords(mdkw))
self.topiccategory = []
for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.topiccategory.append(val)
val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
self.supplementalinformation = util.testXMLValue(val)
# There may be multiple geographicElement, create an extent
# from the one containing either an EX_GeographicBoundingBox or EX_BoundingPolygon.
# The schema also specifies an EX_GeographicDescription. This is not implemented yet.
val = None
val2 = None
val3 = None
extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
for extent in extents:
if val is None:
for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
val = e
break
self.extent = EX_Extent(val)
self.bbox = self.extent.boundingBox # for backwards compatibility
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
self.temporalextent_start = util.testXMLValue(val2)
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
""" process MD_Distributor """
def __init__(self, md=None):
if md is None:
self.contact = None
self.online = []
else:
self.contact = None
val = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.online = []
for ol in md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class MD_Distribution(object):
""" process MD_Distribution """
def __init__(self, md=None):
if md is None:
self.format = None
self.version = None
self.distributor = []
self.online = []
else:
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
self.format = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
self.distributor = []
for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces)):
self.distributor.append(MD_Distributor(dist))
self.online = []
for ol in md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class DQ_DataQuality(object):
''' process DQ_DataQuality'''
def __init__(self, md=None):
if md is None:
self.conformancetitle = []
self.conformancedate = []
self.conformancedatetype = []
self.conformancedegree = []
self.lineage = None
self.lineage_url = None
self.specificationtitle = None
self.specificationdate = []
else:
self.conformancetitle = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancetitle.append(val)
self.conformancedate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedate.append(val)
self.conformancedatetype = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.conformancedatetype.append(val)
self.conformancedegree = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedegree.append(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
self.lineage = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gmx:Anchor', namespaces))
if val is not None:
self.lineage = util.testXMLValue(val)
self.lineage_url = val.attrib.get(util.nspath_eval('xlink:href', namespaces))
val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.specificationtitle = util.testXMLValue(val)
self.specificationdate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.specificationdate.append(val)
class SV_ServiceIdentification(object):
""" process SV_ServiceIdentification """
def __init__(self, md=None):
if md is None:
self.title = None
self.abstract = None
self.contact = None
self.identtype = 'service'
self.type = None
self.version = None
self.fees = None
self.bbox = None
self.couplingtype = None
self.operations = []
self.operateson = []
else:
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
            self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
self.contact = None
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.identtype = 'service'
val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
self.type = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
self.fees = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
if val is not None:
self.bbox = EX_Extent(val)
else:
self.bbox = None
self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
self.operations = []
for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
tmp = {}
val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
tmp['name'] = util.testXMLValue(val)
tmp['dcplist'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
tmp['dcplist'].append(tmp2)
tmp['connectpoint'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
tmp['connectpoint'].append(CI_OnlineResource(tmp3))
self.operations.append(tmp)
self.operateson = []
for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
tmp = {}
tmp['uuidref'] = i.attrib.get('uuidref')
tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
self.operateson.append(tmp)
class CI_OnlineResource(object):
""" process CI_OnlineResource """
    def __init__(self, md=None):
if md is None:
self.url = None
self.protocol = None
self.name = None
self.description = None
self.function = None
else:
val = md.find(util.nspath_eval('gmd:linkage/gmd:URL', namespaces))
self.url = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces))
self.protocol = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:description/gco:CharacterString', namespaces))
self.description = util.testXMLValue(val)
self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
def __init__(self, md=None):
if md is None:
self.minx = None
self.maxx = None
self.miny = None
self.maxy = None
else:
val = md.find(util.nspath_eval('gmd:westBoundLongitude/gco:Decimal', namespaces))
self.minx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:eastBoundLongitude/gco:Decimal', namespaces))
self.maxx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:southBoundLatitude/gco:Decimal', namespaces))
self.miny = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:northBoundLatitude/gco:Decimal', namespaces))
self.maxy = util.testXMLValue(val)
class EX_Polygon(object):
def __init__(self, md=None):
if md is None:
self.exterior_ring = None
self.interior_rings = []
else:
linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
if linear_ring is not None:
self.exterior_ring = self._coordinates_for_ring(linear_ring)
interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
self.interior_rings = []
for iring_element in interior_ring_elements:
linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
self.interior_rings.append(self._coordinates_for_ring(linear_ring))
def _coordinates_for_ring(self, linear_ring):
coordinates = []
positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
for pos in positions:
tokens = pos.text.split()
coords = tuple([float(t) for t in tokens])
coordinates.append(coords)
return coordinates
class EX_GeographicBoundingPolygon(object):
def __init__(self, md=None):
if md is None:
self.is_extent = None
self.polygons = []
else:
val = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
self.is_extent = util.testXMLValue(val)
md_polygons = md.findall(util.nspath_eval('gmd:polygon', namespaces))
self.polygons = []
for val in md_polygons:
self.polygons.append(EX_Polygon(val))
class EX_Extent(object):
""" process EX_Extent """
def __init__(self, md=None):
if md is None:
self.boundingBox = None
self.boundingPolygon = None
self.description_code = None
else:
self.boundingBox = None
self.boundingPolygon = None
if md is not None:
bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
if bboxElement is not None:
self.boundingBox = EX_GeographicBoundingBox(bboxElement)
polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
if polygonElement is not None:
self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
""" process MD_ReferenceSystem """
def __init__(self, md=None):
if md is None:
self.code = None
self.codeSpace = None
self.version = None
else:
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
if val is not None:
self.code = util.testXMLValue(val)
else:
self.code = None
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces))
if val is not None:
self.codeSpace = util.testXMLValue(val)
else:
self.codeSpace = None
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:version/gco:CharacterString', namespaces))
if val is not None:
self.version = util.testXMLValue(val)
else:
self.version = None
def _testCodeListValue(elpath):
""" get gco:CodeListValue_Type attribute, else get text content """
if elpath is not None: # try to get @codeListValue
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else: # see if there is element text
return util.testXMLValue(elpath)
else:
return None
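# Minimal usage sketch (hypothetical helper): _testCodeListValue prefers the
# codeListValue attribute and falls back to the element text when it is absent.
def _example_testcodelistvalue():
    el = etree.fromstring(
        '<gmd:CI_RoleCode xmlns:gmd="http://www.isotc211.org/2005/gmd" '
        'codeListValue="pointOfContact">point of contact</gmd:CI_RoleCode>'
    )
    return _testCodeListValue(el)  # expected: 'pointOfContact'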
class CodelistCatalogue(object):
""" process CT_CodelistCatalogue """
def __init__(self, ct):
val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
self.scope = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
self.fieldapp = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.date = util.testXMLValue(val)
self.dictionaries = {}
for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id] = {}
val = i.find(util.nspath_eval('gml32:description', namespaces))
self.dictionaries[id]['description'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gml32:identifier', namespaces))
self.dictionaries[id]['identifier'] = util.testXMLValue(val)
self.dictionaries[id]['entries'] = {}
for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id]['entries'][id2] = {}
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
def getcodelistdictionaries(self):
return list(self.dictionaries.keys())
def getcodedefinitionidentifiers(self, cdl):
if cdl in self.dictionaries:
ids = []
for i in self.dictionaries[cdl]['entries']:
ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
return ids
else:
return None
class MD_FeatureCatalogueDescription(object):
"""Process gmd:MD_FeatureCatalogueDescription"""
def __init__(self, fcd=None):
if fcd is None:
self.xml = None
self.compliancecode = None
self.language = []
self.includedwithdataset = None
self.featuretypenames = []
self.featurecatalogues = []
else:
if hasattr(fcd, 'getroot'): # standalone document
self.xml = etree.tostring(fcd.getroot())
else: # part of a larger document
self.xml = etree.tostring(fcd)
self.compliancecode = None
val = fcd.find(util.nspath_eval('gmd:complianceCode/gco:Boolean', namespaces))
val = util.testXMLValue(val)
if val is not None:
self.compliancecode = util.getTypedValue('boolean', val)
self.language = []
for i in fcd.findall(util.nspath_eval('gmd:language/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.language.append(val)
self.includedwithdataset = None
val = fcd.find(util.nspath_eval('gmd:includedWithDataset/gco:Boolean', namespaces))
val = util.testXMLValue(val)
if val is not None:
self.includedwithdataset = util.getTypedValue('boolean', val)
self.featuretypenames = []
for i in fcd.findall(util.nspath_eval('gmd:featureTypes/gco:LocalName', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.featuretypenames.append(val)
for i in fcd.findall(util.nspath_eval('gmd:featureTypes/gco:ScopedName', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.featuretypenames.append(val)
self.featurecatalogues = []
for i in fcd.findall(util.nspath_eval('gmd:featureCatalogueCitation', namespaces)):
val = i.attrib.get('uuidref')
val = util.testXMLValue(val, attrib=True)
if val is not None:
self.featurecatalogues.append(val)
class FC_FeatureCatalogue(object):
"""Process gfc:FC_FeatureCatalogue"""
def __init__(self, fc=None):
if fc is None:
self.xml = None
self.identifier = None
self.name = None
self.versiondate = None
self.producer = None
self.featuretypes = []
else:
if hasattr(fc, 'getroot'): # standalone document
self.xml = etree.tostring(fc.getroot())
else: # part of a larger document
self.xml = etree.tostring(fc)
val = fc.attrib['uuid']
self.identifier = util.testXMLValue(val, attrib=True)
val = fc.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = fc.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.versiondate = util.testXMLValue(val)
if not self.versiondate:
val = fc.find(util.nspath_eval('gmx:versionDate/gco:DateTime', namespaces))
self.versiondate = util.testXMLValue(val)
self.producer = None
prod = fc.find(util.nspath_eval('gfc:producer/gmd:CI_ResponsibleParty', namespaces))
if prod is not None:
self.producer = CI_ResponsibleParty(prod)
self.featuretypes = []
for i in fc.findall(util.nspath_eval('gfc:featureType/gfc:FC_FeatureType', namespaces)):
self.featuretypes.append(FC_FeatureType(i))
class FC_FeatureType(object):
"""Process gfc:FC_FeatureType"""
def __init__(self, ft=None):
if ft is None:
self.xml = None
self.identifier = None
self.typename = None
self.definition = None
self.isabstract = None
self.aliases = []
self.attributes = []
else:
if hasattr(ft, 'getroot'): # standalone document
self.xml = etree.tostring(ft.getroot())
else: # part of a larger document
self.xml = etree.tostring(ft)
val = ft.attrib['uuid']
self.identifier = util.testXMLValue(val, attrib=True)
val = ft.find(util.nspath_eval('gfc:typeName/gco:LocalName', namespaces))
self.typename = util.testXMLValue(val)
val = ft.find(util.nspath_eval('gfc:definition/gco:CharacterString', namespaces))
self.definition = util.testXMLValue(val)
self.isabstract = None
val = ft.find(util.nspath_eval('gfc:isAbstract/gco:Boolean', namespaces))
val = util.testXMLValue(val)
if val is not None:
self.isabstract = util.getTypedValue('boolean', val)
self.aliases = []
for i in ft.findall(util.nspath_eval('gfc:aliases/gco:LocalName', namespaces)):
self.aliases.append(util.testXMLValue(i))
self.attributes = []
for i in ft.findall(util.nspath_eval('gfc:carrierOfCharacteristics/gfc:FC_FeatureAttribute', namespaces)):
self.attributes.append(FC_FeatureAttribute(i))
class FC_FeatureAttribute(object):
"""Process gfc:FC_FeatureAttribute"""
def __init__(self, fa=None):
if fa is None:
self.xml = None
self.membername = None
self.definition = None
self.code = None
self.valuetype = None
self.listedvalues = []
else:
if hasattr(fa, 'getroot'): # standalone document
self.xml = etree.tostring(fa.getroot())
else: # part of a larger document
self.xml = etree.tostring(fa)
val = fa.find(util.nspath_eval('gfc:memberName/gco:LocalName', namespaces))
self.membername = util.testXMLValue(val)
val = fa.find(util.nspath_eval('gfc:definition/gco:CharacterString', namespaces))
self.definition = util.testXMLValue(val)
val = fa.find(util.nspath_eval('gfc:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
val = fa.find(util.nspath_eval('gfc:valueType/gco:TypeName/gco:aName/gco:CharacterString', namespaces))
self.valuetype = util.testXMLValue(val)
self.listedvalues = []
for i in fa.findall(util.nspath_eval('gfc:listedValue/gfc:FC_ListedValue', namespaces)):
self.listedvalues.append(FC_ListedValue(i))
class FC_ListedValue(object):
"""Process gfc:FC_ListedValue"""
def __init__(self, lv=None):
if lv is None:
self.xml = None
self.label = None
self.code = None
self.definition = None
else:
if hasattr(lv, 'getroot'): # standalone document
self.xml = etree.tostring(lv.getroot())
else: # part of a larger document
self.xml = etree.tostring(lv)
val = lv.find(util.nspath_eval('gfc:label/gco:CharacterString', namespaces))
self.label = util.testXMLValue(val)
val = lv.find(util.nspath_eval('gfc:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
val = lv.find(util.nspath_eval('gfc:definition/gco:CharacterString', namespaces))
self.definition = util.testXMLValue(val)
|
py
|
1a563417a959e16c19b8fa9aa19a03938132f133
|
import socket
# Minimal TCP client: connect to a local server, send one line, print the decoded
# reply, then close the socket.
sock = socket.socket()
sock.connect(('127.0.0.1', 9900))
sock.sendall(b"Hello, server!\n")  # sendall() retries until the whole message is written
data = sock.recv(1024)             # read up to 1024 bytes of the response
udata = data.decode("utf-8")
print(udata)
sock.close()
|
py
|
1a5634ccfdcbf33ab2cccdb92e010356bacc5a15
|
import re
from wanikani_api import constants
from .response_mocks import *
def mock_subjects(requests_mock):
requests_mock.get(
re.compile(constants.SUBJECT_ENDPOINT),
json=SUBJECTS_PAGE,
headers={"Etag": "abc123"},
)
def mock_single_subject(requests_mock):
requests_mock.get(
re.compile(constants.SINGLE_SUBJECT_ENPOINT),
json=SINGLE_SUBJECT,
headers={"Etag": "abc123"},
)
def mock_empty_subjects(requests_mock):
requests_mock.get(
re.compile(constants.SUBJECT_ENDPOINT),
json=EMPTY_SUBJECTS_PAGE,
headers={"Etag": "abc123"},
)
# When making multiple calls to the subject endpoint, only answer with real data once, then just return a 304.
def mock_subjects_with_cache(requests_mock):
requests_mock.register_uri(
"GET",
re.compile(constants.SUBJECT_ENDPOINT),
[
{"json": SUBJECTS_PAGE, "status_code": 200, "headers": {"Etag": "abc123"}},
{"json": None, "status_code": 304},
],
)
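# Usage sketch (hypothetical test, assumes the requests-mock pytest fixture and that
# constants.SUBJECT_ENDPOINT is a full URL): the first call returns data, the second a 304.
def _example_subjects_cache_behaviour(requests_mock):
    import requests
    mock_subjects_with_cache(requests_mock)
    first = requests.get(constants.SUBJECT_ENDPOINT)
    second = requests.get(constants.SUBJECT_ENDPOINT)
    return first.status_code, second.status_code  # expected: (200, 304)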
def mock_user_info(requests_mock):
requests_mock.get(
re.compile(constants.USER_ENDPOINT),
json=USER_INFORMATION,
headers={"Etag": "abc123"},
)
def mock_assignments(requests_mock):
requests_mock.get(
re.compile(constants.ASSIGNMENT_ENDPOINT),
json=ASSIGNMENTS_PAGE,
headers={"Etag": "abc123"},
)
def mock_review_statistics(requests_mock):
requests_mock.get(
re.compile(constants.REVIEW_STATS_ENDPOINT),
json=REVIEW_STATISTICS_PAGE,
headers={"Etag": "abc123"},
)
def mock_level_progressions(requests_mock):
requests_mock.get(
re.compile(constants.LEVEL_PROGRESSIONS_ENDPOINT),
json=LEVEL_PROGRESSIONS_PAGE,
headers={"Etag": "abc123"},
)
def mock_summary(requests_mock):
requests_mock.get(
re.compile(constants.SUMMARY_ENDPOINT), json=SUMMARY, headers={"Etag": "abc123"}
)
def mock_resets(requests_mock):
requests_mock.get(
re.compile(constants.RESETS_ENDPOINT),
json=RESETS_PAGE,
headers={"Etag": "abc123"},
)
def mock_reviews(requests_mock):
requests_mock.get(
re.compile(constants.REVIEWS_ENDPOINT),
json=REVIEWS_PAGE,
headers={"Etag": "abc123"},
)
def mock_study_materials(requests_mock):
requests_mock.get(
re.compile(constants.STUDY_MATERIALS_ENDPOINT),
json=STUDY_MATERIALS_PAGE,
headers={"Etag": "abc123"},
)
|
py
|
1a56357db612219475dc0b08127cedba353b3f3c
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import IndexSelection, KeysCollection
from monai.networks.layers import GaussianFilter
from monai.transforms import Resize, SpatialCrop
from monai.transforms.transform import MapTransform, Randomizable, Transform
from monai.transforms.utils import generate_spatial_bounding_box
from monai.utils import InterpolateMode, ensure_tuple_rep, min_version, optional_import
measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
# Transforms to support Training for Deepgrow models
class FindAllValidSlicesd(Transform):
"""
Find/List all valid slices in the label.
Label is assumed to be a 4D Volume with shape CDHW, where C=1.
Args:
label: key to the label source.
sids: key to store slices indices having valid label map.
"""
def __init__(self, label: str = "label", sids: str = "sids"):
self.label = label
self.sids = sids
def _apply(self, label):
sids = []
for sid in range(label.shape[1]): # Assume channel is first
if np.sum(label[0][sid]) != 0:
sids.append(sid)
return np.asarray(sids)
def __call__(self, data):
d: Dict = dict(data)
label = d[self.label]
if label.shape[0] != 1:
raise ValueError("Only supports single channel labels!")
if len(label.shape) != 4: # only for 3D
raise ValueError("Only supports label with shape CDHW!")
sids = self._apply(label)
if sids is not None and len(sids):
d[self.sids] = sids
return d
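# Usage sketch (hypothetical helper, toy data): list the slices of a CDHW label
# that actually contain foreground.
def _example_find_all_valid_slicesd():
    label = np.zeros((1, 4, 8, 8), dtype=np.float32)  # (C=1, D, H, W)
    label[0, 1, 2:4, 2:4] = 1.0                       # slice 1 has foreground
    label[0, 3, 5:7, 5:7] = 1.0                       # slice 3 has foreground
    d = FindAllValidSlicesd(label="label", sids="sids")({"label": label})
    return d["sids"]                                  # expected: array([1, 3])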
class AddInitialSeedPointd(Randomizable):
"""
Add random guidance as initial seed point for a given label.
Note that the label is of size (C, D, H, W) or (C, H, W)
    The guidance is of size (2, N, # of dims) where N is the number of guidance points added.
# of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W)
Args:
label: label source.
guidance: key to store guidance.
sids: key that represents list of valid slice indices for the given label.
sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
connected_regions: maximum connected regions to use for adding initial points.
"""
def __init__(
self,
label: str = "label",
guidance: str = "guidance",
sids: str = "sids",
sid: str = "sid",
connected_regions: int = 5,
):
self.label = label
self.sids_key = sids
self.sid_key = sid
self.sid = None
self.guidance = guidance
self.connected_regions = connected_regions
def randomize(self, data):
sid = data.get(self.sid_key, None)
sids = data.get(self.sids_key, None)
if sids is not None:
if sid is None or sid not in sids:
sid = self.R.choice(sids, replace=False)
else:
sid = None
self.sid = sid
def _apply(self, label, sid):
dimensions = 3 if len(label.shape) > 3 else 2
default_guidance = [-1] * (dimensions + 1)
dims = dimensions
if sid is not None and dimensions == 3:
dims = 2
label = label[0][sid][np.newaxis] # Assume channel is first
label = (label > 0.5).astype(np.float32)
blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label
if np.max(blobs_labels) <= 0:
raise AssertionError("Not a valid Label")
pos_guidance = []
for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
if dims == 2:
label = (blobs_labels == ridx).astype(np.float32)
if np.sum(label) == 0:
pos_guidance.append(default_guidance)
continue
distance = distance_transform_cdt(label).flatten()
probability = np.exp(distance) - 1.0
idx = np.where(label.flatten() > 0)[0]
seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
dst = distance[seed]
g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
g[0] = dst[0] # for debug
if dimensions == 2 or dims == 3:
pos_guidance.append(g)
else:
pos_guidance.append([g[0], sid, g[-2], g[-1]])
return np.asarray([pos_guidance, [default_guidance] * len(pos_guidance)])
def __call__(self, data):
d = dict(data)
self.randomize(data)
d[self.guidance] = self._apply(d[self.label], self.sid)
return d
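# Usage sketch (hypothetical helper, toy data; requires scikit-image/scipy via the
# optional imports above): place a random positive seed inside the labelled region
# of one valid slice.
def _example_add_initial_seed_pointd():
    label = np.zeros((1, 4, 8, 8), dtype=np.float32)
    label[0, 2, 3:6, 3:6] = 1.0                       # foreground on slice 2
    data = {"label": label, "sids": np.asarray([2])}
    data = AddInitialSeedPointd(label="label", guidance="guidance")(data)
    return data["guidance"]                           # shape (2, 5, 4): [pos, neg] seeds, padded with -1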
class AddGuidanceSignald(Transform):
"""
Add Guidance signal for input image.
    Based on the "guidance" points, apply a Gaussian filter to them and add the result as new channels of the input image.
Args:
image: key to the image source.
guidance: key to store guidance.
sigma: standard deviation for Gaussian kernel.
number_intensity_ch: channel index.
batched: whether input is batched or not.
"""
def __init__(
self,
image: str = "image",
guidance: str = "guidance",
sigma: int = 2,
number_intensity_ch: int = 1,
batched: bool = False,
):
self.image = image
self.guidance = guidance
self.sigma = sigma
self.number_intensity_ch = number_intensity_ch
self.batched = batched
def _get_signal(self, image, guidance):
dimensions = 3 if len(image.shape) > 3 else 2
guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
if dimensions == 3:
signal = np.zeros((len(guidance), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
else:
signal = np.zeros((len(guidance), image.shape[-2], image.shape[-1]), dtype=np.float32)
sshape = signal.shape
for i in range(len(guidance)):
for point in guidance[i]:
if np.any(np.asarray(point) < 0):
continue
if dimensions == 3:
p1 = max(0, min(int(point[-3]), sshape[-3] - 1))
p2 = max(0, min(int(point[-2]), sshape[-2] - 1))
p3 = max(0, min(int(point[-1]), sshape[-1] - 1))
signal[i, p1, p2, p3] = 1.0
else:
p1 = max(0, min(int(point[-2]), sshape[-2] - 1))
p2 = max(0, min(int(point[-1]), sshape[-1] - 1))
signal[i, p1, p2] = 1.0
if np.max(signal[i]) > 0:
signal_tensor = torch.tensor(signal[i])
pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma)
signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0))
signal_tensor = signal_tensor.squeeze(0).squeeze(0)
signal[i] = signal_tensor.detach().cpu().numpy()
signal[i] = (signal[i] - np.min(signal[i])) / (np.max(signal[i]) - np.min(signal[i]))
return signal
def _apply(self, image, guidance):
if not self.batched:
signal = self._get_signal(image, guidance)
return np.concatenate([image, signal], axis=0)
images = []
for i, g in zip(image, guidance):
i = i[0 : 0 + self.number_intensity_ch, ...]
signal = self._get_signal(i, g)
images.append(np.concatenate([i, signal], axis=0))
return images
def __call__(self, data):
d = dict(data)
image = d[self.image]
guidance = d[self.guidance]
d[self.image] = self._apply(image, guidance)
return d
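# Usage sketch (hypothetical helper, toy data): one positive click on slice 2 becomes
# a Gaussian-smoothed channel appended to the single-channel 3D image.
def _example_add_guidance_signald():
    image = np.zeros((1, 4, 8, 8), dtype=np.float32)
    guidance = [[[1.0, 2, 4, 4]], [[-1, -1, -1, -1]]]  # [positive clicks, negative clicks]
    d = AddGuidanceSignald()({"image": image, "guidance": guidance})
    return d["image"].shape                            # expected: (3, 4, 8, 8)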
class FindDiscrepancyRegionsd(Transform):
"""
    Find discrepancies between the prediction and the ground truth label during click interactions at training time.
If batched is true:
label is in shape (B, C, D, H, W) or (B, C, H, W)
pred has same shape as label
discrepancy will have shape (B, 2, C, D, H, W) or (B, 2, C, H, W)
Args:
label: key to label source.
pred: key to prediction source.
discrepancy: key to store discrepancies found between label and prediction.
batched: whether input is batched or not.
"""
def __init__(
self, label: str = "label", pred: str = "pred", discrepancy: str = "discrepancy", batched: bool = True
):
self.label = label
self.pred = pred
self.discrepancy = discrepancy
self.batched = batched
@staticmethod
def disparity(label, pred):
label = (label > 0.5).astype(np.float32)
pred = (pred > 0.5).astype(np.float32)
disparity = label - pred
pos_disparity = (disparity > 0).astype(np.float32)
neg_disparity = (disparity < 0).astype(np.float32)
return [pos_disparity, neg_disparity]
def _apply(self, label, pred):
if not self.batched:
return self.disparity(label, pred)
disparity = []
for la, pr in zip(label, pred):
disparity.append(self.disparity(la, pr))
return disparity
def __call__(self, data):
d = dict(data)
label = d[self.label]
pred = d[self.pred]
d[self.discrepancy] = self._apply(label, pred)
return d
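# Usage sketch (hypothetical helper, toy data, batched input): expose the regions the
# prediction missed (positive) and over-segmented (negative).
def _example_find_discrepancy_regionsd():
    label = np.zeros((1, 1, 8, 8), dtype=np.float32)   # (B, C, H, W)
    label[..., 2:6, 2:6] = 1.0
    pred = np.zeros_like(label)
    pred[..., 2:4, 2:4] = 1.0
    d = FindDiscrepancyRegionsd()({"label": label, "pred": pred})
    missed, extra = d["discrepancy"][0]                # maps for the first batch element
    return missed.sum(), extra.sum()                   # expected: (12.0, 0.0)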
class AddRandomGuidanced(Randomizable):
"""
Add random guidance based on discrepancies that were found between label and prediction.
If batched is True, input shape is as below:
Guidance is of shape (B, 2, N, # of dim) where B is batch size, 2 means positive and negative,
N means how many guidance points, # of dim is the total number of dimensions of the image
(for example if the image is CDHW, then # of dim would be 4).
Discrepancy is of shape (B, 2, C, D, H, W) or (B, 2, C, H, W)
Probability is of shape (B, 1)
else:
Guidance is of shape (2, N, # of dim)
Discrepancy is of shape (2, C, D, H, W) or (2, C, H, W)
Probability is of shape (1)
Args:
guidance: key to guidance source.
discrepancy: key that represents discrepancies found between label and prediction.
probability: key that represents click/interaction probability.
batched: whether input is batched or not.
"""
def __init__(
self,
guidance: str = "guidance",
discrepancy: str = "discrepancy",
probability: str = "probability",
batched: bool = True,
):
self.guidance = guidance
self.discrepancy = discrepancy
self.probability = probability
self.batched = batched
self._will_interact = None
def randomize(self, data=None):
probability = data[self.probability]
if not self.batched:
self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])
else:
self._will_interact = []
for p in probability:
self._will_interact.append(self.R.choice([True, False], p=[p, 1.0 - p]))
def find_guidance(self, discrepancy):
distance = distance_transform_cdt(discrepancy).flatten()
probability = np.exp(distance) - 1.0
idx = np.where(discrepancy.flatten() > 0)[0]
if np.sum(discrepancy > 0) > 0:
seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
dst = distance[seed]
g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]
g[0] = dst[0]
return g
return None
def add_guidance(self, discrepancy, will_interact):
if not will_interact:
return None, None
pos_discr = discrepancy[0]
neg_discr = discrepancy[1]
can_be_positive = np.sum(pos_discr) > 0
can_be_negative = np.sum(neg_discr) > 0
correct_pos = np.sum(pos_discr) >= np.sum(neg_discr)
if correct_pos and can_be_positive:
return self.find_guidance(pos_discr), None
if not correct_pos and can_be_negative:
return None, self.find_guidance(neg_discr)
return None, None
def _apply(self, guidance, discrepancy):
guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
if not self.batched:
pos, neg = self.add_guidance(discrepancy, self._will_interact)
if pos:
guidance[0].append(pos)
guidance[1].append([-1] * len(pos))
if neg:
guidance[0].append([-1] * len(neg))
guidance[1].append(neg)
else:
for g, d, w in zip(guidance, discrepancy, self._will_interact):
pos, neg = self.add_guidance(d, w)
if pos:
g[0].append(pos)
g[1].append([-1] * len(pos))
if neg:
g[0].append([-1] * len(neg))
g[1].append(neg)
return np.asarray(guidance)
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
discrepancy = d[self.discrepancy]
self.randomize(data)
d[self.guidance] = self._apply(guidance, discrepancy)
return d
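# Usage sketch (hypothetical helper, toy data; requires scipy via the optional import
# above): with interaction probability 1.0, one corrective click is appended to the
# positive guidance of the single batch element.
def _example_add_random_guidanced():
    discrepancy = np.zeros((1, 2, 1, 8, 8), dtype=np.float32)  # (B, 2, C, H, W)
    discrepancy[0, 0, 0, 3:5, 3:5] = 1.0                       # only missed foreground
    data = {"guidance": [[[], []]], "discrepancy": discrepancy, "probability": [1.0]}
    data = AddRandomGuidanced()(data)
    return data["guidance"]                                    # shape (1, 2, 1, 3)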
class SpatialCropForegroundd(MapTransform):
"""
Crop only the foreground object of the expected images.
Difference VS :py:class:`monai.transforms.CropForegroundd`:
1. If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
object using box's center and spatial_size.
2. This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
in data[{key}_{meta_key_postfix}]
The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
The valid part can be determined by any field in the data with `source_key`, for example:
- Select values > 0 in image field as the foreground and crop on all fields specified by `keys`.
- Select label = 3 in label field as the foreground to crop on all fields specified by `keys`.
- Select label > 0 in the third channel of a One-Hot label field as the foreground to crop all `keys` fields.
Users can define arbitrary function to select expected foreground from the whole source image or specified
channels. And it can also add margin to every dim of the bounding box of foreground object.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.MapTransform`
source_key: data source to generate the bounding box of foreground, can be image or label, etc.
spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
select_fn: function to select expected foreground, default is to select values > 0.
channel_indices: if defined, select foreground only on the specified channels
of image. if None, select foreground on the whole image.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
        meta_key_postfix: use `{key}_{meta_key_postfix}` to fetch/store the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
original_shape_key: key to record original shape for foreground.
cropped_shape_key: key to record cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
source_key: str,
spatial_size: Union[Sequence[int], np.ndarray],
select_fn: Callable = lambda x: x > 0,
channel_indices: Optional[IndexSelection] = None,
margin: int = 0,
meta_key_postfix="meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.source_key = source_key
self.spatial_size = list(spatial_size)
self.select_fn = select_fn
self.channel_indices = channel_indices
self.margin = margin
self.meta_key_postfix = meta_key_postfix
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
box_start, box_end = generate_spatial_bounding_box(
d[self.source_key], self.select_fn, self.channel_indices, self.margin
)
center = list(np.mean([box_start, box_end], axis=0).astype(int))
current_size = list(np.subtract(box_end, box_start).astype(int))
if np.all(np.less(current_size, self.spatial_size)):
cropper = SpatialCrop(roi_center=center, roi_size=self.spatial_size)
box_start = cropper.roi_start
box_end = cropper.roi_end
else:
cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
for key in self.key_iterator(d):
meta_key = f"{key}_{self.meta_key_postfix}"
d[meta_key][self.start_coord_key] = box_start
d[meta_key][self.end_coord_key] = box_end
d[meta_key][self.original_shape_key] = d[key].shape
image = cropper(d[key])
d[meta_key][self.cropped_shape_key] = image.shape
d[key] = image
return d
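# Usage sketch (hypothetical helper, toy data): crop image and label around the labelled
# region and record the crop coordinates in the per-key meta dictionaries.
def _example_spatial_crop_foregroundd():
    image = np.random.rand(1, 16, 16, 16).astype(np.float32)
    label = np.zeros((1, 16, 16, 16), dtype=np.float32)
    label[0, 6:10, 6:10, 6:10] = 1.0
    data = {"image": image, "label": label, "image_meta_dict": {}, "label_meta_dict": {}}
    cropper = SpatialCropForegroundd(keys=["image", "label"], source_key="label", spatial_size=[8, 8, 8])
    data = cropper(data)
    return data["image"].shape                                 # expected: (1, 8, 8, 8)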
# Transforms to support Inference for Deepgrow models
class AddGuidanceFromPointsd(Transform):
"""
Add guidance based on user clicks.
We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
Clicks always specify the coordinates in (H, W, D)
If depth_first is True:
Input is now of shape (D, H, W), will return guidance that specifies the coordinates in (D, H, W)
else:
Input is now of shape (H, W, D), will return guidance that specifies the coordinates in (H, W, D)
Args:
ref_image: key to reference image to fetch current and original image details.
guidance: output key to store guidance.
foreground: key that represents user foreground (+ve) clicks.
background: key that represents user background (-ve) clicks.
axis: axis that represents slices in 3D volume. (axis to Depth)
depth_first: if depth (slices) is positioned at first dimension.
dimensions: dimensions based on model used for deepgrow (2D vs 3D).
slice_key: key that represents applicable slice to add guidance.
        meta_key_postfix: use `{ref_image}_{postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
"""
def __init__(
self,
ref_image,
guidance: str = "guidance",
foreground: str = "foreground",
background: str = "background",
axis: int = 0,
depth_first: bool = True,
dimensions: int = 2,
slice_key: str = "slice",
meta_key_postfix: str = "meta_dict",
):
self.ref_image = ref_image
self.guidance = guidance
self.foreground = foreground
self.background = background
self.axis = axis
self.depth_first = depth_first
self.dimensions = dimensions
self.slice = slice_key
self.meta_key_postfix = meta_key_postfix
def _apply(self, pos_clicks, neg_clicks, factor, slice_num):
        pos, neg = [], []
if self.dimensions == 2:
points = list(pos_clicks)
points.extend(neg_clicks)
points = np.array(points)
slices = list(np.unique(points[:, self.axis]))
slice_idx = slices[0] if slice_num is None else next(x for x in slices if x == slice_num)
if len(pos_clicks):
pos_clicks = np.array(pos_clicks)
pos = (pos_clicks[np.where(pos_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
if len(neg_clicks):
neg_clicks = np.array(neg_clicks)
neg = (neg_clicks[np.where(neg_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
guidance = [pos, neg, slice_idx]
else:
if len(pos_clicks):
pos = np.multiply(pos_clicks, factor).astype(int).tolist()
if len(neg_clicks):
neg = np.multiply(neg_clicks, factor).astype(int).tolist()
guidance = [pos, neg]
return guidance
def __call__(self, data):
d = dict(data)
meta_dict_key = f"{self.ref_image}_{self.meta_key_postfix}"
if meta_dict_key not in d:
raise RuntimeError(f"Missing meta_dict {meta_dict_key} in data!")
if "spatial_shape" not in d[meta_dict_key]:
raise RuntimeError('Missing "spatial_shape" in meta_dict!')
original_shape = d[meta_dict_key]["spatial_shape"]
current_shape = list(d[self.ref_image].shape)
if self.depth_first:
if self.axis != 0:
raise RuntimeError("Depth first means the depth axis should be 0.")
# in here we assume the depth dimension was in the last dimension of "original_shape"
original_shape = np.roll(original_shape, 1)
factor = np.array(current_shape) / original_shape
fg_bg_clicks = []
for key in [self.foreground, self.background]:
clicks = d[key]
clicks = list(np.array(clicks).astype(int))
if self.depth_first:
for i in range(len(clicks)):
clicks[i] = list(np.roll(clicks[i], 1))
fg_bg_clicks.append(clicks)
d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice))
return d
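# Usage sketch (hypothetical helper, toy data, 2D model): clicks given in (H, W, D)
# order are mapped to slice-local guidance for a depth-first (D, H, W) volume.
def _example_add_guidance_from_pointsd():
    data = {
        "image": np.zeros((4, 8, 8), dtype=np.float32),       # (D, H, W) after depth-first loading
        "image_meta_dict": {"spatial_shape": [8, 8, 4]},       # original (H, W, D)
        "foreground": [[3, 2, 1]],                             # one positive click at H=3, W=2, D=1
        "background": [],
    }
    data = AddGuidanceFromPointsd(ref_image="image")(data)
    return data["guidance"]                                    # expected: positives [[3, 2]], negatives [], slice index 1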
class SpatialCropGuidanced(MapTransform):
"""
Crop image based on guidance with minimal spatial size.
- If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
object using box's center and spatial_size.
- This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
in data[{key}_{meta_key_postfix}]
Input data is of shape (C, spatial_1, [spatial_2, ...])
Args:
keys: keys of the corresponding items to be transformed.
guidance: key to the guidance. It is used to generate the bounding box of foreground
spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
        meta_key_postfix: use `{key}_{postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
original_shape_key: key to record original shape for foreground.
cropped_shape_key: key to record cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
guidance: str,
spatial_size,
margin=20,
meta_key_postfix="meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.guidance = guidance
self.spatial_size = list(spatial_size)
self.margin = margin
self.meta_key_postfix = meta_key_postfix
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def bounding_box(self, points, img_shape):
ndim = len(img_shape)
margin = ensure_tuple_rep(self.margin, ndim)
for m in margin:
if m < 0:
raise ValueError("margin value should not be negative number.")
box_start = [0] * ndim
box_end = [0] * ndim
for di in range(ndim):
dt = points[..., di]
min_d = max(min(dt - margin[di]), 0)
max_d = min(img_shape[di], max(dt + margin[di] + 1))
box_start[di], box_end[di] = min_d, max_d
return box_start, box_end
def __call__(self, data):
d: Dict = dict(data)
guidance = d[self.guidance]
original_spatial_shape = d[self.keys[0]].shape[1:]
box_start, box_end = self.bounding_box(np.array(guidance[0] + guidance[1]), original_spatial_shape)
center = list(np.mean([box_start, box_end], axis=0).astype(int))
spatial_size = self.spatial_size
box_size = list(np.subtract(box_end, box_start).astype(int))
spatial_size = spatial_size[-len(box_size) :]
if len(spatial_size) < len(box_size):
            # If the data is 3D and spatial_size is specified as 2D, e.g. [256, 256],
            # then all slices along the missing dimension are kept.
diff = len(box_size) - len(spatial_size)
spatial_size = list(original_spatial_shape[1 : (1 + diff)]) + spatial_size
if np.all(np.less(box_size, spatial_size)):
if len(center) == 3:
# 3D Deepgrow: set center to be middle of the depth dimension (D)
center[0] = spatial_size[0] // 2
cropper = SpatialCrop(roi_center=center, roi_size=spatial_size)
else:
cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
# update bounding box in case it was corrected by the SpatialCrop constructor
box_start = np.array([s.start for s in cropper.slices])
box_end = np.array([s.stop for s in cropper.slices])
for key in self.key_iterator(d):
if not np.array_equal(d[key].shape[1:], original_spatial_shape):
raise RuntimeError("All the image specified in keys should have same spatial shape")
meta_key = f"{key}_{self.meta_key_postfix}"
d[meta_key][self.start_coord_key] = box_start
d[meta_key][self.end_coord_key] = box_end
d[meta_key][self.original_shape_key] = d[key].shape
image = cropper(d[key])
d[meta_key][self.cropped_shape_key] = image.shape
d[key] = image
pos_clicks, neg_clicks = guidance[0], guidance[1]
pos = np.subtract(pos_clicks, box_start).tolist() if len(pos_clicks) else []
neg = np.subtract(neg_clicks, box_start).tolist() if len(neg_clicks) else []
d[self.guidance] = [pos, neg]
return d
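# Usage sketch (hypothetical helper, toy data): crop around the clicks with a small margin
# and shift the clicks into the cropped coordinate frame.
def _example_spatial_crop_guidanced():
    data = {
        "image": np.random.rand(1, 32, 32, 32).astype(np.float32),
        "image_meta_dict": {},
        "guidance": [[[8, 8, 8]], []],                         # one positive click, no negatives
    }
    cropper = SpatialCropGuidanced(keys=["image"], guidance="guidance", spatial_size=[16, 16, 16], margin=4)
    data = cropper(data)
    return data["image"].shape, data["guidance"]               # expected: ((1, 16, 16, 16), [[[8, 8, 8]], []])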
class ResizeGuidanced(Transform):
"""
Resize the guidance based on cropped vs resized image.
    This transform assumes that the images have been cropped and resized, and that the cropped shape is stored
    inside the meta dict of the ref image.
Args:
guidance: key to guidance
ref_image: key to reference image to fetch current and original image details
        meta_key_postfix: use `{ref_image}_{postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
cropped_shape_key: key that records cropped shape for foreground.
"""
def __init__(
self,
guidance: str,
ref_image: str,
meta_key_postfix="meta_dict",
cropped_shape_key: str = "foreground_cropped_shape",
) -> None:
self.guidance = guidance
self.ref_image = ref_image
self.meta_key_postfix = meta_key_postfix
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
current_shape = d[self.ref_image].shape[1:]
cropped_shape = meta_dict[self.cropped_shape_key][1:]
factor = np.divide(current_shape, cropped_shape)
pos_clicks, neg_clicks = guidance[0], guidance[1]
pos = np.multiply(pos_clicks, factor).astype(int).tolist() if len(pos_clicks) else []
neg = np.multiply(neg_clicks, factor).astype(int).tolist() if len(neg_clicks) else []
d[self.guidance] = [pos, neg]
return d
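# Usage sketch (hypothetical helper, toy data): rescale clicks after the cropped image
# has been resized from (1, 16, 16, 16) to (1, 32, 32, 32).
def _example_resize_guidanced():
    data = {
        "image": np.zeros((1, 32, 32, 32), dtype=np.float32),
        "image_meta_dict": {"foreground_cropped_shape": (1, 16, 16, 16)},
        "guidance": [[[8, 8, 8]], []],
    }
    data = ResizeGuidanced(guidance="guidance", ref_image="image")(data)
    return data["guidance"]                                    # expected: [[[16, 16, 16]], []]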
class RestoreLabeld(MapTransform):
"""
Restores label based on the ref image.
    The ref_image is assumed to have gone through the following transforms:
1. Fetch2DSliced (If 2D)
2. Spacingd
3. SpatialCropGuidanced
4. Resized
And its shape is assumed to be (C, D, H, W)
    This transform tries to undo these operations so that the resulting label can be overlaid on the original volume.
It does the following operation:
1. Undo Resized
2. Undo SpatialCropGuidanced
3. Undo Spacingd
4. Undo Fetch2DSliced
The resulting label is of shape (D, H, W)
Args:
keys: keys of the corresponding items to be transformed.
ref_image: reference image to fetch current and original image details
slice_only: apply only to an applicable slice, in case of 2D model/prediction
mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
It also can be a sequence of bool, each element corresponds to a key in ``keys``.
        meta_key_postfix: use `{ref_image}_{meta_key_postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
start_coord_key: key that records the start coordinate of spatial bounding box for foreground.
end_coord_key: key that records the end coordinate of spatial bounding box for foreground.
original_shape_key: key that records original shape for foreground.
cropped_shape_key: key that records cropped shape for foreground.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys: KeysCollection,
ref_image: str,
slice_only: bool = False,
mode: Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str] = InterpolateMode.NEAREST,
align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
meta_key_postfix: str = "meta_dict",
start_coord_key: str = "foreground_start_coord",
end_coord_key: str = "foreground_end_coord",
original_shape_key: str = "foreground_original_shape",
cropped_shape_key: str = "foreground_cropped_shape",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.ref_image = ref_image
self.slice_only = slice_only
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self.meta_key_postfix = meta_key_postfix
self.start_coord_key = start_coord_key
self.end_coord_key = end_coord_key
self.original_shape_key = original_shape_key
self.cropped_shape_key = cropped_shape_key
def __call__(self, data):
d = dict(data)
meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners):
image = d[key]
# Undo Resize
current_shape = image.shape
cropped_shape = meta_dict[self.cropped_shape_key]
if np.any(np.not_equal(current_shape, cropped_shape)):
resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
image = resizer(image, mode=mode, align_corners=align_corners)
# Undo Crop
original_shape = meta_dict[self.original_shape_key]
result = np.zeros(original_shape, dtype=np.float32)
box_start = meta_dict[self.start_coord_key]
box_end = meta_dict[self.end_coord_key]
spatial_dims = min(len(box_start), len(image.shape[1:]))
slices = [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
slices = tuple(slices)
result[slices] = image
# Undo Spacing
current_size = result.shape[1:]
# change spatial_shape from HWD to DHW
spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
spatial_size = spatial_shape[-len(current_size) :]
if np.any(np.not_equal(current_size, spatial_size)):
resizer = Resize(spatial_size=spatial_size, mode=mode)
result = resizer(result, mode=mode, align_corners=align_corners)
# Undo Slicing
slice_idx = meta_dict.get("slice_idx")
if slice_idx is None or self.slice_only:
final_result = result if len(result.shape) <= 3 else result[0]
else:
slice_idx = meta_dict["slice_idx"][0]
final_result = np.zeros(tuple(spatial_shape))
final_result[slice_idx] = result
d[key] = final_result
meta = d.get(f"{key}_{self.meta_key_postfix}")
if meta is None:
meta = dict()
d[f"{key}_{self.meta_key_postfix}"] = meta
meta["slice_idx"] = slice_idx
meta["affine"] = meta_dict["original_affine"]
return d
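# Usage sketch for RestoreLabeld (hypothetical keys, for illustration): given a prediction
# 'pred' produced on the cropped/resized 'image', the transform writes the label back onto
# the original grid recorded in 'image_meta_dict':
#
#   restore = RestoreLabeld(keys='pred', ref_image='image', mode='nearest')
#   data = restore({'pred': pred, 'image': image, 'image_meta_dict': meta})
#   restored = data['pred']  # spatial shape of the original volume, i.e. (D, H, W)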
class Fetch2DSliced(MapTransform):
"""
Fetch one slice in case of a 3D volume.
The volume only contains spatial coordinates.
Args:
keys: keys of the corresponding items to be transformed.
guidance: key that represents guidance.
axis: axis that represents slice in 3D volume.
        meta_key_postfix: use `key_{meta_key_postfix}` to fetch the meta data according to the key data,
default is `meta_dict`, the meta data is a dictionary object.
For example, to handle key `image`, read/write affine matrices from the
metadata `image_meta_dict` dictionary's `affine` field.
allow_missing_keys: don't raise exception if key is missing.
"""
def __init__(
self,
keys,
guidance="guidance",
axis: int = 0,
meta_key_postfix: str = "meta_dict",
allow_missing_keys: bool = False,
):
super().__init__(keys, allow_missing_keys)
self.guidance = guidance
self.axis = axis
self.meta_key_postfix = meta_key_postfix
def _apply(self, image, guidance):
slice_idx = guidance[2] # (pos, neg, slice_idx)
idx = []
for i in range(len(image.shape)):
idx.append(slice_idx) if i == self.axis else idx.append(slice(0, image.shape[i]))
idx = tuple(idx)
return image[idx], idx
def __call__(self, data):
d = dict(data)
guidance = d[self.guidance]
if len(guidance) < 3:
raise RuntimeError("Guidance does not container slice_idx!")
for key in self.key_iterator(d):
img_slice, idx = self._apply(d[key], guidance)
d[key] = img_slice
d[f"{key}_{self.meta_key_postfix}"]["slice_idx"] = idx
return d
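# Worked example for Fetch2DSliced (illustrative): for a volume of shape (40, 256, 256)
# with axis=0 and guidance = [pos, neg, 17], _apply builds the index
# (17, slice(0, 256), slice(0, 256)) and returns the single 2D slice at depth 17.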
|
py
|
1a56365bca861672b576f9682f40e6cc7b2623f0
|
"""
*Judgment* _turnstile_
[TODO] fs placement
"""
|
py
|
1a56373f61e7fbe19292c217ba68ef36f23a1ba3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/13 10:58
# @Author : Money
# @Site :
# @File : middlewareloginrequired.py
# @Software: PyCharm
from functools import wraps
from django.conf import settings
from django.shortcuts import HttpResponseRedirect
from django.urls import RegexURLPattern  # for django>=2.0 use: from django.urls import URLPattern
from . import urls
class MiddlewareLoginRequired(object):
"""
    Middleware that requires the user to be logged in before a page can be accessed;
    the session is used to decide whether the user is authenticated.
"""
    _NO_NEED_LOGIN = []  # stores the views that do not require login authentication
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request, *args, **kwargs):
response = self.get_response(request)
user_hash = request.session.get('_auth_user_hash','')
if not user_hash:
url = request.path
if url in self.exclude_url_path():
return response
else:
return HttpResponseRedirect(settings.LOGIN_URL + '?next=' + url)
return response
@staticmethod
def no_need_login(func):
view_func = func.__module__ + '.' + func.__name__
MiddlewareLoginRequired._NO_NEED_LOGIN.append(view_func)
def get_all_urls(self, patterns, pre_fix, result):
"""
        Build the mapping between every view function and its url.
        :param patterns: urlpatterns
        :param pre_fix:
        :param result: dict of {view function: url}
:return:
"""
for item in patterns:
            part = item.regex.pattern.strip("^$")  # for django>=2.0 use: part = item.pattern.regex.pattern.strip("^$")
            if isinstance(item, RegexURLPattern):  # for django>=2.0 use: if isinstance(item, URLPattern):
                # for django>=2.0 use: url_path = item.pattern.regex.pattern.strip("^$").replace('\\', "")
                url_path = item.regex.pattern.strip("^$").replace('\\', "")
view_func = item.callback.__module__ + '.' + item.callback.__name__
if view_func.startswith(('django',)):
continue
result.setdefault(view_func, pre_fix + url_path)
else:
self.get_all_urls(item.url_patterns, pre_fix + part, result=result)
return result
def exclude_url_path(self):
view_url_dicts = self.get_all_urls(urls.urlpatterns, pre_fix="/", result={})
url_paths = list([view_url_dicts[view] for view in self._NO_NEED_LOGIN
if view in view_url_dicts])
return url_paths
def login_excepted(func=None):
"""
    Counterpart of login_required:
    views decorated with this function can be accessed without logging in.
    Usage: @login_excepted or
           @login_excepted()
:param func:
:return:
"""
def _wrapped(func):
MiddlewareLoginRequired.no_need_login(func)
@wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
if not func:
return _wrapped
return _wrapped(func)
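# Usage sketch (hypothetical view, for illustration):
#
#   @login_excepted
#   def public_page(request):
#       return HttpResponse('no login required')
#
# Views without this decorator are redirected to settings.LOGIN_URL by
# MiddlewareLoginRequired whenever the session has no '_auth_user_hash'.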
|
py
|
1a5637bbda24c54ccc832afe9faace5536abdbe6
|
import re
#This function writes to terms.txt file
def prepTerms(inFile):
termsFile = open("terms.txt", "w+") #Creates terms.txt with w+ (writing and reading) rights
with open(inFile, 'r') as file: #Opens inFile (xml file passed as argument)
for line in file: #for loop for each line
if line.startswith("<mail>"): #Only take lines starting with <mail>
if line.split("<subj>")[1].split("</subj>")[0] != "": #checks if <subj> content is non-empty
for term in re.split("[^A-Za-z0-9\-_]+", re.sub("&.*?;", " ", line.split("<subj>")[1].split("</subj>")[0])): #splits by all chars except [A-Za-z0-9_-], substitutes all instances of &xxx; with space char, splits by <subj> and </subj> to get contents
if len(term) > 2: #only write to file if the term length is greater than 2
termsFile.write("s-%s:%s\n" %(term.lower(), line.split("<row>")[1].split("</row>")[0])) #write the term and row id
if line.split("<body>")[1].split("</body>") != "": #checks if <body> content is non-empty
for term in re.split("[^A-Za-z0-9\-_]+", re.sub("&.*?;", " ", line.split("<body>")[1].split("</body>")[0])): #splits the same as above for <subj>
if len(term) > 2: #only write term length > 2
termsFile.write("b-%s:%s\n" %(term.lower(), line.split("<row>")[1].split("</row>")[0])) #write the term and row id
#This functions write to emails.txt file
def prepEmails(inFile):
emailsFile = open("emails.txt", "w+") #same as above but for emails.txt
with open(inFile, 'r') as file: #same as above
for line in file: #same as above
if line.startswith("<mail>"): #same as above
emailsFile.write("from-%s:%s\n" %(line.split("<from>")[1].split("</from>")[0],line.split("<row>")[1].split("</row>")[0])) #write <from> contents into file. No condition since will always have from email
if line.split("<to>")[1].split("</to>")[0] != "": #checks if <to> content is non-empty
for email in line.split("<to>")[1].split("</to>")[0].split(","): #for loop to print all emails in <to> split by ','
emailsFile.write("to-%s:%s\n" %(email,line.split("<row>")[1].split("</row>")[0])) #writes <to> contents and row id to file
if line.split("<cc>")[1].split("</cc>")[0] != "": #checks if <cc> content is non-empty
for email in line.split("<cc>")[1].split("</cc>")[0].split(","): #for loop to print all emails in <cc> split by ','
emailsFile.write("cc-%s:%s\n" %(email,line.split("<row>")[1].split("</row>")[0])) #writes <cc> contents and row id to file
if line.split("<bcc>")[1].split("</bcc>")[0] != "": #checks if <bcc> content is non-empty
for email in line.split("<bcc>")[1].split("</bcc>")[0].split(","): #for loop to print all emails in <bcc> split by ','
emailsFile.write("bcc-%s:%s\n" %(email,line.split("<row>")[1].split("</row>")[0])) #writes <bcc> contents and row id to file
def prepDates(inFile):
datesFile = open("dates.txt", "w+") #same as above but for dates.txt
with open(inFile, 'r') as file: #same as above
for line in file: #same as above
if line.startswith("<mail>"): #same as above
datesFile.write("%s:%s\n" %(line.split("<date>")[1].split("</date>")[0],line.split("<row>")[1].split("</row>")[0])) #writes <date> content and row id
def prepRecs(inFile):
recsFile = open("recs.txt", "w+") #same as above but for recs.txt
with open(inFile, 'r') as file: #same as above
for line in file: #same as above
if line.startswith("<mail>"): #same as above
recsFile.write("%s:%s" %(line.split("<row>")[1].split("</row>")[0], line)) #writes row id and full line
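# Usage sketch (assuming an XML dump such as 'enron.xml' with one <mail> record per line):
#
#   prepTerms('enron.xml')
#   prepEmails('enron.xml')
#   prepDates('enron.xml')
#   prepRecs('enron.xml')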
|
py
|
1a563955ed28ea64170b1ae7bb4fd40efb752c27
|
# -*- coding: utf-8 -*-
# File: graph.py
""" Graph related callbacks"""
import tensorflow as tf
import os
import numpy as np
from six.moves import zip
from ..utils import logger
from .base import Callback
from ..tfutils.common import get_op_tensor_name
__all__ = ['RunOp', 'RunUpdateOps', 'ProcessTensors', 'DumpTensors',
'DumpTensor', 'DumpTensorAsImage', 'DumpParamAsImage']
class RunOp(Callback):
""" Run an Op. """
_chief_only = False
def __init__(self, op,
run_before=True, run_as_trigger=True,
run_step=False, verbose=False):
"""
Args:
op (tf.Operation or function): an Op, or a function that returns the Op in the graph.
The function will be called after the main graph has been created (in the `setup_graph` callback).
run_before (bool): run the Op before training
run_as_trigger (bool): run the Op on every :meth:`trigger()` call.
run_step (bool): run the Op every step (along with training)
verbose (bool): print logs when the op is run.
Example:
The `DQN Example
<https://github.com/tensorpack/tensorpack/blob/master/examples/DeepQNetwork/>`_
uses this callback to update target network.
"""
if not callable(op):
self.setup_func = lambda: op # noqa
else:
self.setup_func = op
self.run_before = run_before
self.run_as_trigger = run_as_trigger
self.run_step = run_step
self.verbose = verbose
def _setup_graph(self):
self._op = self.setup_func()
if self.run_step:
self._fetch = tf.train.SessionRunArgs(fetches=self._op)
def _before_train(self):
if self.run_before:
self._print()
self._op.run()
def _trigger(self):
if self.run_as_trigger:
self._print()
self._op.run()
def _before_run(self, _):
if self.run_step:
self._print()
return self._fetch
def _print(self):
if self.verbose:
logger.info("Running Op {} ...".format(self._op.name))
class RunUpdateOps(RunOp):
"""
Run ops from the collection UPDATE_OPS every step.
The ops will be hooked to `trainer.hooked_sess` and run along with
each `sess.run` call.
"""
def __init__(self, collection=tf.GraphKeys.UPDATE_OPS):
"""
Args:
collection (str): collection of ops to run. Defaults to ``tf.GraphKeys.UPDATE_OPS``
"""
name = 'UPDATE_OPS' if collection == tf.GraphKeys.UPDATE_OPS else collection
def f():
ops = tf.get_collection(collection)
if ops:
logger.info("Applying collection {} of {} ops.".format(name, len(ops)))
return tf.group(*ops, name='update_ops')
else:
return tf.no_op(name='empty_update_ops')
super(RunUpdateOps, self).__init__(
f, run_before=False, run_as_trigger=False, run_step=True)
class ProcessTensors(Callback):
"""
Fetch extra tensors **along with** each training step,
and call some function over the values.
    It uses the `_{before,after}_run` methods to inject `tf.train.SessionRunHooks`
    into the session.
You can use it to print tensors, save tensors to file, etc.
Example:
.. code-block:: python
ProcessTensors(['mycost1', 'mycost2'], lambda c1, c2: print(c1, c2, c1 + c2))
"""
def __init__(self, names, fn):
"""
Args:
names (list[str]): names of tensors
fn: a function taking all requested tensors as input
"""
assert isinstance(names, (list, tuple)), names
self._names = names
self._fn = fn
def _setup_graph(self):
tensors = self.get_tensors_maybe_in_tower(self._names)
self._fetch = tf.train.SessionRunArgs(fetches=tensors)
def _before_run(self, _):
return self._fetch
def _after_run(self, _, rv):
results = rv.results
self._fn(*results)
class DumpTensors(ProcessTensors):
"""
Dump some tensors to a file.
    Every step this callback fetches tensors and writes them to an npz file
    under ``logger.get_logger_dir``.
The dump can be loaded by ``dict(np.load(filename).items())``.
"""
def __init__(self, names):
"""
Args:
names (list[str]): names of tensors
"""
assert isinstance(names, (list, tuple)), names
self._names = names
dir = logger.get_logger_dir()
def fn(*args):
dic = {}
for name, val in zip(self._names, args):
dic[name] = val
fname = os.path.join(
dir, 'DumpTensor-{}.npz'.format(self.global_step))
np.savez(fname, **dic)
super(DumpTensors, self).__init__(names, fn)
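# Usage sketch (illustrative): DumpTensors(['mycost1', 'mycost2']) fetches the two tensors
# at every step and saves them as DumpTensor-<global_step>.npz under logger.get_logger_dir().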
class DumpTensorAsImage(Callback):
"""
Dump a tensor to image(s) to ``logger.get_logger_dir()`` once triggered.
Note that it requires the tensor is directly evaluable, i.e. either inputs
are not its dependency (e.g. the weights of the model), or the inputs are
feedfree (in which case this callback will take an extra datapoint from the input pipeline).
"""
def __init__(self, tensor_name, prefix=None, map_func=None, scale=255):
"""
Args:
tensor_name (str): the name of the tensor.
prefix (str): the filename prefix for saved images. Defaults to the Op name.
map_func: map the value of the tensor to an image or list of
images of shape [h, w] or [h, w, c]. If None, will use identity.
scale (float): a multiplier on pixel values, applied after map_func.
"""
op_name, self.tensor_name = get_op_tensor_name(tensor_name)
self.func = map_func
if prefix is None:
self.prefix = op_name
else:
self.prefix = prefix
self.log_dir = logger.get_logger_dir()
self.scale = scale
def _before_train(self):
self._tensor = self.graph.get_tensor_by_name(self.tensor_name)
def _trigger(self):
val = self.trainer.sess.run(self._tensor)
if self.func is not None:
val = self.func(val)
if isinstance(val, list) or val.ndim == 4:
for idx, im in enumerate(val):
self._dump_image(im, idx)
else:
self._dump_image(val)
self.trainer.monitors.put_image(self.prefix, val)
def _dump_image(self, im, idx=None):
assert im.ndim in [2, 3], str(im.ndim)
fname = os.path.join(
self.log_dir,
self.prefix + '-ep{:03d}{}.png'.format(
self.epoch_num, '-' + str(idx) if idx else ''))
res = im * self.scale
res = np.clip(res, 0, 255)
cv2.imwrite(fname, res.astype('uint8'))
try:
import cv2
except ImportError:
from ..utils.develop import create_dummy_class
DumpTensorAsImage = create_dummy_class('DumpTensorAsImage', 'cv2') # noqa
# alias
DumpParamAsImage = DumpTensorAsImage
DumpTensor = DumpTensors
|
py
|
1a563a2f3b3b665c06bff23009f5a044eb797b8f
|
from office365.entity import Entity
class PlannerPlan(Entity):
"""The plannerPlan resource represents a plan in Microsoft 365. A plan can be owned by a group
and contains a collection of plannerTasks. It can also have a collection of plannerBuckets.
Each plan object has a details object that can contain more information about the plan.
For more information about the relationships between groups, plans, and tasks, see Planner.
"""
pass
|
py
|
1a563b9ef8cb5d6156fa5040fb08822a3e0a2a22
|
WRONG_CONFIG_CHANNELS_SHOULD_BE_LIST_TUPLE = (
'Wrong configuration, channels is a list or tuple of channel names.')
WRONG_CONFIG_CHANNEL_NAME_SHOULD_BE_STR = 'Wrong configuration, channel names should be a string.'
|
py
|
1a563bb988b35542716f7a82b2028e5461edd39c
|
# -*- coding: utf-8 -*-
import json
import shutil
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
import numpy as np
import rasterio
import rasterio.mask
from retrying import retry
try:
import gdal
except ModuleNotFoundError as e:
try:
from osgeo import gdal
except ModuleNotFoundError:
raise e
from ost.helpers import vector as vec
from ost.helpers import helpers as h
logger = logging.getLogger(__name__)
def create_timeseries_mosaic_vrt(list_of_args):
ts_dir, product, outfiles = list_of_args
gdal.BuildVRT(
str(ts_dir.joinpath(f'{product}.Timeseries.vrt')),
[str(outfile) for outfile in outfiles],
options=gdal.BuildVRTOptions(srcNodata=0, separate=True)
)
@retry(stop_max_attempt_number=3, wait_fixed=1)
def mosaic(filelist, outfile, config_file, cut_to_aoi=None, harm=None):
if outfile.parent.joinpath(f'.{outfile.name[:-4]}.processed').exists():
logger.info(f'{outfile} already exists.')
return
logger.info(f'Mosaicking file {outfile}.')
with open(config_file, 'r') as ard_file:
config_dict = json.load(ard_file)
temp_dir = config_dict['temp_dir']
aoi = config_dict['aoi']
epsg = config_dict['processing']['single_ARD']['dem']['out_projection']
if not harm:
harm = config_dict['processing']['mosaic']['harmonization']
if not cut_to_aoi:
cut_to_aoi = config_dict['processing']['mosaic']['cut_to_aoi']
logfile = outfile.parent.joinpath(f'{str(outfile)[:-4]}.errLog')
with TemporaryDirectory(prefix=f'{temp_dir}/') as temp:
temp = Path(temp)
# get datatype from first image in our mosaic filelist
with rasterio.open(filelist.split(' ')[0]) as src:
dtype = src.meta['dtype']
dtype = 'float' if dtype == 'float32' else dtype
if cut_to_aoi:
tempfile = temp.joinpath(outfile.name)
else:
tempfile = outfile
harm = 'band' if harm else 'none'
cmd = (
f"otbcli_Mosaic -ram 8192 -progress 1 "
f"-comp.feather large "
f"-harmo.method {harm} "
f"-harmo.cost rmse "
f"-tmpdir {str(temp)} "
f"-interpolator bco"
f" -il {filelist} "
f" -out {str(tempfile)} {dtype}"
)
return_code = h.run_command(cmd, logfile)
if return_code != 0:
if tempfile.exists():
tempfile.unlink()
return
if cut_to_aoi:
# get aoi in a way rasterio wants it
aoi_gdf = vec.wkt_to_gdf(aoi)
features = vec.gdf_to_json_geometry(aoi_gdf.to_crs(epsg=epsg))
# import raster and mask
with rasterio.open(tempfile) as src:
out_image, out_transform = rasterio.mask.mask(src, features,
crop=True)
out_meta = src.meta.copy()
ndv = src.nodata
out_image = np.ma.masked_where(out_image == ndv, out_image)
out_meta.update({
'driver': 'GTiff',
'height': out_image.shape[1],
'width': out_image.shape[2],
'transform': out_transform,
'tiled': True,
'blockxsize': 128,
'blockysize': 128
})
with rasterio.open(outfile, 'w', **out_meta) as dest:
dest.write(out_image.data)
# remove intermediate file
tempfile.unlink()
# check
return_code = h.check_out_tiff(outfile)
if return_code != 0:
if outfile.exists():
outfile.unlink()
else:
check_file = outfile.parent.joinpath(
f'.{outfile.name[:-4]}.processed'
)
with open(str(check_file), 'w') as file:
file.write('passed all tests \n')
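# Usage sketch (hypothetical paths, for illustration): `filelist` is a space-separated string
# of input GeoTIFF paths and `outfile` a pathlib.Path; processing settings are read from the
# OST JSON config file:
#
#   mosaic('tile_1.tif tile_2.tif', Path('mosaic.tif'), Path('processing.json'))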
def gd_mosaic(list_of_args):
filelist, outfile, config_file = list_of_args
mosaic(filelist, outfile, config_file)
def _burst_list(track, date, product, subswath, config_dict):
from shapely.wkt import loads
import geopandas as gpd
aoi = loads(config_dict['aoi'])
processing_dir = Path(config_dict['processing_dir'])
# adjust search pattern in case of coherence
search_last = f'*.{product}.tif' if 'coh' in product else f'{product}.tif'
# search for all bursts within subswath(s) in time-series
list_of_files = list(processing_dir.glob(
f'[A,D]{track}_{subswath}*/Timeseries/'
f'*.{date}.{search_last}'
))
# search for timescans (in case timeseries not found)
if not list_of_files:
list_of_files = list(processing_dir.glob(
f'[A,D]{track}_{subswath}*/Timescan/'
f'*.{product}.{date}.tif'
))
if not list_of_files:
return None
# get a list of all extent files to check for real AOI overlap
if config_dict['processing']['time-series_ARD']['apply_ls_mask']:
list_of_extents = processing_dir.glob(
f'*{track}_{subswath}*/*{track}*.valid.json'
)
else:
list_of_extents = processing_dir.glob(
f'*{track}_{subswath}*/*{track}*.min_bounds.json'
)
list_of_actual_extents = []
for burst_extent in list_of_extents:
burst = gpd.read_file(burst_extent)
if any(burst.intersects(aoi)):
burst_name = Path(str(burst_extent).split('.')[-3]).name
list_of_actual_extents.append(burst_name)
# filter the bursts for real AOI overlap
list_of_files = [
file for file in list_of_files for pattern in list_of_actual_extents
if pattern in str(file)
]
# and join them into a otb readable list
list_of_files = ' '.join([str(file) for file in list_of_files])
return list_of_files
def mosaic_slc_acquisition(track, date, product, outfile, config_file):
# -------------------------------------
# 1 load project config
with open(config_file, 'r') as ard_file:
config_dict = json.load(ard_file)
temp_dir = Path(config_dict['temp_dir'])
# create a list of bursts that actually overlap theAOI
list_of_iw12 = _burst_list(track, date, product, 'IW[1,2]', config_dict)
list_of_iw3 = _burst_list(track, date, product, 'IW3', config_dict)
if list_of_iw12:
logger.info(
f'Pre-mosaicking {product} acquisition\'s IW1 and IW2 subswaths '
f'from {track} taken at {date}.'
)
temp_iw12 = temp_dir.joinpath(f'{date}_{track}_{product}_IW1_2.tif')
mosaic(list_of_iw12, temp_iw12, config_file, harm=False)
if list_of_iw3:
logger.info(
f'Pre-mosaicking {product} acquisition\'s IW3 subswath '
f'from {track} taken at {date}.'
)
temp_iw3 = temp_dir.joinpath(f'{date}_{track}_{product}_IW3.tif')
mosaic(list_of_iw3, temp_iw3, config_file, harm=False)
if list_of_iw12 and list_of_iw3:
mosaic(
' '.join([str(temp_iw12), str(temp_iw3)]), outfile, config_file,
False, harm=True
)
temp_iw12.unlink()
temp_iw3.unlink()
elif list_of_iw12 and not list_of_iw3:
shutil.move(temp_iw12, outfile)
elif not list_of_iw12 and list_of_iw3:
shutil.move(temp_iw3, outfile)
else:
return
def gd_mosaic_slc_acquisition(list_of_args):
track, date, product, outfile, config_file = list_of_args
mosaic_slc_acquisition(track, date, product, outfile, config_file)
|
py
|
1a563bfeffefefb5ed8852512a6e6df784e7dd6b
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.datastream import TimeCharacteristic
from pyflink.table import expressions as expr
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class BatchPandasUDAFITTests(PyFlinkBlinkBatchTableTestCase):
def test_group_aggregate_function(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
# general udf
add = udf(lambda a: a + 1, result_type=DataTypes.INT())
# pandas udf
substract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
max_udaf = udaf(lambda a: a.max(), result_type=DataTypes.INT(), func_type="pandas")
t.group_by("a") \
.select(t.a, mean_udaf(add(t.b)), max_udaf(substract(t.c))) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,6.0,5", "2,3.0,3", "3,3.0,2"])
def test_group_aggregate_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a'],
[DataTypes.INT()])
min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
result_type=DataTypes.INT(), func_type="pandas")
self.t_env.register_table_sink("Results", table_sink)
t.select(min_add(t.a, t.b, t.c)) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["5"])
def test_group_aggregate_with_aux_group(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.TINYINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
t.group_by("a") \
.select("a, a + 1 as b, a + 2 as c") \
.group_by("a, b") \
.select("a, b, mean_udaf(b), max_add(b, c, 1)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2,2.0,6", "2,3,3.0,8", "3,4,4.0,10"])
def test_tumble_group_window_aggregate_function(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()
])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(tumble_window) \
.group_by("w") \
.select("w.start, w.end, mean_udaf(b)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.2",
"2018-03-11 04:00:00.0,2018-03-11 05:00:00.0,8.0"])
def test_slide_group_window_aggregate_function(self):
import datetime
from pyflink.table.window import Slide
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT(),
DataTypes.INT()
])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
slide_window = Slide.over(expr.lit(1).hours) \
.every(expr.lit(30).minutes) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(slide_window) \
.group_by("a, w") \
.select("a, w.start, w.end, mean_udaf(b), max_add(b, c, 1)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["1,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,2.0,6",
"1,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.5,7",
"1,2018-03-11 03:30:00.0,2018-03-11 04:30:00.0,5.5,14",
"1,2018-03-11 04:00:00.0,2018-03-11 05:00:00.0,8.0,14",
"2,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,1.0,4",
"2,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0,10",
"2,2018-03-11 03:30:00.0,2018-03-11 04:30:00.0,3.0,10",
"3,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0,7",
"3,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,2.0,7"])
def test_over_window_aggregate_function(self):
import datetime
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT(), DataTypes.FLOAT(),
DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(),
DataTypes.FLOAT(), DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.register_table("T", t)
self.t_env.execute_sql("""
insert into Results
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING),
max_add(b, c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND 0 FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
from T
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["1,4.3333335,5,4.3333335,3.0,3.0,2.5,4.3333335,3.0,2.0",
"1,4.3333335,13,5.5,3.0,3.0,4.3333335,8.0,5.0,5.0",
"1,4.3333335,6,4.3333335,2.0,3.0,2.5,4.3333335,3.0,2.0",
"2,2.0,9,2.0,4.0,4.0,2.0,2.0,4.0,4.0",
"2,2.0,3,2.0,2.0,4.0,1.0,2.0,4.0,2.0",
"3,2.0,3,2.0,1.0,1.0,2.0,2.0,1.0,1.0"])
class StreamPandasUDAFITTests(PyFlinkBlinkStreamTableTestCase):
def test_sliding_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Slide.over("1.hours").every("30.minutes").on("rowtime").alias("w")) \
.group_by("a, b, w") \
.select("a, w.start, w.end, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["1,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,2.0",
"1,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.5",
"1,2018-03-11 03:30:00.0,2018-03-11 04:30:00.0,5.5",
"1,2018-03-11 04:00:00.0,2018-03-11 05:00:00.0,8.0",
"2,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,1.0",
"2,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0",
"2,2018-03-11 03:30:00.0,2018-03-11 04:30:00.0,3.0",
"3,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0",
"3,2018-03-11 02:30:00.0,2018-03-11 03:30:00.0,2.0"])
os.remove(source_path)
def test_sliding_group_window_over_count(self):
self.env.set_parallelism(1)
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
from pyflink.datastream import TimeCharacteristic
self.env.set_stream_time_characteristic(TimeCharacteristic.ProcessingTime)
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'd'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Slide.over("2.rows").every("1.rows").on("protime").alias("w")) \
.group_by("a, b, w") \
.select("a, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2.5", "1,5.5", "2,2.0", "3,2.5"])
os.remove(source_path)
def test_tumbling_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Tumble.over("1.hours").on("rowtime").alias("w")) \
.group_by("a, b, w") \
.select("a, w.start, w.end, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["1,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.5",
"1,2018-03-11 04:00:00.0,2018-03-11 05:00:00.0,8.0",
"2,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0",
"3,2018-03-11 03:00:00.0,2018-03-11 04:00:00.0,2.0"])
os.remove(source_path)
def test_tumbling_group_window_over_count(self):
self.env.set_parallelism(1)
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00',
'1,1,4,2018-03-11 04:20:00',
]
source_path = tmp_dir + '/test_group_window_aggregate_function_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
from pyflink.datastream import TimeCharacteristic
self.env.set_stream_time_characteristic(TimeCharacteristic.ProcessingTime)
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'd'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Tumble.over("2.rows").on("protime").alias("w")) \
.group_by("a, b, w") \
.select("a, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2.5", "1,6.0", "2,2.0", "3,2.5"])
os.remove(source_path)
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
return v.mean()
class MaxAdd(AggregateFunction, unittest.TestCase):
def open(self, function_context):
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def get_value(self, accumulator):
# counter
self.counter.inc(10)
self.counter_sum += 10
self.assertEqual(self.counter_sum, self.counter.get_count())
return accumulator[0]
def create_accumulator(self):
return []
def accumulate(self, accumulator, *args):
result = 0
for arg in args:
result += arg.max()
accumulator.append(result)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py
|
1a563cae44ed2fa7da18477c08f8f8e735145582
|
s = input('Write something: ')
#s = 'azcbobobegghakl'
count = 0
for position in range(0, len(s)-2):
word = s[position] + s[position+1] + s[position+2]
if word == 'bob':
count += 1
print('Number of times bob occurs is: {}'.format(count))
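# A minimal equivalent using string slicing (illustrative sketch): s[i:i+3] takes each
# 3-character window, so overlapping occurrences of 'bob' are still counted.
count_slicing = sum(1 for i in range(len(s) - 2) if s[i:i+3] == 'bob')
assert count_slicing == count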
|
py
|
1a563d9084f2f552af23894fdcf2ef5607fd88a9
|
from discord import Game
from aiohttp import ClientSession
from discord.ext import commands, tasks
# -----------------------------------------------------
#-------------------- START CONFIG --------------------
# -----------------------------------------------------
discordBotToken = "" #type: str
battleMetricsServerID = None #type: int
# -----------------------------------------------------
#--------------------- END CONFIG ---------------------
# -----------------------------------------------------
client = commands.Bot(command_prefix="-",help_command=None)
@client.event
async def on_command_error(ctx, error):
if isinstance(error, commands.errors.CommandNotFound):
pass
@client.event
async def on_ready():
print(f"Bot successfully started\n")
@tasks.loop(seconds=60)
async def change_status():
await client.wait_until_ready()
serverData = await makeWebRequest(f"https://api.battlemetrics.com/servers/{battleMetricsServerID}")
if serverData == None:
return
serverPlayers = serverData['data']['attributes']['players']
serverMaxPlayers = serverData['data']['attributes']['maxPlayers']
serverQueue = serverData['data']['attributes']['details']['rust_queued_players']
if serverQueue > 0:
await client.change_presence(activity=Game(f"{serverPlayers}/{serverMaxPlayers} Queue {serverQueue}"))
else:
await client.change_presence(activity=Game(f"{serverPlayers}/{serverMaxPlayers}"))
async def makeWebRequest(URL):
async with ClientSession() as session:
async with session.get(URL) as preJSData:
if preJSData.status == 200:
return await preJSData.json()
else:
print(f"BattleMetrics Error [Code {preJSData.status}]")
change_status.start()
client.run(discordBotToken)
|
py
|
1a563dcc0e0ae0f447a82bc4c2749ec9a774ae48
|
def main(request, response):
_URL = request.url
_CSP = "default-src " + \
_URL[:_URL.index(
'/csp') + 1] + " self; script-src * 'unsafe-inline'; style-src 'unsafe-inline'"
_CSSURL = _URL[:_URL.index('/csp') + 1] + "csp/support/w3c/CanvasTest.ttf"
print _CSSURL
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_default-src_cross-origin_font_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src="resources/server.js?pipe=sub"></script>
<style>@font-face {font-family: Canvas;src: url(""" + _CSSURL + """);} #test {font-family: Canvas;}</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</script>
</body>
</html> """
|
py
|
1a563e5811ee95383ab9863e2587a50050e62b32
|
"""Definitions of common enumerations to be used together with ``Enum`` property. """
from __future__ import absolute_import
from six import string_types
from . import colors, icons, palettes
class Enumeration(object):
pass
def enumeration(*values):
if not (values and all(isinstance(value, string_types) and value for value in values)):
raise ValueError("expected a non-empty sequence of strings, got %s" % values)
if len(values) != len(set(values)):
raise ValueError("enumeration items must be unique, got %s" % values)
attrs = dict([ (value, value) for value in values ])
attrs.update({
"__slots__": [],
"_values": list(values),
"_default": values[0],
})
return type("Enumeration", (Enumeration,), attrs)()
LineJoin = enumeration("miter", "round", "bevel")
LineDash = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
LineCap = enumeration("butt", "round", "square")
FontStyle = enumeration("normal", "italic", "bold")
TextAlign = enumeration("left", "right", "center")
TextBaseline = enumeration("top", "middle", "bottom", "alphabetic", "hanging")
Direction = enumeration("clock", "anticlock")
Units = enumeration("screen", "data")
AngleUnits = enumeration("deg", "rad")
DatetimeUnits = enumeration("microseconds", "milliseconds", "seconds", "minsec", "minutes", "hourmin", "hours", "days", "months", "years")
Dimension = enumeration("width", "height", "x", "y")
Anchor = enumeration("top_left", "top_center", "top_right", "right_center", "bottom_right", "bottom_center", "bottom_left", "left_center", "center")
Location = enumeration("above", "below", "left", "right")
Orientation = enumeration("top_right", "top_left", "bottom_left", "bottom_right")
DashPattern = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
ButtonType = enumeration("default", "primary", "success", "warning", "danger", "link")
NamedColor = enumeration(*colors.__colors__)
NamedIcon = enumeration(*icons.__icons__)
Palette = enumeration(*palettes.__palettes__)
MapType = enumeration("satellite", "roadmap", "terrain", "hybrid")
DateFormat = enumeration("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822", "RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TICKS", "TIMESTAMP")
RoundingFunction = enumeration("round", "nearest", "floor", "rounddown", "ceil", "roundup")
NumeralLanguage = enumeration("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en", "en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch", "fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br", "pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
|
py
|
1a563eb67c1efff2c7a671ff8c687b7e98acac25
|
__all__ = ["Tithiwa"]
from .session import Session
from .settings import Settings
from .group import Group
from .contact import Contact
class Tithiwa(Session, Settings, Group, Contact):
def __init__(self, browser=None):
super().__init__(browser)
# def __del__(self):
# self.browser.quit()
|
py
|
1a563ff85481628bd4fbd72c1aee9d6f39a009b4
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import cirq
from cirq import Extensions, ops
from cirq import abc
from cirq.ops import gate_features
class QCircuitDiagrammable(metaclass=abc.ABCMeta):
@abc.abstractmethod
def qcircuit_diagram_info(self, args: ops.TextDiagramInfoArgs
) -> ops.TextDiagramInfo:
pass
def _escape_text_for_latex(text):
escaped = (text
.replace('\\', '\\textbackslash{}')
.replace('^', '\\textasciicircum{}')
.replace('~', '\\textasciitilde{}')
.replace('_', '\\_')
.replace('{', '\\{')
.replace('}', '\\}')
.replace('$', '\\$')
.replace('%', '\\%')
.replace('&', '\\&')
.replace('#', '\\#'))
return '\\text{' + escaped + '}'
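# Example (illustrative): _escape_text_for_latex('a_b & 50%') returns the string
# \text{a\_b \& 50\%}, i.e. the input wrapped in \text{...} with LaTeX special
# characters escaped.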
class _HardcodedQCircuitSymbolsGate(QCircuitDiagrammable):
def __init__(self, *symbols: str) -> None:
self.symbols = symbols
def qcircuit_diagram_info(self, args: ops.TextDiagramInfoArgs
) -> ops.TextDiagramInfo:
return ops.TextDiagramInfo(self.symbols)
def _get_multigate_parameters(gate: ops.TextDiagrammable,
args: ops.TextDiagramInfoArgs
) -> Optional[Tuple[int, int]]:
if not isinstance(gate, gate_features.InterchangeableQubitsGate):
return None
if args.qubit_map is None or args.known_qubits is None:
return None
indices = [args.qubit_map[q] for q in args.known_qubits]
min_index = min(indices)
n_qubits = len(args.known_qubits)
if sorted(indices) != list(range(min_index, min_index + n_qubits)):
return None
return min_index, n_qubits
class _TextToQCircuitDiagrammable(QCircuitDiagrammable):
def __init__(self, sub: ops.TextDiagrammable) -> None:
self.sub = sub
def qcircuit_diagram_info(self, args: ops.TextDiagramInfoArgs
) -> ops.TextDiagramInfo:
info = self.sub.text_diagram_info(args)
multigate_parameters = _get_multigate_parameters(self.sub, args)
if multigate_parameters is not None:
min_index, n_qubits = multigate_parameters
name = _escape_text_for_latex(str(self.sub).rsplit('**', 1)[0])
if info.exponent != 1:
name += '^{' + str(info.exponent) + '}'
box = '\multigate{' + str(n_qubits - 1) + '}{' + name + '}'
ghost = '\ghost{' + name + '}'
assert args.qubit_map is not None
assert args.known_qubits is not None
symbols = tuple(box if (args.qubit_map[q] == min_index) else
ghost for q in args.known_qubits)
return ops.TextDiagramInfo(symbols, exponent=info.exponent,
connected=False)
s = [_escape_text_for_latex(e) for e in info.wire_symbols]
if info.exponent != 1:
s[0] += '^{' + str(info.exponent) + '}'
return ops.TextDiagramInfo(tuple('\\gate{' + e + '}' for e in s))
class _FallbackQCircuitGate(QCircuitDiagrammable):
def __init__(self, sub: ops.Gate) -> None:
self.sub = sub
def qcircuit_diagram_info(self, args: ops.TextDiagramInfoArgs
) -> ops.TextDiagramInfo:
name = str(self.sub)
qubit_count = ((len(args.known_qubits) if
(args.known_qubits is not None) else 1)
if args.known_qubit_count is None
else args.known_qubit_count)
symbols = tuple(_escape_text_for_latex('{}:{}'.format(name, i))
for i in range(qubit_count))
return ops.TextDiagramInfo(symbols)
fallback_qcircuit_extensions = Extensions()
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.TextDiagrammable,
_TextToQCircuitDiagrammable)
fallback_qcircuit_extensions.add_recursive_cast(
QCircuitDiagrammable,
ops.GateOperation,
lambda ext, op: ext.try_cast(QCircuitDiagrammable, op.gate))
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.RotXGate,
lambda gate:
_HardcodedQCircuitSymbolsGate('\\targ')
if gate.half_turns == 1
else None)
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.MeasurementGate,
lambda gate: _HardcodedQCircuitSymbolsGate('\\meter'))
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
cirq.google.ExpWGate,
lambda gate:
_HardcodedQCircuitSymbolsGate('\\targ')
if gate.half_turns == 1 and gate.axis_half_turns == 0
else None)
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.Rot11Gate,
lambda gate:
_HardcodedQCircuitSymbolsGate('\\control', '\\control')
if gate.half_turns == 1
else None)
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.CNotGate,
lambda gate: _HardcodedQCircuitSymbolsGate('\\control', '\\targ'))
fallback_qcircuit_extensions.add_cast(
QCircuitDiagrammable,
ops.Gate,
_FallbackQCircuitGate)
|
py
|
1a56402df14da0bf6b48f5ebd8348a47820d4634
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayBossFncGfsettlePaycontractConfirmResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncGfsettlePaycontractConfirmResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayBossFncGfsettlePaycontractConfirmResponse, self).parse_response_content(response_content)
|
py
|
1a564067cc630d2c1e0336e7d1e0af839bf04abc
|
import numpy as np
import time
from deprecated import deprecated
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import datasets
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn import ensemble
from scipy.special import expit, logsumexp
from CartTree_regression_xrh import *
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
class GBDT_Regressor:
"""
    Gradient boosting tree for regression problems.
    The base learner is a CART regression tree.
    Author: xrh
    Date: 2021-04-04
    ref: https://blog.csdn.net/zpalyq110/article/details/79527653
    test0: regression task
    dataset: Boston housing dataset
    parameters: error_rate_threshold=0.01, max_iter=100, max_depth=3, learning_rate=0.1
    training set size: 455
    test set size: 51
    MSE on the test set: 7.610308909461337
    training time: 160s
"""
def __init__(self, square_loss_threshold=0.05, max_iter=10, max_depth=3):
        # stopping condition: square_loss < self.square_loss_threshold (stop training once the squared loss of the current ensemble falls below the threshold)
        self.square_loss_threshold = square_loss_threshold
        # maximum number of iterations
        self.max_iter = max_iter
        self.max_depth = max_depth
        self.G = []  # collection of weak learners
def fit(self, X, y, learning_rate):
"""
        Fit the model on the training data
"""
        f = 0  # weighted sum of the base learners
y_predict = np.mean(y)
# y_predict =0
self.G.append(y_predict)
f += y_predict
        feature_value_set = RegresionTree.get_feature_value_set(X)  # candidate splits: the set of (feature, split value) pairs
        for m in range(self.max_iter):  # iteration m
            r = y - f  # residual
RT = RegresionTree(threshold=0.01, max_depth=self.max_depth,print_log=False)
            # RT=DecisionTreeRegressor(max_depth=self.max_depth)  # sklearn regression tree
RT.fit(X, r, feature_value_set=feature_value_set)
y_predict = RT.predict(X)
            self.G.append((learning_rate, RT))  # store the base learner
            # squared loss of the current weighted combination of weak learners
f += learning_rate * y_predict
            square_loss = np.average(np.square(f - y))  # squared error loss
print('round:{}, square_loss:{}'.format(m, square_loss))
print('======================')
            if square_loss < self.square_loss_threshold:  # stop once the loss drops below the threshold
break
def predict(self, X):
"""
        Predict on new data
"""
        f = 0  # final model
        f += self.G[0]  # the first element stores the initialization value
for alpha, RT in self.G[1:]:
y_predict = RT.predict(X)
f += alpha * y_predict
return f
def score(self, X, y):
"""对训练效果进行评价"""
f = self.predict(X)
square_loss = np.average(np.square(f - y)) # 平方误差损失
return square_loss
class GBDT_2Classifier:
"""
适用于 二分类 问题 的 梯度提升树
基分类器为 CART 回归树
Author: xrh
Date: 2021-04-10
ref: https://zhuanlan.zhihu.com/p/89549390
test1: 二分类任务
数据集:Mnist
参数: error_rate_threshold=0.01, max_iter=30, max_depth=3,learning_rate=0.2
训练集数量:60000
测试集数量:10000
正确率: 0.9891
模型训练时长:205.7052161693573
"""
def __init__(self, error_rate_threshold=0.05, max_iter=10, max_depth=1):
"""
:param error_rate_threshold: 训练中止条件, 若当前得到的基分类器的组合 的错误率 小于阈值, 则停止训练
:param max_iter: 最大迭代次数
:param max_depth: CART 回归树 的最大深度
"""
# 训练中止条件 error_rate < self.error_rate_threshold ( 若当前得到的基分类器的组合 的错误率 小于阈值, 则停止训练)
self.error_rate_threshold = error_rate_threshold
# 最大迭代次数
self.max_iter = max_iter
# CART 回归树 的最大深度
self.max_depth = max_depth
self.G = [] # 弱分类器 集合
    def sigmoid(self, X):
        """
        Sigmoid activation function.
:param X:
:return:
"""
return 1 / (1 + np.exp(-X))
def fit(self, X, y, learning_rate):
"""
用 训练数据 拟合模型
:param X: 特征数据 , shape=(N_sample, N_feature)
:param y: 标签数据 , shape=(N_sample,)
:param learning_rate: 学习率
:return:
"""
N = np.shape(X)[0] # 样本的个数
f = 0 # 基分类器 的加权和
P_1 = len(y[y == 1]) / len(y)
y_predict = np.log(P_1 / (1 - P_1))
self.G.append(y_predict)
f += y_predict
feature_value_set = RegresionTree.get_feature_value_set(X) # 可供选择的特征集合 , 包括 (特征, 切分值)
for m in range(self.max_iter): # 进行 第 m 轮迭代
r = y - self.sigmoid(f) # 残差
RT = RegresionTree_GBDT(min_square_loss=0.1, max_depth=self.max_depth,print_log=False)
RT.fit(X, r, y, feature_value_set=feature_value_set)
y_predict = RT.predict(X)
self.G.append((learning_rate, RT)) # 存储 基分类器
f += learning_rate * y_predict
# 计算 当前 所有弱分类器加权 得到的 最终分类器 的 分类错误率
G = self.sigmoid(f)
#TODO: 负例 设置为 -1 会导致 在训练集的 训练误差率无法下降 \
# 原因: 二分类时 ,默认 y = {0,1}, 若要 改为 y={-1,1} 则 损失函数 要使用另外的形式
G[G >= 0.5] = 1 # 概率 大于 0.5 被标记为 正例
G[G < 0.5] = 0 # 概率 小于 0.5 被标记为 负例
err_arr = np.ones(N, dtype=int)
err_arr[G == y] = 0
err_rate = np.mean(err_arr)
print('round:{}, err_rate:{}'.format(m, err_rate))
print('======================')
if err_rate < self.error_rate_threshold: # 错误率 已经小于 阈值, 则停止训练
break
def predict(self, X):
"""
对 测试 数据进行预测, 返回预测的标签
:param X: 特征数据 , shape=(N_sample, N_feature)
:return:
"""
f = 0 # 最终分类器
f += self.G[0] # 第一个 存储的是 初始化情况
for alpha, RT in self.G[1:]:
y_predict = RT.predict(X)
f += alpha * y_predict
# print('f:',f)
G = self.sigmoid(f)
# print('G:',G)
G[G >= 0.5] = 1 # 概率 大于 0.5 被标记为 正例
G[G < 0.5] = 0 # 概率 小于 0.5 被标记为 负例
return G
def predict_proba(self, X):
"""
对 测试 数据进行预测, 返回预测的 概率值
:param X: 特征数据 , shape=(N_sample, N_feature)
:return:
"""
f = 0 # 最终分类器
f += self.G[0] # 第一个 存储的是 初始化情况
for alpha, RT in self.G[1:]:
y_predict = RT.predict(X)
f += alpha * y_predict
# print('f:',f)
G = self.sigmoid(f)
return G
def score(self, X, y):
"""
使用 测试数据集 对模型进行评价, 返回正确率
:param X: 特征数据 , shape=(N_sample, N_feature)
:param y: 标签数据 , shape=(N_sample,)
:return: 正确率 accuracy
"""
N = np.shape(X)[0] # 样本的个数
G = self.predict(X)
err_arr = np.ones(N, dtype=int)
err_arr[G == y] = 0
err_rate = np.mean(err_arr)
accuracy = 1 - err_rate
return accuracy
class GBDT_MultiClassifier:
"""
适用于 二分类 问题 的 梯度提升树
基分类器为 CART 回归树
Author: xrh
Date: 2021-04-18
ref: https://zhuanlan.zhihu.com/p/91652813
test1: 多分类任务
数据集:Mnist
参数: error_rate_threshold=0.01, max_iter=20, max_depth=3 , learning_rate=0.5
训练集数量:60000
测试集数量:10000
正确率: 0.915
模型训练时长:1542s
"""
def __init__(self, error_rate_threshold=0.05, max_iter=10, max_depth=1):
"""
:param error_rate_threshold: 训练中止条件, 若当前得到的基分类器的组合 的错误率 小于阈值, 则停止训练
:param max_iter: 最大迭代次数
:param max_depth: CART 回归树 的最大深度
"""
# 训练中止条件 error_rate < self.error_rate_threshold ( 若当前得到的基分类器的组合 的错误率 小于阈值, 则停止训练)
self.error_rate_threshold = error_rate_threshold
# 最大迭代次数
self.max_iter = max_iter
# CART 回归树 的最大深度
self.max_depth = max_depth
self.G = [] # 弱分类器 集合
    def sigmoid(self, X):
        """
        Sigmoid activation function.
:param X:
:return:
"""
return 1 / (1 + np.exp(-X))
@deprecated(version='1.0', reason="You should use another function")
    def softmax_v1_0(self, X):
        """
        Softmax: converts raw scores into probabilities.
        :param X:
        :return:
        """
        # TODO: this naive form suffers from overflow and underflow
        return np.exp(X) / np.sum(np.exp(X), axis=0)  # softmax: convert scores into probabilities
@deprecated(version='1.1', reason="You should use another function")
    def softmax_v1_1(self, X):
        """
        Softmax: converts raw scores into probabilities.
        Fixes the overflow and underflow problems of the naive softmax by subtracting the column-wise maximum.
        ref: https://www.cnblogs.com/guoyaohua/p/8900683.html
        :param X: shape (K,N)
        :return: shape (K,N)
        """
        X_max = np.max(X, axis=0)
        X = X - X_max
        return np.exp(X) / np.sum(np.exp(X), axis=0)  # softmax: convert scores into probabilities
    def softmax(self, X):
        """
        Softmax: converts raw scores into probabilities.
        Avoids the overflow problem of the naive softmax by using the log-sum-exp trick.
        np.nan_to_num: replaces nan elements with 0 and inf elements with large finite numbers.
        ref: sklearn source code
             MultinomialDeviance -> def negative_gradient
        :param X: shape (K,N)
        :return: shape (K,N)
        """
        return np.nan_to_num(np.exp(X - logsumexp(X, axis=0)))  # softmax: convert scores into probabilities
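        # Worked example of the log-sum-exp trick: for a single column x = [1000, 1001] the naive
        # np.exp(x) overflows, but logsumexp(x) = 1001 + log(1 + exp(-1)) ≈ 1001.313, so
        # np.exp(x - logsumexp(x)) ≈ [0.269, 0.731] is obtained without exponentiating any large number.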
def fit(self, X, y, learning_rate):
"""
用 训练数据 拟合模型
:param X: 特征数据 , shape=(N_sample, N_feature)
:param y: 标签数据 , shape=(N_sample,)
:param learning_rate: 学习率
:return:
"""
N = np.shape(X)[0] # 样本的个数
self.K = len({ele for ele in y}) # y 中有多少种不同的标签, K分类
print('according to the training dataset : K={} classification task'.format(self.K))
F_0 = np.zeros( (self.K ),dtype=float) # shape : (K,)
for k in range(self.K): # 遍历 所有的 类别
F_0[k] = len(y[y == k]) / len(y)
self.G.append(F_0)
F = np.transpose([F_0] * N) # 对 F_0 进行复制, shape : (K, N)
feature_value_set = RegresionTree.get_feature_value_set(X) # 可供选择的特征集合 , 包括 (特征, 切分值)
y_one_hot = (y == np.array(range(self.K)).reshape(-1, 1)).astype(
np.int8) # 将 预测向量 扩展为 one-hot , shape: (K,N)
for m in range(1,self.max_iter): # 进行 第 m 轮迭代
p = self.softmax( F ) # shape: (K,N)
DT_list=[]
for k in range(self.K): # 依次训练 K 个 二分类器
print( '======= train No.{} 2Classifier ======='.format(k) )
r = y_one_hot[k] - p[k] # 残差 shape:(N,)
# 训练 用于 2分类的 回归树
DT = RegresionTree_GBDT(min_square_loss=0.1, max_depth=self.max_depth,print_log=True)
DT.fit(X, r, y_one_hot[k], feature_value_set=feature_value_set)
y_predict = (self.K / (self.K-1)) * ( DT.predict(X) ) # shape:(N,)
DT_list.append(DT)
F[k] += learning_rate * y_predict # F[k] shape:(N,)
# print('======= end =======')
self.G.append( (learning_rate, DT_list) ) # 存储 基分类器
# 计算 当前 所有弱分类器加权 得到的 最终分类器 的 分类错误率
G = self.softmax( F ) # F shape: (K,N)
G_label = np.argmax( G, axis=0 ) # 取 概率最大的 作为 预测的标签
err_arr = np.ones( N, dtype=int )
err_arr[G_label == y] = 0
err_rate = np.mean(err_arr) # 计算训练误差
print('round:{}, err_rate:{}'.format(m, err_rate))
print('======================')
if err_rate < self.error_rate_threshold: # 错误率 已经小于 阈值, 则停止训练
break
def predict(self, X):
"""
对 测试 数据进行预测, 返回预测的标签
:param X: 特征数据 , shape=(N_sample, N_feature)
:return:
"""
N = np.shape(X)[0] # 样本的个数
F_0 = self.G[0] # G中 第一个 存储的是 初始化情况
F = np.transpose([F_0] * N) # shape : (K, N)
for alpha, DT_list in self.G[1:]:
for k in range(self.K):
DT = DT_list[k]
y_predict = (self.K / (self.K - 1)) * (DT.predict(X)) # shape:(N,)
F[k] += alpha * y_predict # F[k] shape:(N,)
G = self.softmax(F)
G_label = np.argmax(G, axis=0)
return G_label
def predict_proba(self, X):
"""
对 测试 数据进行预测, 返回预测的 概率值
:param X: 特征数据 , shape=(N_sample, N_feature)
:return:
"""
F = self.G[0] # 第一个 存储的是 初始化情况
for alpha, DT_list in self.G[1:]:
for k in range(self.K):
DT = DT_list[k]
y_predict = (self.K / (self.K - 1)) * (DT.predict(X)) # shape:(N,)
DT_list.append(DT)
F[k] += alpha * y_predict # F[k] shape:(N,)
G = self.softmax(F)
return G
def score(self, X, y):
"""
使用 测试数据集 对模型进行评价, 返回正确率
:param X: 特征数据 , shape=(N_sample, N_feature)
:param y: 标签数据 , shape=(N_sample,)
:return: 正确率 accuracy
"""
N = np.shape(X)[0] # 样本的个数
G = self.predict(X)
err_arr = np.ones(N, dtype=int)
err_arr[G == y] = 0
err_rate = np.mean(err_arr)
accuracy = 1 - err_rate
return accuracy
class Test:
def test_tiny_regress_dataset(self):
"""
利用 https://blog.csdn.net/zpalyq110/article/details/79527653 中的数据集
测试 GBDT 回归
:return:
"""
# 获取训练集
dataset = np.array(
[[5, 20, 1.1],
[7, 30, 1.3],
[21, 70, 1.7],
[30, 60, 1.8],
])
        columns = ['age', 'weight', 'label']
X = dataset[:, 0:2]
y = dataset[:, 2]
        # start time
start = time.time()
        # create the model
print('start create model')
clf = GBDT_Regressor(max_iter=5, max_depth=3)
clf.fit(X, y, learning_rate=0.1)
print(' model complete ')
        # end time
end = time.time()
print('time span:', end - start)
        # test data
X_test = np.array([
[25, 65]
])
y_predict = clf.predict(X_test)
print('res: ', y_predict)
def test_regress_dataset(self):
"""
利用 boston房价 数据集
测试 GBDT 回归
:return:
"""
# 加载sklearn自带的波士顿房价数据集
dataset = load_boston()
# 提取特征数据和目标数据
X = dataset.data
y = dataset.target
# 将数据集以9:1的比例随机分为训练集和测试集,为了重现随机分配设置随机种子,即random_state参数
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9, test_size=0.1, random_state=188)
# 实例化估计器对象
params = {'n_estimators': 100, 'max_depth': 3, 'min_samples_split': 2,
'learning_rate': 0.1, 'loss': 'ls'}
gbr = ensemble.GradientBoostingRegressor(**params)
# 估计器拟合训练数据
gbr.fit(X_train, y_train)
# 训练完的估计器对测试数据进行预测
y_pred = gbr.predict(X_test)
# 输出特征重要性列表
# print(gbr.feature_importances_)
start = time.time()
print('start create model')
clf = GBDT_Regressor(max_iter=100, max_depth=3)
clf.fit(X_train, y_train, learning_rate=0.1)
print(' model complete ')
# 结束时间
end = time.time()
print('time span:', end - start)
y_pred_test = clf.predict(X_test)
print('by sklearn , the squared_error:', mean_squared_error(y_test, y_pred)) # the squared_error: 8.46788133276128
print('by xrh , the squared_error:', mean_squared_error(y_test, y_pred_test)) #
def test_tiny_2classification_dataset(self):
"""
利用 https://blog.csdn.net/zpalyq110/article/details/79527653 中的数据集
测试 GBDT 回归
:return:
"""
dataset = np.array(
[[5, 20, 0],
[7, 30, 0],
[21, 70, 1],
[30, 60, 1],
])
columns = ['age', 'weight', 'label']
X = dataset[:, 0:2]
y = dataset[:, 2]
clf = GBDT_2Classifier(error_rate_threshold=0.0, max_iter=5, max_depth=3)
clf.fit(X, y, learning_rate=0.1)
X_test = np.array(
[[25, 65]])
print('y predict:', clf.predict(X_test))
def loadData_2classification(self, fileName, n=1000):
        '''
        Load the data file and convert the dataset labels into binary labels.
        :param fileName: path of the file to load
        :param n: size of the dataset to return
        :return: data list and label list
        '''
        # containers for the data and the labels
        dataArr = []
        labelArr = []
        # open the file
        fr = open(fileName)
        cnt = 0  # counter
        # iterate over every line of the file
        for line in fr.readlines():
            if cnt == n:
                break
            # take the current line and split it into fields on ","
            # strip: removes the given characters (whitespace/newline by default) from both ends of the line
            # split: splits the string into fields on the given character and returns a list
            curLine = line.strip().split(',')
            # append everything except the label to the data list (curLine[0] is the label)
            # the string values are converted to integers on the way in
            # the data is also binarized: values greater than 128 become 1, the rest become 0, which simplifies later computation
            dataArr.append([int(int(num) > 128) for num in curLine[1:]])
            # append the label to the label list
            # convert to a binary classification task:
            # label 0 is mapped to 1, everything else to 0
            # this clearly produces an imbalanced class distribution: only about 10% of the samples are 1, the rest are 0
            if int(curLine[0]) == 0:
                labelArr.append(1)
            else:
                labelArr.append(0)
            # if int(curLine[0]) <= 5:
            #     labelArr.append(1)
            # else:
            #     labelArr.append(0)
            cnt += 1
        fr.close()
        # return the data and the labels
        return dataArr, labelArr
def test_Mnist_dataset_2classification(self, n_train, n_test):
"""
将 Mnist (手写数字) 数据集 转变为 二分类 数据集
测试 GBDT, 并对 模型效果做出评估
:param n_train: 使用训练数据集的规模
:param n_test: 使用测试数据集的规模
:return:
"""
# 获取训练集
trainDataList, trainLabelList = self.loadData_2classification('../Mnist/mnist_train.csv', n=n_train)
print('train data, row num:{} , column num:{} '.format(len(trainDataList), len(trainDataList[0])))
trainDataArr = np.array(trainDataList)
trainLabelArr = np.array(trainLabelList)
        # start time
print('start training model....')
start = time.time()
        '''
        sklearn GradientBoostingClassifier parameter tuning:
        loss: loss function, either deviance or exponential. deviance uses the log-likelihood; exponential uses the exponential loss, which makes the model equivalent to AdaBoost.
        n_estimators: maximum number of weak learners, 100 by default. Watch out for over- or under-fitting when tuning; usually tuned together with learning_rate.
        learning_rate: step size, i.e. the shrinkage factor applied to each weak learner, 0.1 by default, in the range (0, 1]. A value of 1 means no shrinkage. A smaller learning_rate needs more iterations.
        subsample: subsampling ratio, 1 by default, in the range (0, 1]. A value of 1 means no subsampling; a value below 1 trains each weak learner on a subsample, which helps against overfitting, but too small a value causes high variance.
        init: initial weak learner. If unset, the learner built in the first iteration is used; it can usually be left alone unless prior knowledge is available.
        Since GBDT uses CART regression trees, the following parameters tune the weak learners, mostly to prevent overfitting:
        max_feature: maximum number of features considered when splitting, None by default (all features). Possible values: log2, auto, sqrt.
        max_depth: maximum depth of the CART trees, None by default.
        min_sample_split: minimum number of samples required to split a node. A node with fewer samples becomes a leaf and is not split further. Default is 2.
        min_sample_leaf: minimum number of samples in a leaf. A leaf with fewer samples is pruned together with its sibling. Default is 1.
        min_weight_fraction_leaf: minimum weighted fraction of samples in a leaf. A leaf below this value is pruned together with its sibling; mainly used with weighted samples. Default is 0.
        min_leaf_nodes: maximum number of leaf nodes.
        '''
        """
        sklearn performance reference
        parameters: learning_rate=0.1, n_estimators=50, max_depth=3
        train data, row num:6000 , column num:784
        training cost time : 9.30972957611084
        test data, row num:1000 , column num:784
        test dataset accuracy: 0.976
        """
clf = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=50
, max_depth=3
)
clf.fit(trainDataArr, trainLabelArr)
# clf = GBDT_2Classifier( error_rate_threshold=0.01, max_iter=30, max_depth=3 )
# clf.fit(trainDataArr, trainLabelArr,learning_rate=0.2)
        # end time
end = time.time()
print('training cost time :', end - start)
        # load the test set
testDataList, testLabelList = self.loadData_2classification('../Mnist/mnist_test.csv', n=n_test)
print('test data, row num:{} , column num:{} '.format(len(testDataList), len(testDataList[0])))
testDataArr = np.array(testDataList)
testLabelArr = np.array(testLabelList)
# print('test dataset accuracy: {} '.format(clf.score(testDataArr, testLabelArr)))
        # model evaluation
        y_pred = clf.predict(testDataArr)
        y_true = testLabelArr
        # 1. accuracy
print('test dataset accuracy: {} '.format(accuracy_score(y_true, y_pred)))
print('====================')
        # 2. precision
# print(precision_score(y_true, y_pred, average='macro')) #
# print(precision_score(y_true, y_pred, average='micro')) #
# print(precision_score(y_true, y_pred, average='weighted')) #
print('pos-1 precision: ', precision_score(y_true, y_pred, average='binary'))
precision_list = precision_score(y_true, y_pred, average=None)
print('neg-0 precision:{}, pos-1 precision:{} '.format(precision_list[0], precision_list[1]))
print('====================')
        # 3. recall
# print(recall_score(y_true, y_pred, average='macro')) #
# print(recall_score(y_true, y_pred, average='micro')) #
# print(recall_score(y_true, y_pred, average='weighted')) #
print('pos-1 recall: ', recall_score(y_true, y_pred, average='binary'))
recall_list = recall_score(y_true, y_pred, average=None)
print('neg-0 recall:{}, pos-1 recall:{} '.format(recall_list[0], recall_list[1]))
print('====================')
        # 4. F1-score
# print(f1_score(y_true, y_pred, average='macro'))
# print(f1_score(y_true, y_pred, average='micro'))
# print(f1_score(y_true, y_pred, average='weighted'))
print('pos-1 f1_score: ', f1_score(y_true, y_pred, average='binary'))
f1_score_list = f1_score(y_true, y_pred, average=None)
print('neg-0 f1_score:{}, pos-1 f1_score:{} '.format(f1_score_list[0], f1_score_list[1]))
print('====================')
        # plot the P-R curve
        # sklearn's GBDT serves as the baseline
clf2 = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=30
, max_depth=3
)
clf2.fit(trainDataArr, trainLabelArr)
y_pred = clf.predict(testDataArr)
y_scores = clf.predict_proba(testDataArr)
y_true = testLabelArr
precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
        y_scores2 = clf2.predict_proba(testDataArr)[:, 1]  # column 1, i.e. the probability of the positive class
precision2, recall2, thresholds2 = precision_recall_curve(y_true, y_scores2)
# disp = PrecisionRecallDisplay(precision=precision, recall=recall)
# disp.plot()
plt.plot(recall, precision, label="GDBT_2Classifier(xrh)", color='navy') #
plt.plot(recall2, precision2, label="GradientBoostingClassifier(sklearn)", color='turquoise')
plt.title(' Precision-Recall curve ')
        # plt.ylim([0.0, 1.05])  # Y-axis range
        # plt.xlim([0.0, 1.0])  # X-axis range
plt.xlabel("recall")
plt.ylabel("precision")
        plt.legend(loc=(0, -.38), prop=dict(size=14))  # legend
plt.show()
        # ROC curve
fpr, tpr, _ = roc_curve(y_true, y_scores)
fpr2, tpr2, _ = roc_curve(y_true, y_scores2)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.plot(fpr, tpr, label="GDBT_2Classifier(xrh)", color='darkorange') #
plt.plot(fpr2, tpr2, label="GradientBoostingClassifier(sklearn)", color='turquoise')
# plt.xlim( [0.0, 1.0] )
# plt.ylim( [0.0, 1.05] )
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
        plt.legend(loc=(0, -.38), prop=dict(size=14))  # legend
plt.show()
def test_tiny_multiclassification_dataset(self):
"""
使用 https://zhuanlan.zhihu.com/p/91652813 中的数据测试 GBDT-多分类
:return:
"""
X_train =np.array( [[6],
[12],
[14],
[18],
[20],
[65],
[31],
[40],
[1],
[2],
[100],
[101],
[65],
[54],
])
y_train = np.array([[0], [0], [0], [0], [0], [1], [1], [1], [1], [1], [2], [2], [2], [2]]).ravel()
clf = GBDT_MultiClassifier( error_rate_threshold=0.01, max_iter=5, max_depth=1 )
clf.fit(X_train, y_train,learning_rate=1)
def loadData(self, fileName, n=1000):
        '''
        Load the data file.
        :param fileName: path of the file to load
        :param n: size of the dataset to return
        :return: data list and label list
        '''
        # containers for the data and the labels
        dataArr = []
        labelArr = []
        # open the file
        fr = open(fileName)
        cnt = 0  # counter
        # iterate over every line of the file
        for line in fr.readlines():
            if cnt == n:
                break
            # take the current line and split it into fields on ","
            # strip: removes the given characters (whitespace/newline by default) from both ends of the line
            # split: splits the string into fields on the given character and returns a list
            curLine = line.strip().split(',')
            # append everything except the label to the data list (curLine[0] is the label)
            # the string values are converted to integers on the way in
            # the data is also binarized: values greater than 128 become 1, the rest become 0, which simplifies later computation
            dataArr.append([int(int(num) > 128) for num in curLine[1:]])
            # append the label to the label list
            labelArr.append(int(curLine[0]))
            cnt += 1
        fr.close()
        # return the data and the labels
        return dataArr, labelArr
def test_Mnist_dataset(self, n_train, n_test):
"""
Mnist (手写数字) 数据集
测试 AdaBoost 的 多分类
:param n_train: 使用训练数据集的规模
:param n_test: 使用测试数据集的规模
:return:
"""
# 获取训练集
trainDataList, trainLabelList = self.loadData('../Mnist/mnist_train.csv', n=n_train)
print('train data, row num:{} , column num:{} '.format(len(trainDataList), len(trainDataList[0])))
trainDataArr = np.array(trainDataList)
trainLabelArr = np.array(trainLabelList)
        # start time
print('start training model....')
start = time.time()
"""
调参:
loss:损失函数。有deviance和exponential两种。deviance是采用对数似然,exponential是指数损失,后者相当于AdaBoost。
n_estimators:最大弱学习器个数,默认是100,调参时要注意过拟合或欠拟合,一般和learning_rate一起考虑。
criterion: 切分叶子节点时, 选择切分特征考虑的误差函数, 默认是 “ friedman_mse”( Friedman 均方误差),“ mse”(均方误差)和“ mae”(均绝对误差)
learning_rate:步长,即每个弱学习器的权重缩减系数,默认为0.1,取值范围0-1,当取值为1时,相当于权重不缩减。较小的learning_rate相当于更多的迭代次数。
subsample:子采样,默认为1,取值范围(0,1],当取值为1时,相当于没有采样。小于1时,即进行采样,按比例采样得到的样本去构建弱学习器。这样做可以防止过拟合,但是值不能太低,会造成高方差。
init:初始化弱学习器。不使用的话就是第一轮迭代构建的弱学习器.如果没有先验的话就可以不用管
由于GBDT使用CART回归决策树。以下参数用于调优弱学习器,主要都是为了防止过拟合
max_feature:树分裂时考虑的最大特征数,默认为None,也就是考虑所有特征。可以取值有:log2,auto,sqrt
max_depth:CART最大深度,默认为None
min_sample_split:划分节点时需要保留的样本数。当某节点的样本数小于某个值时,就当做叶子节点,不允许再分裂。默认是2
min_sample_leaf:叶子节点最少样本数。如果某个叶子节点数量少于某个值,会同它的兄弟节点一起被剪枝。默认是1
min_weight_fraction_leaf:叶子节点最小的样本权重和。如果小于某个值,会同它的兄弟节点一起被剪枝。一般用于权重变化的样本。默认是0
min_leaf_nodes:最大叶子节点数
ref: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
测试1:
max_depth=3, n_estimators=30, learning_rate=0.8,
n_train=60000
n_test=10000
训练时间 : 795.5719292163849
准确率: 0.8883
测试2:
max_depth=3, n_estimators=20, learning_rate=0.5,
n_train=60000
n_test=10000
训练时间 : 589 s
准确率: 0.9197
"""
# clf = GradientBoostingClassifier(loss='deviance',criterion='mse', n_estimators=20, learning_rate=0.5,
# max_depth=3)
#
# clf.fit(trainDataArr, trainLabelArr)
clf = GBDT_MultiClassifier( error_rate_threshold=0.01, max_iter=20, max_depth=3 )
clf.fit( trainDataArr, trainLabelArr,learning_rate= 0.5 ) #
        # end time
end = time.time()
print('training cost time :', end - start)
        # load the test set
testDataList, testLabelList = self.loadData('../Mnist/mnist_test.csv', n=n_test)
print('test data, row num:{} , column num:{} '.format(len(testDataList), len(testDataList[0])))
testDataArr = np.array(testDataList)
testLabelArr = np.array(testLabelList)
print('test dataset accuracy: {} '.format(clf.score(testDataArr, testLabelArr)))
def test_iris_dataset(self):
        # use the iris dataset, which has three classes; y takes the values 0, 1, 2
        X, y = datasets.load_iris(True)  # 150 records
        # split the dataset in two: 80% training data, 20% test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=188)
# clf = GradientBoostingClassifier(loss='deviance',n_estimators=3, learning_rate=0.1,
# max_depth=2)
# clf.fit(X_train, y_train)
clf = GBDT_MultiClassifier( error_rate_threshold=0.01, max_iter=5, max_depth=3 )
clf.fit(X_train, y_train,learning_rate=0.8)
print(clf.score(X_test, y_test))
if __name__ == '__main__':
test = Test()
# test.test_tiny_regress_dataset()
# test.test_regress_dataset()
# test.test_Mnist_dataset_2classification(60000,10000)
# test.test_tiny_2classification_dataset()
# test.test_tiny_multiclassification_dataset()
# test.test_Mnist_dataset(6000, 1000)
# test.test_Mnist_dataset(60000,10000)
test.test_iris_dataset()
|
py
|
1a5640e4ec51d477c458d72d80c6e119e1098b2c
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAutomationAccountResult',
'AwaitableGetAutomationAccountResult',
'get_automation_account',
]
@pulumi.output_type
class GetAutomationAccountResult:
"""
Definition of the automation account type.
"""
def __init__(__self__, creation_time=None, description=None, encryption=None, etag=None, id=None, identity=None, last_modified_by=None, last_modified_time=None, location=None, name=None, private_endpoint_connections=None, public_network_access=None, sku=None, state=None, tags=None, type=None):
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if last_modified_by and not isinstance(last_modified_by, str):
raise TypeError("Expected argument 'last_modified_by' to be a str")
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if public_network_access and not isinstance(public_network_access, bool):
raise TypeError("Expected argument 'public_network_access' to be a bool")
pulumi.set(__self__, "public_network_access", public_network_access)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
Gets the creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Gets or sets the description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.EncryptionPropertiesResponse']:
"""
Encryption properties for the automation account
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets or sets the etag of the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
Gets or sets the last modified by.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> str:
"""
Gets the last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The Azure Region where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Optional[Sequence['outputs.PrivateEndpointConnectionResponse']]:
"""
List of Automation operations supported by the Automation resource provider.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[bool]:
"""
Indicates whether traffic on the non-ARM endpoint (Webhook/Agent) is allowed from the public internet
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Gets or sets the SKU of account.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def state(self) -> str:
"""
Gets status of account.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetAutomationAccountResult(GetAutomationAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAutomationAccountResult(
creation_time=self.creation_time,
description=self.description,
encryption=self.encryption,
etag=self.etag,
id=self.id,
identity=self.identity,
last_modified_by=self.last_modified_by,
last_modified_time=self.last_modified_time,
location=self.location,
name=self.name,
private_endpoint_connections=self.private_endpoint_connections,
public_network_access=self.public_network_access,
sku=self.sku,
state=self.state,
tags=self.tags,
type=self.type)
def get_automation_account(automation_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutomationAccountResult:
"""
Definition of the automation account type.
:param str automation_account_name: The name of the automation account.
:param str resource_group_name: Name of an Azure Resource group.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:automation/v20200113preview:getAutomationAccount', __args__, opts=opts, typ=GetAutomationAccountResult).value
return AwaitableGetAutomationAccountResult(
creation_time=__ret__.creation_time,
description=__ret__.description,
encryption=__ret__.encryption,
etag=__ret__.etag,
id=__ret__.id,
identity=__ret__.identity,
last_modified_by=__ret__.last_modified_by,
last_modified_time=__ret__.last_modified_time,
location=__ret__.location,
name=__ret__.name,
private_endpoint_connections=__ret__.private_endpoint_connections,
public_network_access=__ret__.public_network_access,
sku=__ret__.sku,
state=__ret__.state,
tags=__ret__.tags,
type=__ret__.type)
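# Illustrative usage sketch (not part of the generated module; the account and resource group
# names below are placeholders):
#   result = get_automation_account(automation_account_name="my-account",
#                                   resource_group_name="my-rg")
#   pulumi.export("automationAccountState", result.state)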
|
py
|
1a5640fb2d56dcfc6a28ea5b7b7b2f9d085abf2c
|
"""main.py
# Facial Expression Recognition
**Author**: Christopher Holzweber
**Description**: Bachelorthesis - Prototype for FER
**Institution**: Johannes Kepler University Linz - Institute of Computational Perception
This file starts the FER application by creating an instance of the GUI class and calling its run() method.
"""
from Application.UserInterface import GUI
if __name__ == '__main__':
app = GUI()
app.run()
|
py
|
1a56412e9ea6edd8edfd2fcac6c7c83b0a737562
|
"""
This script creates a test that fails when garage.tf.algos.NPO performance is
too low.
"""
import gym
import pytest
import tensorflow as tf
from garage.envs import normalize
from garage.experiment import LocalRunner
from garage.tf.algos import NPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import TfGraphTestCase
class TestNPO(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.env = TfEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
self.policy = GaussianMLPPolicy(
env_spec=self.env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
self.baseline = GaussianMLPBaseline(
env_spec=self.env.spec,
regressor_args=dict(hidden_sizes=(32, 32)),
)
@pytest.mark.large
def test_npo_pendulum(self):
"""Test NPO with Pendulum environment."""
with LocalRunner(sess=self.sess) as runner:
algo = NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.98,
policy_ent_coeff=0.0)
runner.setup(algo, self.env)
last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
assert last_avg_ret > 20
def test_npo_with_unknown_pg_loss(self):
"""Test NPO with unkown pg loss."""
with pytest.raises(ValueError, match='Invalid pg_loss'):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
pg_loss='random pg_loss',
)
def test_npo_with_invalid_entropy_method(self):
"""Test NPO with invalid entropy method."""
with pytest.raises(ValueError, match='Invalid entropy_method'):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
entropy_method=None,
)
def test_npo_with_max_entropy_and_center_adv(self):
"""Test NPO with max entropy and center_adv."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
entropy_method='max',
center_adv=True,
)
def test_npo_with_max_entropy_and_no_stop_entropy_gradient(self):
"""Test NPO with max entropy and false stop_entropy_gradient."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
entropy_method='max',
stop_entropy_gradient=False,
)
def test_npo_with_invalid_no_entropy_configuration(self):
"""Test NPO with invalid no entropy configuration."""
with pytest.raises(ValueError):
NPO(
env_spec=self.env.spec,
policy=self.policy,
baseline=self.baseline,
entropy_method='no_entropy',
policy_ent_coeff=0.02,
)
def teardown_method(self):
self.env.close()
super().teardown_method()
|
py
|
1a5641b94a9f5e2f535b54d97decab7c48d63442
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostGroupsOperations(object):
"""DedicatedHostGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
host_group_name, # type: str
parameters, # type: "_models.DedicatedHostGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.DedicatedHostGroup"
"""Create or update a dedicated host group. For details of Dedicated Host and Dedicated Host
Groups please see [Dedicated Host Documentation]
(https://go.microsoft.com/fwlink/?linkid=2082596).
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param parameters: Parameters supplied to the Create Dedicated Host Group.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
host_group_name, # type: str
parameters, # type: "_models.DedicatedHostGroupUpdate"
**kwargs # type: Any
):
# type: (...) -> "_models.DedicatedHostGroup"
"""Update an dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param parameters: Parameters supplied to the Update Dedicated Host Group operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroupUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostGroupUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
host_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
host_group_name, # type: str
expand=None, # type: Optional[Union[str, "_models.InstanceViewTypes"]]
**kwargs # type: Any
):
# type: (...) -> "_models.DedicatedHostGroup"
"""Retrieves information about a dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
the list of instance views of the dedicated hosts under the dedicated host group. 'UserData' is
not supported for dedicated host group.
:type expand: str or ~azure.mgmt.compute.v2021_04_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DedicatedHostGroupListResult"]
"""Lists all of the dedicated host groups in the specified resource group. Use the nextLink
property in the response to get the next page of dedicated host groups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DedicatedHostGroupListResult"]
"""Lists all of the dedicated host groups in the subscription. Use the nextLink property in the
response to get the next page of dedicated host groups.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_04_01.models.DedicatedHostGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups'} # type: ignore
|
py
|
1a5641d1a349e8a1ce5f196156bec8686c67e8a2
|
"""Tests for check_live_state command."""
from datetime import datetime, timezone
from io import StringIO
from unittest import mock
from django.core.management import call_command
from django.test import TestCase
from botocore.stub import Stubber
from ..defaults import RAW, RUNNING
from ..factories import VideoFactory
from ..management.commands import check_live_state
class CheckLiveStateTest(TestCase):
"""Test check_live_state command."""
def test_check_live_state_command_no_running_live(self):
"""Command should do nothing when there is no running live."""
out = StringIO()
with Stubber(check_live_state.logs_client) as logs_client_stubber, Stubber(
check_live_state.medialive_client
) as medialive_stubber:
call_command("check_live_state", stdout=out)
logs_client_stubber.assert_no_pending_responses()
medialive_stubber.assert_no_pending_responses()
self.assertEqual("", out.getvalue())
out.close()
def test_check_live_state_live_running_not_ended(self):
"""A live is running and stream is on-going, the command should not stop it."""
VideoFactory(
id="0b791906-ccb3-4450-97cb-7b66fd9ad419",
created_on=datetime(2020, 8, 25, 0, 0, 0, tzinfo=timezone.utc),
live_state=RUNNING,
live_info={
"cloudwatch": {"logGroupName": "/aws/lambda/dev-test-marsha-medialive"},
"medialive": {
"channel": {"arn": "medialive:channel:arn", "id": "123456"}
},
},
live_type=RAW,
)
out = StringIO()
with Stubber(check_live_state.logs_client) as logs_client_stubber, Stubber(
check_live_state.medialive_client
) as medialive_stubber:
logs_client_stubber.add_response(
"filter_log_events",
expected_params={
"logGroupName": "/aws/lambda/dev-test-marsha-medialive",
"startTime": 1598313600000,
"filterPattern": (
"{"
'($.detail-type = "MediaLive Channel Alert") && '
'($.resources[0] = "medialive:channel:arn") &&'
'($.detail.alert_type = "RTMP Has No Audio/Video")'
"}"
),
},
service_response={
"events": [
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:19:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"0","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:19:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"1","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:19:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"CLEARED","alarm_id":"4e912d2be849354108d'
'ef40099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"1","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
],
},
)
call_command("check_live_state", stdout=out)
logs_client_stubber.assert_no_pending_responses()
medialive_stubber.assert_no_pending_responses()
self.assertIn(
"Checking video 0b791906-ccb3-4450-97cb-7b66fd9ad419", out.getvalue()
)
self.assertNotIn("Stopping channel with id 123456", out.getvalue())
self.assertNotIn("Channel stopped", out.getvalue())
out.close()
def test_check_live_state_running_ended_not_expired(self):
"""Live is ended and the delay is not expired."""
VideoFactory(
id="0b791906-ccb3-4450-97cb-7b66fd9ad419",
created_on=datetime(2020, 8, 25, 0, 0, 0, tzinfo=timezone.utc),
live_state=RUNNING,
live_info={
"cloudwatch": {"logGroupName": "/aws/lambda/dev-test-marsha-medialive"},
"medialive": {
"channel": {"arn": "medialive:channel:arn", "id": "123456"}
},
},
live_type=RAW,
)
out = StringIO()
with Stubber(check_live_state.logs_client) as logs_client_stubber, Stubber(
check_live_state.medialive_client
) as medialive_stubber, mock.patch(
"marsha.core.management.commands.check_live_state.generate_expired_date"
) as generate_expired_date_mock:
logs_client_stubber.add_response(
"filter_log_events",
expected_params={
"logGroupName": "/aws/lambda/dev-test-marsha-medialive",
"startTime": 1598313600000,
"filterPattern": (
"{"
'($.detail-type = "MediaLive Channel Alert") && '
'($.resources[0] = "medialive:channel:arn") &&'
'($.detail.alert_type = "RTMP Has No Audio/Video")'
"}"
),
},
service_response={
"events": [
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:19:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"0","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:25:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"1","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
],
},
)
generate_expired_date_mock.return_value = datetime(
2020, 8, 25, 12, 0, 0, tzinfo=timezone.utc
)
call_command("check_live_state", stdout=out)
logs_client_stubber.assert_no_pending_responses()
medialive_stubber.assert_no_pending_responses()
self.assertIn(
"Checking video 0b791906-ccb3-4450-97cb-7b66fd9ad419", out.getvalue()
)
self.assertNotIn("Stopping channel with id 123456", out.getvalue())
self.assertNotIn("Channel stopped", out.getvalue())
out.close()
def test_check_live_state_running_ended_and_expired(self):
"""Live is ended and the delay is expired. The channel must be stopped."""
VideoFactory(
id="0b791906-ccb3-4450-97cb-7b66fd9ad419",
created_on=datetime(2020, 8, 25, 0, 0, 0, tzinfo=timezone.utc),
live_state=RUNNING,
live_info={
"cloudwatch": {"logGroupName": "/aws/lambda/dev-test-marsha-medialive"},
"medialive": {
"channel": {"arn": "medialive:channel:arn", "id": "123456"}
},
},
live_type=RAW,
)
out = StringIO()
with Stubber(check_live_state.logs_client) as logs_client_stubber, mock.patch(
"marsha.core.management.commands.check_live_state.stop_live_channel"
) as mock_stop_live_channel, mock.patch(
"marsha.core.management.commands.check_live_state.generate_expired_date"
) as generate_expired_date_mock:
logs_client_stubber.add_response(
"filter_log_events",
expected_params={
"logGroupName": "/aws/lambda/dev-test-marsha-medialive",
"startTime": 1598313600000,
"filterPattern": (
"{"
'($.detail-type = "MediaLive Channel Alert") && '
'($.resources[0] = "medialive:channel:arn") &&'
'($.detail.alert_type = "RTMP Has No Audio/Video")'
"}"
),
},
service_response={
"events": [
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:19:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"0","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
{
"message": "".join(
"2020-08-24T12:19:38.401Z\t445f36d3-4210-4e14-840f-596d671f0db6\t"
"INFO\tReceived event: "
'{"version":"0","id":"a4c27e5a-28c5-d884-754c-39f2250897a1",'
'"detail-type":"MediaLive Channel Alert","source":"aws.medialive"'
',"account":"082219553157","time":"2020-08-25T12:25:37Z",'
'"region":"eu-west-1","resources":'
'["arn:aws:medialive:eu-west-1:082219553157:channel:3686054"],'
'"detail":{"alarm_state":"SET","alarm_id":"4e912d2be849354108def4'
'0099483af0ccb47763","alert_type":"RTMP Has No Audio/Video",'
'"pipeline":"1","channel_arn":"arn:aws:medialive:eu-west-1:'
'082219553157:channel:3686054","message":'
'"Waiting for RTMP input"}}\n',
)
},
],
},
)
generate_expired_date_mock.return_value = datetime(
2020, 8, 25, 12, 30, 0, tzinfo=timezone.utc
)
call_command("check_live_state", stdout=out)
logs_client_stubber.assert_no_pending_responses()
mock_stop_live_channel.assert_called_once()
self.assertIn(
"Checking video 0b791906-ccb3-4450-97cb-7b66fd9ad419", out.getvalue()
)
self.assertIn("Stopping channel with id 123456", out.getvalue())
self.assertIn("Channel stopped", out.getvalue())
out.close()
|
py
|
1a5641d70eaba073a249d989a2ed896a4a0f8b44
|
# Copyright 2018-2019 Geek Guild Co., Ltd.
# ==============================================================================
import glob
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import os
"""Gif module.
"""
def generate_gif_animation(src_file_path_list, dst_file_path, interval=500, repeat_delay=1000):
# prepare figure
fig = plt.figure()
    # clean the figure edges
ax = plt.subplot(1, 1, 1)
ax.spines['right'].set_color('None')
ax.spines['top'].set_color('None')
ax.spines['left'].set_color('None')
ax.spines['bottom'].set_color('None')
    ax.tick_params(axis='x', which='both', top=False, bottom=False, labelbottom=False)
    ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
image_list = []
for src_file_path in src_file_path_list:
image_file = Image.open(src_file_path)
# spline36
image_list.append([plt.imshow(image_file, interpolation="spline36")])
# animation
ani = animation.ArtistAnimation(fig, image_list, interval=interval, repeat_delay=repeat_delay)
ani.save(dst_file_path)
if __name__ == '__main__':
# sample
src_dir_path = "/var/tensorflow/tsp/sample/animation/"
src_file_path_list = glob.glob(src_dir_path + "*.png")
    # write the animation next to the source frames (output file name is arbitrary)
    generate_gif_animation(src_file_path_list, src_dir_path + "animation.gif")
|
py
|
1a5643a352703ca317abd3dcee7bcecf96d2b43c
|
from ektelo import util
from ektelo.mixins import Marshallable
import hashlib
class Base(Marshallable):
def __init__(self, short_name=None):
if short_name is None:
self.short_name = 'ektelo_' + self.__class__.__name__
else:
self.short_name = short_name
@property
def hash(self):
m = hashlib.sha1()
m.update(util.prepare_for_hash(self.short_name))
for key, value in util.prepare_for_hash(self.init_params).items():
m.update(key.encode('utf-8'))
m.update(repr(value).encode('utf-8'))
return m.hexdigest()
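# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# ektelo package is importable, that a plain subclass of Base can be
# instantiated, and that `init_params` is available on the instance --
# Marshallable normally supplies it, but it is set explicitly below so the
# example is self-contained. `ExampleOperator` and `epsilon` are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class ExampleOperator(Base):
        def __init__(self, epsilon=1.0):
            super().__init__(short_name='ektelo_ExampleOperator')
            self.init_params = {'epsilon': epsilon}
    # Identical short_name + init_params give identical digests; changing a
    # parameter changes the hash.
    a, b, c = ExampleOperator(0.5), ExampleOperator(0.5), ExampleOperator(1.0)
    print(a.hash == b.hash, a.hash == c.hash)  # expected: True False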
|
py
|
1a5644522062ddf5ccfae5304b45da1870fecb48
|
from .supported import EXCEPTIONS, LICENSES
def normalize_license_expression(license_expression):
if not license_expression:
return license_expression
# First normalize to lower case so we can look up licenses/exceptions
# and so boolean operators are Python-compatible
license_expression = license_expression.lower()
# Then pad parentheses so tokenization can be achieved by merely splitting on white space
license_expression = license_expression.replace('(', ' ( ').replace(')', ' ) ')
# Now we begin parsing
tokens = license_expression.split()
# Rather than implementing boolean logic we create an expression that Python can parse.
# Everything that is not involved with the grammar itself is treated as `False` and the
# expression should evaluate as such.
python_tokens = []
for token in tokens:
if token not in ('or', 'and', 'with', '(', ')'):
python_tokens.append('False')
elif token == 'with':
python_tokens.append('or')
elif token == '(' and python_tokens and python_tokens[-1] not in ('or', 'and'):
raise ValueError('Invalid license expression')
else:
python_tokens.append(token)
python_expression = ' '.join(python_tokens)
try:
assert eval(python_expression) is False
except Exception:
raise ValueError('Invalid license expression')
# Take a final pass to check for unknown licenses/exceptions
normalized_tokens = []
for token in tokens:
if token in ('or', 'and', 'with', '(', ')'):
normalized_tokens.append(token.upper())
continue
if normalized_tokens and normalized_tokens[-1] == 'WITH':
if token not in EXCEPTIONS:
raise ValueError('Unknown license exception: {}'.format(token))
normalized_tokens.append(EXCEPTIONS[token]['id'])
else:
if token.endswith('+'):
token = token[:-1]
suffix = '+'
else:
suffix = ''
if token not in LICENSES:
raise ValueError('Unknown license: {}'.format(token))
normalized_tokens.append(LICENSES[token]['id'] + suffix)
# Construct the normalized expression
normalized_expression = ' '.join(normalized_tokens)
# Fix internal padding for parentheses
normalized_expression = normalized_expression.replace('( ', '(').replace(' )', ')')
return normalized_expression
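# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The inputs are
# hypothetical and the exact output depends on the LICENSES/EXCEPTIONS tables
# in `.supported`, which are assumed to map lower-cased SPDX identifiers such
# as 'mit', 'apache-2.0' and 'bsd-3-clause' to their canonical ids. Because of
# the relative import above, run this as a module (python -m <package>.<name>)
# rather than as a script.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(normalize_license_expression('mit or (apache-2.0 and bsd-3-clause)'))
    # expected with standard SPDX tables: MIT OR (Apache-2.0 AND BSD-3-Clause)
    print(normalize_license_expression('mit+'))
    # expected: MIT+
    try:
        normalize_license_expression('mit and/or apache-2.0')
    except ValueError as exc:
        print(exc)  # Invalid license expression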
|
py
|
1a56448a399f5e035dc6143a19e9e071c066d4f3
|
from django.contrib import admin
from .models import subjectModel
# Register your models here.
admin.site.register(subjectModel)
|
py
|
1a5644a048af1c7dfd0a8c87983aa816c2964487
|
import sqlite3
import databaseCreate
# file for deleting the chosen entries
#RUN THE SQL STATEMENT TO DELETE THE SELECTED RECORD
db=sqlite3.connect("SongStorage.db")
def deleteSong(songToDelete):
try:
databaseCreate.createDb()
delete = "DELETE song FROM song WHERE title = ?",(songToDelete,)
cur = db.cursor()
cur.execute(delete)
db.commit()
cur.close()
db.close()
input("Item deleted, press enter to continue: ")
except:
print ("THERE WAS A PROBLEM DELETING THE RECORD")
input("Press Enter to continue: ")
#TAKE USER INPUT AND RUN FUNCTION TO DELETE THE SELECTED RECORD
def DeleteItem():
print ("===============================")
print ("DELEte A SONG RECORD:")
print ("===============================")
    songToDelete = input("\nEnter the title of the song to delete:\t")
    select = "SELECT * FROM song WHERE title = ?"
try:
cur = db.cursor()
        cur.execute(select, (songToDelete,))
        searchResult = cur.fetchall()
        if not searchResult:
raise
except:
print ("THERE WAS A PROBLEM ACCESSING THE RECORD IN THE DATABASE!")
input("Press Enter to continue: ")
return
print( "===============================")
print ("delete song RECORD:")
print( "===============================")
print ("1 - Title:\t" + modifyResult[0][0])
print ("2 - Star:\t" + modifyResult[0][1])
print ("3 - Costar:\t" + modifyResult[0][2])
print ("4 - Year:\t" + modifyResult[0][3])
print ("5 - Genre\t"+ modifyResult[0][4])
print ("===============================")
input("Press enter to continue: ")
print("""
Are you sure you want to delete? Enter a choice and press enter
(Y/y = yes, Anything else = No)
""")
choice = input("\t")
if (choice == "Y" or choice == "y"):
deleteSong(songToDelete)
else:
        cur.close()
db.close()
input("Item NOT deleted, press enter to continue: ")
|
py
|
1a5646ad5e098bb069b7a5ef68300936cadfdc83
|
import numpy as np
from zest import zest
from plumbum import local
from plaster.run.sigproc_v2 import synth
from plaster.tools.test_tools.test_tools import (
integration_before,
integration_after,
run_p,
)
# The only reason I'm not entirely deleting sigproc_v1 is because
# Angela has a paper in the works
@zest.skip("sigproc_v1 deprecated")
@zest.group("integration")
def zest_sigproc_v1_integration():
# If test_folder is None it will be set by _before()
# otherwise you can set it here to reuse a debugging folder
test_folder = None
# test_folder = (
# "/erisyon/internal/jobs_folder/_integration_tests/it_runs_sigproc_v1/1613159055"
# )
dim = (512, 512)
amps_constant_val = 6000
n_peaks = 1000
bg_mean = 100.0
bg_std = 30.0
channel_scale_factor = (1.0, 0.5)
channel_peak_width_scale_factor = (1.0, 0.8)
psf_width = 1.5
def _synth_field(n_cycles):
with synth.Synth(n_channels=2, n_cycles=n_cycles, dim=dim) as s:
peaks = (
synth.PeaksModelGaussianCircular(n_peaks=n_peaks)
.locs_randomize()
.widths_uniform(psf_width)
.channel_scale_factor(channel_scale_factor)
.channel_peak_width_scale_factor(channel_peak_width_scale_factor)
)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
return s, peaks
def _synth_save_field(s, fl_i, source_folder):
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(
str(
source_folder
/ f"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy"
),
chcy_ims[ch_i, cy_i],
)
def _synth_dyts(source_folder):
n_fields = 3
for fl_i in range(n_fields):
s, peaks = _synth_field(n_cycles=4)
peaks.dyt_random_choice(
[[3, 2, 2, 1], [2, 1, 1, 0], [1, 0, 0, 0]], [0.5, 0.3, 0.2]
)
peaks.gain_constant(amps_constant_val)
_synth_save_field(s, fl_i, source_folder)
def _it_runs_sigproc_v1():
job_folder = test_folder / "sigproc_v1"
source_folder = test_folder / "sigproc_source"
source_folder.mkdir()
_synth_dyts(source_folder)
run_p(
[
f"gen",
f"sigproc_v1",
f"--job={job_folder}",
f"--sigproc_source={source_folder}",
f"--sample=foo",
f"--force",
]
)
run_p(["run", job_folder, "--no_progress"])
# def _it_runs_sigclass():
# """
# Test that classification can run on sigprov_v2 data
# """
#
# csv_file = test_folder / "protein.csv"
# job_folder = test_folder / "sigproc_v1_class"
# source_folder = test_folder / "sigproc_source"
# source_folder.mkdir()
#
# csv_contents = (
# "Name,Abundance,POI,Seq\n"
# "ACE,28,1,"
# "MGAASGRRGPGLLLPLPLLLLLPPQPALALDPGLQPGNFSADEAGAQLFAQSYNSSAEQV"
# "LFQSVAASWAHDTNITAENARRQEEAALLSQEFAEAWGQKAKELYEPIWQNFTDPQLRRI"
# "IGAVRTLGSANLPLAKRQQYNALLSNMSRIYSTAKVCLPNKTATCWSLDPDLTNILASSR"
# "SYAMLLFAWEGWHNAAGIPLKPLYEDFTALSNEAYKQDGFTDTGAYWRSWYNSPTFEDDL"
# "EHLYQQLEPLYLNLHAFVRRALHRRYGDRYINLRGPIPAHLLGDMWAQSWENIYDMVVPF"
# "PDKPNLDVTSTMLQQGWNATHMFRVAEEFFTSLELSPMPPEFWEGSMLEKPADGREVVCH"
# "ASAWDFYNRKDFRIKQCTRVTMDQLSTVHHEMGHIQYYLQYKDLPVSLRRGANPGFHEAI"
# "GDVLALSVSTPEHLHKIGLLDRVTNDTESDINYLLKMALEKIAFLPFGYLVDQWRWGVFS"
# "GRTPPSRYNFDWWYLRTKYQGICPPVTRNETHFDAGAKFHVPNVTPYIRYFVSFVLQFQF"
# "HEALCKEAGYEGPLHQCDIYRSTKAGAKLRKVLQAGSSRPWQEVLKDMVGLDALDAQPLL"
# "KYFQPVTQWLQEQNQQNGEVLGWPEYQWHPPLPDNYPEGIDLVTDEAEASKFVEEYDRTS"
# "QVVWNEYAEANWNYNTNITTETSKILLQKNMQIANHTLKYGTQARKFDVNQLQNTTIKRI"
# "IKKVQDLERAALPAQELEEYNKILLDMETTYSVATVCHPNGSCLQLEPDLTNVMATSRKY"
# "EDLLWAWEGWRDKAGRAILQFYPKYVELINQAARLNGYVDAGDSWRSMYETPSLEQDLER"
# "LFQELQPLYLNLHAYVRRALHRHYGAQHINLEGPIPAHLLGNMWAQTWSNIYDLVVPFPS"
# "APSMDTTEAMLKQGWTPRRMFKEADDFFTSLGLLPVPPEFWNKSMLEKPTDGREVVCHAS"
# "AWDFYNGKDFRIKQCTTVNLEDLVVAHHEMGHIQYFMQYKDLPVALREGANPGFHEAIGD"
# "VLALSVSTPKHLHSLNLLSSEGGSDEHDINFLMKMALDKIAFIPFSYLVDQWRWRVFDGS"
# "ITKENYNQEWWSLRLKYQGLCPPVPRTQGDFDPGAKFHIPSSVPYIRYFVSFIIQFQFHE"
# "ALCQAAGHTGPLHKCDIYQSKEAGQRLATAMKLGFSRPWPEAMQLITGQPNMSASAMLSY"
# "FKPLLDWLRTENELHGEKLGWPQYNWTPNSARSEGPLPDSGRVSFLGLDLDAQQARVGQW"
# "LLLFLGIALLVATLGLSQRLFSIRHRSLHRHSHGPQFGSEVELRHS\n"
# "ACE2,12,1,"
# "MSSSSWLLLSLVAVTAAQSTIEEQAKTFLDKFNHEAEDLFYQSSLASWNYNTNITEENVQ"
# "NMNNAGDKWSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQQNGSSVLSEDKSKRLNTIL"
# "NTMSTIYSTGKVCNPDNPQECLLLEPGLNEIMANSLDYNERLWAWESWRSEVGKQLRPLY"
# "EEYVVLKNEMARANHYEDYGDYWRGDYEVNGVDGYDYSRGQLIEDVEHTFEEIKPLYEHL"
# "HAYVRAKLMNAYPSYISPIGCLPAHLLGDMWGRFWTNLYSLTVPFGQKPNIDVTDAMVDQ"
# "AWDAQRIFKEAEKFFVSVGLPNMTQGFWENSMLTDPGNVQKAVCHPTAWDLGKGDFRILM"
# "CTKVTMDDFLTAHHEMGHIQYDMAYAAQPFLLRNGANEGFHEAVGEIMSLSAATPKHLKS"
# "IGLLSPDFQEDNETEINFLLKQALTIVGTLPFTYMLEKWRWMVFKGEIPKDQWMKKWWEM"
# "KREIVGVVEPVPHDETYCDPASLFHVSNDYSFIRYYTRTLYQFQFQEALCQAAKHEGPLH"
# "KCDISNSTEAGQKLFNMLRLGKSEPWTLALENVVGAKNMNVRPLLNYFEPLFTWLKDQNK"
# "NSFVGWSTDWSPYADQSIKVRISLKSALGDKAYEWNDNEMYLFRSSVAYAMRQYFLKVKN"
# "QMILFGEEDVRVANLKPRISFNFFVTAPKNVSDIIPRTEVEKAIRMSRSRINDAFRLNDN"
# "SLEFLGIQPTLGPPNQPPVSIWLIVFGVVMGVIVVGIVILIFTGIRDRKKKNKARSGENP"
# "YASIDISKGENNPGFQNTDDVQTSF\n"
# "ACTB,7,1,"
# "MDDDIAALVVDNGSGMCKAGFAGDDAPRAVFPSIVGRPRHQGVMVGMGQKDSYVGDEAQS"
# "KRGILTLKYPIEHGIVTNWDDMEKIWHHTFYNELRVAPEEHPVLLTEAPLNPKANREKMT"
# "QIMFETFNTPAMYVAIQAVLSLYASGRTTGIVMDSGDGVTHTVPIYEGYALPHAILRLDL"
# "AGRDLTDYLMKILTERGYSFTTTAEREIVRDIKEKLCYVALDFEQEMATAASSSSLEKSY"
# "ELPDGQVITIGNERFRCPEALFQPSFLGMESCGIHETTFNSIMKCDVDIRKDLYANTVLS"
# "GGTTMYPGIADRMQKEITALAPSTMKIKIIAPPERKYSVWIGGSILASLSTFQQMWISKQ"
# "EYDESGPSIVHRKCF\n"
# )
# csv_contents = "\n".join(csv_contents.split()) + "\n"
# csv_file.write(csv_contents)
#
# _synth_dyts(source_folder)
#
# # DEBUGGING HINT: Comment out following to debug report only
# run_p(
# [
# f"gen",
# f"classify_v1",
# f"--sample=KidneyBiomarkers1",
# f"--job={job_folder}",
# f"--protein_csv={csv_file}",
# f"--label_set=C,DE",
# f"--protease=trypsin",
# f"--n_pres=1",
# f"--n_mocks=1",
# f"--n_edmans=2",
# f"--rf",
# f"--decoys=reverse",
# f"--force",
# f"--sigproc_source={source_folder}",
# ]
# )
#
# run_p(["run", job_folder, "--no_progress"])
def _before():
nonlocal test_folder
if test_folder is None:
test_folder = integration_before()
test_folder = local.path(test_folder)
def _after():
integration_after()
def it_runs_sigproc_v1():
# zest randomizes order so the following ensures order...
# WHEN DEBUGGING:
        # * Set the test_folder to the folder that failed rather than None
# (indicated in the blue trace).
# * Comment out the following tests that you don't need to iterate on
        # (Example: _it_runs_calib and _it_runs_sigproc_v2_with_calib have already passed
# and you just want to run _it_runs_sigclass over and over until it passes)
_it_runs_sigproc_v1()
# _it_runs_sigclass()
zest()
|
py
|
1a56478893c01d65069822bfa4ab5100275e468f
|
#!/usr/bin/env python3
import os
import subprocess
from typing import List, Optional
from functools import lru_cache
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
TESTED_BRANCHES = ['devel', 'release3-staging', 'dashcam3-staging', 'release3', 'dashcam3']
training_version: bytes = b"0.2.0"
terms_version: bytes = b"2"
def cache(user_function, /):
return lru_cache(maxsize=None)(user_function)
def run_cmd(cmd: List[str]) -> str:
return subprocess.check_output(cmd, encoding='utf8').strip()
def run_cmd_default(cmd: List[str], default: Optional[str] = None) -> Optional[str]:
try:
return run_cmd(cmd)
except subprocess.CalledProcessError:
return default
@cache
def get_commit(branch: str = "HEAD", default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", branch], default=default)
@cache
def get_short_branch(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "HEAD"], default=default)
@cache
def get_branch(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], default=default)
@cache
def get_origin(default: Optional[str] = None) -> Optional[str]:
try:
local_branch = run_cmd(["git", "name-rev", "--name-only", "HEAD"])
tracking_remote = run_cmd(["git", "config", "branch." + local_branch + ".remote"])
return run_cmd(["git", "config", "remote." + tracking_remote + ".url"])
except subprocess.CalledProcessError: # Not on a branch, fallback
return run_cmd_default(["git", "config", "--get", "remote.origin.url"], default=default)
@cache
def get_normalized_origin(default: Optional[str] = None) -> Optional[str]:
origin: Optional[str] = get_origin()
if origin is None:
return default
return origin.replace("git@", "", 1) \
.replace(".git", "", 1) \
.replace("https://", "", 1) \
.replace(":", "/", 1)
@cache
def get_version() -> str:
with open(os.path.join(BASEDIR, "common", "version.h")) as _versionf:
version = _versionf.read().split('"')[1]
return version
@cache
def get_short_version() -> str:
return get_version().split('-')[0] # type: ignore
@cache
def is_prebuilt() -> bool:
return os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
@cache
def is_comma_remote() -> bool:
# note to fork maintainers, this is used for release metrics. please do not
# touch this to get rid of the orange startup alert. there's better ways to do that
origin: Optional[str] = get_origin()
if origin is None:
return False
return origin.startswith('[email protected]:sunnyhaibin') or origin.startswith('https://github.com/sunnyhaibin')
@cache
def is_tested_branch() -> bool:
return get_short_branch() in TESTED_BRANCHES
@cache
def is_dirty() -> bool:
origin = get_origin()
branch = get_branch()
if (origin is None) or (branch is None):
return True
dirty = False
try:
# Actually check dirty files
if not is_prebuilt():
# This is needed otherwise touched files might show up as modified
try:
subprocess.check_call(["git", "update-index", "--refresh"])
except subprocess.CalledProcessError:
pass
dirty = (subprocess.call(["git", "diff-index", "--quiet", branch, "--"]) != 0)
except subprocess.CalledProcessError:
cloudlog.exception("git subprocess failed while checking dirty")
dirty = True
return dirty
if __name__ == "__main__":
from common.params import Params
params = Params()
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
print(f"Dirty: {is_dirty()}")
print(f"Version: {get_version()}")
print(f"Short version: {get_short_version()}")
print(f"Origin: {get_origin()}")
print(f"Normalized origin: {get_normalized_origin()}")
print(f"Branch: {get_branch()}")
print(f"Short branch: {get_short_branch()}")
print(f"Prebuilt: {is_prebuilt()}")
|
py
|
1a56486094bd260f089aeb65a5b8fbf54a2387bc
|
import pickle
import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from archived.elasticache.Memcached import memcached_init
# NOTE: hset_object / hget_object are used below for parameter exchange; they
# are expected to come from the project's elasticache helpers (their exact
# import path is not shown in this snippet).
from archived.s3.get_object import get_object
from archived.s3 import put_object
from archived.old_model import LogisticRegression
from data_loader.libsvm_dataset import DenseDatasetWithLines
# lambda setting
grad_bucket = "async-grads"
model_bucket = "async-updates"
local_dir = "/tmp"
w_prefix = "w_"
b_prefix = "b_"
w_grad_prefix = "w_grad_"
b_grad_prefix = "b_grad_"
# algorithm setting
learning_rate = 0.1 #np.arange(0.09,0.15,0.01)
batch_size = 100000
num_epochs = 55
validation_ratio = .2
shuffle_dataset = True
random_seed = 42
def handler(event, context):
start_time = time.time()
bucket = event['bucket']
key = event['name']
num_features = event['num_features']
num_classes = event['num_classes']
elasti_location = event['elasticache']
endpoint = memcached_init(elasti_location)
print('bucket = {}'.format(bucket))
print('key = {}'.format(key))
key_splits = key.split("_")
worker_index = int(key_splits[0])
num_worker = event['num_files']
batch_size = 100000
batch_size = int(np.ceil(batch_size/num_worker))
torch.manual_seed(random_seed)
# read file(dataset) from s3
file = get_object(bucket, key).read().decode('utf-8').split("\n")
print("read data cost {} s".format(time.time() - start_time))
parse_start = time.time()
dataset = DenseDatasetWithLines(file, num_features)
preprocess_start = time.time()
print("libsvm operation cost {}s".format(parse_start - preprocess_start))
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
print("dataset size = {}".format(dataset_size))
indices = list(range(dataset_size))
split = int(np.floor(validation_ratio * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=valid_sampler)
print("preprocess data cost {} s".format(time.time() - preprocess_start))
model = LogisticRegression(num_features, num_classes)
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loss = []
test_loss = []
test_acc = []
total_time = 0
# Training the Model
epoch_start = time.time()
for epoch in range(num_epochs):
tmp_train = 0
for batch_index, (items, labels) in enumerate(train_loader):
#batch_start = time.time()
print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
items = Variable(items.view(-1, num_features))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = model(items)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
w = model.linear.weight.data.numpy()
b = model.linear.bias.data.numpy()
file_postfix = "{}_{}".format(batch_index,epoch)
            # asynchronous exchange: each worker publishes its current parameters
            # (w, b) for this batch/epoch and pulls the latest published values
if batch_index == 0 and epoch == 0:
hset_object(endpoint, model_bucket, w_prefix, w.tobytes())
hset_object(endpoint, model_bucket, b_prefix, b.tobytes())
time.sleep(0.0001)
#randomly get one from others. (Asynchronized)
w_new = np.fromstring(hget_object(endpoint, model_bucket, w_prefix), dtype=w.dtype).reshape(w.shape)
b_new = np.fromstring(hget_object(endpoint, model_bucket, b_prefix), dtype=b.dtype).reshape(b.shape)
else:
w_new = np.fromstring(hget_object(endpoint, model_bucket, w_prefix), dtype=w.dtype).reshape(w.shape)
b_new = np.fromstring(hget_object(endpoint, model_bucket, b_prefix), dtype=b.dtype).reshape(b.shape)
hset_object(endpoint, model_bucket, w_prefix, w.tobytes())
hset_object(endpoint, model_bucket, b_prefix, b.tobytes())
model.linear.weight.data = torch.from_numpy(w_new)
model.linear.bias.data = torch.from_numpy(b_new)
#report train loss and test loss for every mini batch
if (batch_index + 1) % 1 == 0:
print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
% (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size, loss.data))
tmp_train += loss.item()
total_time += time.time()-epoch_start
train_loss.append(tmp_train)
tmp_test, tmp_acc = test(model, validation_loader, criterion)
test_loss.append(tmp_test)
test_acc.append(tmp_acc)
epoch_start = time.time()
print("total time = {}".format(total_time))
end_time = time.time()
print("elapsed time = {} s".format(end_time - start_time))
loss_record = [test_loss, test_acc, train_loss, total_time]
put_object("async-model-loss", "async-loss{}".format(worker_index), pickle.dumps(loss_record))
def test(model, testloader, criterion):
# Test the Model
correct = 0
total = 0
total_loss = 0
count = 0
with torch.no_grad():
for items,labels in testloader:
outputs = model(items)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
loss = criterion(outputs,labels)
total_loss += loss.data
count = count+1
return total_loss/count, float(correct)/float(total)*100
|
py
|
1a564a87a5da4143ee06a4fc1227269e84044f2c
|
import pandas as pd
import numpy as np
import scipy.stats
from pandas import DataFrame
from typing import List
from blacksheep._constants import *
SampleList = List[str]
def _convert_to_outliers(
df: DataFrame, samples: SampleList, num_iqrs: float, up_or_down: str
) -> DataFrame:
"""Calls outliers on a given values table.
Args:
df: Input DataFrame with samples as columns and genes or sites as rows. \
Index should be an identifier for each row.
samples: List of samples to be considered in the distribution when defining median, \
IQR and outliers.
num_iqrs: How many inter-quartile ranges (IQRs) above or below the median to consider \
something an outlier.
up_or_down: Whether to call outliers above the median (up) or below the median (down)
Returns:
A DataFrame with outlier calls for each value. 0 means not an outlier; 1 means there is \
an outlier. Missing values are propagated.
"""
if num_iqrs <= 0:
raise ValueError("num_iqrs must be greater than 0")
df = df.copy()
df[row_iqr_name] = scipy.stats.iqr(df[samples], axis=1, nan_policy="omit")
df[row_median_name] = np.nanquantile(df[samples], q=0.5, axis=1)
outlier_df = pd.DataFrame()
if up_or_down == "up":
df[row_upper_bound_name] = df[row_median_name] + (num_iqrs * df[row_iqr_name])
outlier_df[samples] = (
df[samples].gt(df[row_upper_bound_name], axis=0).astype(int)
)
outlier_df[df[samples].isnull()] = np.nan
return outlier_df
elif up_or_down == "down":
df[row_lower_bound_name] = df[row_median_name] - (num_iqrs * df[row_iqr_name])
outlier_df[samples] = (
df[samples].lt(df[row_lower_bound_name], axis=0).astype(int)
)
outlier_df[df[samples].isnull()] = np.nan
return outlier_df
raise ValueError("up_or_down must be either 'up' or 'down'")
def _convert_to_counts(
df: DataFrame, samples: SampleList, aggregate: bool, ind_sep: str
) -> DataFrame:
"""Counts outliers and non-outlier values for each sample and each row (if aggregate=False)
or each unique identifier (if aggregate=True).
Args:
        df: Outlier DataFrame from the _convert_to_outliers function.
        samples: List of samples to consider. Should be same list as input to _convert_to_outliers.
aggregate: Whether or not to collapse multiple rows with identical identifiers before \
            a separator. Collapsing values is done by counting outliers and non-outliers for each \
sample per unique identifier.
ind_sep: The separator used in the index to separate a more general ID and less specific \
            ID. e.g. in RAG2-S365 the separator is "-".
Returns:
Outlier DataFrame that has a column with counts of non-outliers and outliers for \
each sample, with a row for each input site (if no aggregation) or each unique identifier \
(with aggregation). This table is the input for the comparison function.
"""
not_outlier_cols = [x + col_seps + col_not_outlier_suffix for x in samples]
outlier_cols = [x + col_seps + col_outlier_suffix for x in samples]
if aggregate:
df.index = [ind.split(ind_sep)[0] for ind in df.index]
df_inv = df == 0
output_df = pd.DataFrame()
output_df[not_outlier_cols] = df_inv.groupby(level=0)[samples].sum()
output_df[outlier_cols] = df.groupby(level=0)[samples].sum()
elif not aggregate:
output_df = pd.DataFrame(index=df.index)
output_df[outlier_cols] = df[samples]
output_df[not_outlier_cols] = 1 - df[samples]
return output_df
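# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The sample names,
# toy values and identifier index below are hypothetical; real callers pass a
# full values matrix. Assumes the blacksheep package (for _constants) is
# importable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    toy = pd.DataFrame(
        {"s1": [1.0, 5.0], "s2": [1.1, 5.2], "s3": [0.9, 4.8], "s4": [10.0, 5.1]},
        index=["GENEA-S1", "GENEA-S2"],
    )
    samples = ["s1", "s2", "s3", "s4"]
    # 1 marks values more than 1.5 IQRs above the row median, 0 otherwise.
    up_outliers = _convert_to_outliers(toy, samples, num_iqrs=1.5, up_or_down="up")
    # Collapse the two GENEA-* rows into per-sample outlier / not-outlier counts.
    counts = _convert_to_counts(up_outliers, samples, aggregate=True, ind_sep="-")
    print(up_outliers)
    print(counts)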
|
py
|
1a564c3ca2af98512f03ae2a27c0118b43d1116d
|
from functools import partial
from inspect import signature
from itertools import product
from itertools import chain
from itertools import permutations
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import d2_tweedie_score
from sklearn.metrics import det_curve
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import max_error
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import mean_poisson_deviance
from sklearn.metrics import mean_gamma_deviance
from sklearn.metrics import median_absolute_error
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import mean_pinball_loss
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import zero_one_loss
from sklearn.metrics import ndcg_score
from sklearn.metrics import dcg_score
from sklearn.metrics import top_k_accuracy_score
from sklearn.metrics._base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"max_error": max_error,
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"mean_pinball_loss": mean_pinball_loss,
"median_absolute_error": median_absolute_error,
"mean_absolute_percentage_error": mean_absolute_percentage_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput="variance_weighted"),
"mean_normal_deviance": partial(mean_tweedie_deviance, power=0),
"mean_poisson_deviance": mean_poisson_deviance,
"mean_gamma_deviance": mean_gamma_deviance,
"mean_compound_poisson_deviance": partial(mean_tweedie_deviance, power=1.4),
"d2_tweedie_score": partial(d2_tweedie_score, power=1.4),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"adjusted_balanced_accuracy_score": partial(balanced_accuracy_score, adjusted=True),
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    # `confusion_matrix` returns absolute values and hence behaves unnormalized.
    # Naming it with an unnormalized_ prefix is necessary for this module to
# skip sample_weight scaling checks which will fail for unnormalized
# metrics.
"unnormalized_confusion_matrix": confusion_matrix,
"normalized_confusion_matrix": lambda *args, **kwargs: (
confusion_matrix(*args, **kwargs).astype("float")
/ confusion_matrix(*args, **kwargs).sum(axis=1)[:, np.newaxis]
),
"unnormalized_multilabel_confusion_matrix": multilabel_confusion_matrix,
"unnormalized_multilabel_confusion_matrix_sample": partial(
multilabel_confusion_matrix, samplewise=True
),
"hamming_loss": hamming_loss,
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"jaccard_score": jaccard_score,
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"weighted_jaccard_score": partial(jaccard_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"micro_jaccard_score": partial(jaccard_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"macro_jaccard_score": partial(jaccard_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"samples_jaccard_score": partial(jaccard_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
"""
The dimensions of precision-recall pairs and the threshold array as
returned by the precision_recall_curve do not match. See
func:`sklearn.metrics.precision_recall_curve`
    This prevents implicit conversion of return value triple to a higher
    dimensional np.array of dtype('float64') (it will be of dtype('object')
instead). This again is needed for assert_array_equal to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimension of precision and recall arrays respectively.
"""
precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
pad_threshholds = len(precision) - len(thresholds)
return np.array(
[
precision,
recall,
np.pad(
thresholds.astype(np.float64),
pad_width=(0, pad_threshholds),
mode="constant",
constant_values=[np.nan],
),
]
)
CURVE_METRICS = {
"roc_curve": roc_curve,
"precision_recall_curve": precision_recall_curve_padded_thresholds,
"det_curve": det_curve,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score, # default: average="macro"
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovr"),
"weighted_ovr_roc_auc": partial(
roc_auc_score, average="weighted", multi_class="ovr"
),
"ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovo"),
"weighted_ovo_roc_auc": partial(
roc_auc_score, average="weighted", multi_class="ovo"
),
"partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
"average_precision_score": average_precision_score, # default: average="macro"
"weighted_average_precision_score": partial(
average_precision_score, average="weighted"
),
"samples_average_precision_score": partial(
average_precision_score, average="samples"
),
"micro_average_precision_score": partial(average_precision_score, average="micro"),
"label_ranking_average_precision_score": label_ranking_average_precision_score,
"ndcg_score": ndcg_score,
"dcg_score": dcg_score,
"top_k_accuracy_score": top_k_accuracy_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
ALL_METRICS.update(CURVE_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = {
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"samples_jaccard_score",
"coverage_error",
"unnormalized_multilabel_confusion_matrix_sample",
"label_ranking_loss",
"label_ranking_average_precision_score",
"dcg_score",
"ndcg_score",
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
"brier_score_loss",
"micro_roc_auc",
"samples_roc_auc",
"partial_roc_auc",
"roc_auc_score",
"weighted_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
"jaccard_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
# curves
"roc_curve",
"precision_recall_curve",
"det_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
METRIC_UNDEFINED_MULTICLASS
)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
"jaccard_score",
}
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = {
"roc_auc_score",
"average_precision_score",
"partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
"roc_curve",
"precision_recall_curve",
"det_curve",
"brier_score_loss",
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
"jaccard_score",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = {
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"det_curve",
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
"jaccard_score",
"weighted_f0.5_score",
"weighted_f1_score",
"weighted_f2_score",
"weighted_precision_score",
"weighted_recall_score",
"weighted_jaccard_score",
"micro_f0.5_score",
"micro_f1_score",
"micro_f2_score",
"micro_precision_score",
"micro_recall_score",
"micro_jaccard_score",
"macro_f0.5_score",
"macro_f1_score",
"macro_f2_score",
"macro_precision_score",
"macro_recall_score",
"macro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"unnormalized_multilabel_confusion_matrix_sample",
"cohen_kappa_score",
}
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = {
"accuracy_score",
"top_k_accuracy_score",
"zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = {
"log_loss",
"unnormalized_log_loss",
"roc_auc_score",
"weighted_roc_auc",
"samples_roc_auc",
"micro_roc_auc",
"partial_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"samples_average_precision_score",
"micro_average_precision_score",
"coverage_error",
"label_ranking_loss",
"ndcg_score",
"dcg_score",
"label_ranking_average_precision_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
"accuracy_score",
"unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss",
"unnormalized_zero_one_loss",
"weighted_f0.5_score",
"weighted_f1_score",
"weighted_f2_score",
"weighted_precision_score",
"weighted_recall_score",
"weighted_jaccard_score",
"macro_f0.5_score",
"macro_f1_score",
"macro_f2_score",
"macro_precision_score",
"macro_recall_score",
"macro_jaccard_score",
"micro_f0.5_score",
"micro_f1_score",
"micro_f2_score",
"micro_precision_score",
"micro_recall_score",
"micro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"samples_jaccard_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
"mean_absolute_error",
"median_absolute_error",
"mean_squared_error",
"r2_score",
"explained_variance_score",
"mean_absolute_percentage_error",
"mean_pinball_loss",
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = {
"accuracy_score",
"unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss",
"unnormalized_zero_one_loss",
"micro_jaccard_score",
"macro_jaccard_score",
"jaccard_score",
"samples_jaccard_score",
"f1_score",
"micro_f1_score",
"macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score",
"micro_f1_score",
"micro_f2_score",
"micro_precision_score",
"micro_recall_score",
"matthews_corrcoef_score",
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"max_error",
# Pinball loss is only symmetric for alpha=0.5 which is the default.
"mean_pinball_loss",
"cohen_kappa_score",
"mean_normal_deviance",
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
"balanced_accuracy_score",
"adjusted_balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"det_curve",
"precision_score",
"recall_score",
"f2_score",
"f0.5_score",
"weighted_f0.5_score",
"weighted_f1_score",
"weighted_f2_score",
"weighted_precision_score",
"weighted_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"macro_f0.5_score",
"macro_f2_score",
"macro_precision_score",
"macro_recall_score",
"log_loss",
"hinge_loss",
"mean_gamma_deviance",
"mean_poisson_deviance",
"mean_compound_poisson_deviance",
"d2_tweedie_score",
"mean_absolute_percentage_error",
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
"median_absolute_error",
"max_error",
"ovo_roc_auc",
"weighted_ovo_roc_auc",
}
METRICS_REQUIRE_POSITIVE_Y = {
"mean_poisson_deviance",
"mean_gamma_deviance",
"mean_compound_poisson_deviance",
"d2_tweedie_score",
}
def _require_positive_targets(y1, y2):
"""Make targets strictly positive"""
offset = abs(min(y1.min(), y2.min())) + 1
y1 += offset
y2 += offset
return y1, y2
def test_symmetry_consistency():
# We shouldn't forget any metrics
assert (
SYMMETRIC_METRICS
| NOT_SYMMETRIC_METRICS
| set(THRESHOLDED_METRICS)
| METRIC_UNDEFINED_BINARY_MULTICLASS
) == set(ALL_METRICS)
assert (SYMMETRIC_METRICS & NOT_SYMMETRIC_METRICS) == set()
@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS))
def test_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20,))
y_pred = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_bin = random_state.randint(0, 2, size=(20, 25))
y_pred_bin = random_state.randint(0, 2, size=(20, 25))
metric = ALL_METRICS[name]
if name in METRIC_UNDEFINED_BINARY:
if name in MULTILABELS_METRICS:
assert_allclose(
metric(y_true_bin, y_pred_bin),
metric(y_pred_bin, y_true_bin),
err_msg="%s is not symmetric" % name,
)
else:
assert False, "This case is currently unhandled"
else:
assert_allclose(
metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name,
)
@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS))
def test_not_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20,))
y_pred = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
metric = ALL_METRICS[name]
# use context manager to supply custom error message
with pytest.raises(AssertionError):
assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))
raise ValueError("%s seems to be symmetric" % name)
@pytest.mark.parametrize(
"name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20,))
y_pred = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(
metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name,
)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(
y_true, y_pred, y_score, random_state=0
)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_allclose(
metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name,
)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_allclose(
metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name,
)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_allclose(
metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name,
)
assert_allclose(
metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name,
)
@pytest.mark.parametrize(
"name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_format_invariance_with_1d_vectors(name):
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20,))
y2 = random_state.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
y1, y2 = _require_positive_targets(y1, y2)
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_array_equal(y1_1d.ndim, 1)
assert_array_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
with ignore_warnings():
metric = ALL_METRICS[name]
measure = metric(y1, y2)
assert_allclose(
metric(y1_list, y2_list),
measure,
err_msg="%s is not representation invariant with list" % name,
)
assert_allclose(
metric(y1_1d, y2_1d),
measure,
err_msg="%s is not representation invariant with np-array-1d" % name,
)
assert_allclose(
metric(y1_column, y2_column),
measure,
err_msg="%s is not representation invariant with np-array-column" % name,
)
# Mix format support
assert_allclose(
metric(y1_1d, y2_list),
measure,
err_msg="%s is not representation invariant with mix np-array-1d and list"
% name,
)
assert_allclose(
metric(y1_list, y2_1d),
measure,
err_msg="%s is not representation invariant with mix np-array-1d and list"
% name,
)
assert_allclose(
metric(y1_1d, y2_column),
measure,
err_msg=(
"%s is not representation invariant with mix "
"np-array-1d and np-array-column"
)
% name,
)
assert_allclose(
metric(y1_column, y2_1d),
measure,
err_msg=(
"%s is not representation invariant with mix "
"np-array-1d and np-array-column"
)
% name,
)
assert_allclose(
metric(y1_list, y2_column),
measure,
err_msg=(
"%s is not representation invariant with mix list and np-array-column"
)
% name,
)
assert_allclose(
metric(y1_column, y2_list),
measure,
err_msg=(
"%s is not representation invariant with mix list and np-array-column"
)
% name,
)
# These mix representations aren't allowed
with pytest.raises(ValueError):
metric(y1_1d, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_1d)
with pytest.raises(ValueError):
metric(y1_list, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_list)
with pytest.raises(ValueError):
metric(y1_column, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if name not in (
MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS | MULTILABELS_METRICS
):
with pytest.raises(ValueError):
metric(y1_row, y2_row)
@pytest.mark.parametrize(
"name", sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_classification_invariance_string_vs_numbers_labels(name):
# Ensure that classification metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20,))
y2 = random_state.randint(0, 2, size=(20,))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
with ignore_warnings():
metric = CLASSIFICATION_METRICS[name]
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(
measure_with_number,
measure_with_str,
err_msg="{0} failed string vs number invariance test".format(name),
)
measure_with_strobj = metric_str(y1_str.astype("O"), y2_str.astype("O"))
assert_array_equal(
measure_with_number,
measure_with_strobj,
err_msg="{0} failed string object vs number invariance test".format(name),
)
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(
measure_with_number,
measure_with_str,
err_msg="{0} failed string vs number invariance test".format(name),
)
measure_with_strobj = metric_str(y1_str.astype("O"), y2_str.astype("O"))
assert_array_equal(
measure_with_number,
measure_with_strobj,
err_msg="{0} failed string vs number invariance test".format(name),
)
@pytest.mark.parametrize("name", THRESHOLDED_METRICS)
def test_thresholded_invariance_string_vs_numbers_labels(name):
# Ensure that thresholded metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20,))
y2 = random_state.randint(0, 2, size=(20,))
y1_str = np.array(["eggs", "spam"])[y1]
pos_label_str = "spam"
with ignore_warnings():
metric = THRESHOLDED_METRICS[name]
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(
measure_with_number,
measure_with_str,
err_msg="{0} failed string vs number invariance test".format(name),
)
measure_with_strobj = metric_str(y1_str.astype("O"), y2)
assert_array_equal(
measure_with_number,
measure_with_strobj,
err_msg="{0} failed string object vs number invariance test".format(
name
),
)
else:
            # TODO: those metrics don't support string labels yet
with pytest.raises(ValueError):
metric(y1_str, y2)
with pytest.raises(ValueError):
metric(y1_str.astype("O"), y2)
invalids_nan_inf = [
([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf]),
([0, 1], [np.inf, 1]),
([0, 1], [np.nan, 1]),
]
@pytest.mark.parametrize(
"metric", chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values())
)
@pytest.mark.parametrize("y_true, y_score", invalids_nan_inf)
def test_regression_thresholded_inf_nan_input(metric, y_true, y_score):
with pytest.raises(ValueError, match="contains NaN, infinity"):
metric(y_true, y_score)
@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS.values())
@pytest.mark.parametrize(
"y_true, y_score",
invalids_nan_inf +
# Add an additional case for classification only
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/6809
[([np.nan, 1, 2], [1, 2, 3])], # type: ignore
)
def test_classification_inf_nan_input(metric, y_true, y_score):
"""check that classification metrics raise a message mentioning the
occurrence of non-finite values in the target vectors."""
err_msg = "Input contains NaN, infinity or a value too large"
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS.values())
def test_classification_binary_continuous_input(metric):
"""check that classification metrics raise a message of mixed type data
with continuous/binary target vectors."""
y_true, y_score = ["a", "b", "a"], [0.1, 0.2, 0.3]
err_msg = (
"Classification metrics can't handle a mix of binary and continuous targets"
)
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
if name in METRICS_REQUIRE_POSITIVE_Y:
values = [1, 2]
else:
values = [0, 1]
for i, j in product(values, repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
@pytest.mark.parametrize(
"name",
sorted(
set(ALL_METRICS)
# Those metrics are not always defined with one sample
# or in multiclass classification
- METRIC_UNDEFINED_BINARY_MULTICLASS
- set(THRESHOLDED_METRICS)
),
)
def test_single_sample(name):
check_single_sample(name)
@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS))
def test_single_sample_multioutput(name):
check_single_sample_multioutput(name)
@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS))
def test_multioutput_number_of_output_differ(name):
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
metric = ALL_METRICS[name]
with pytest.raises(ValueError):
metric(y_true, y_pred)
@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS))
def test_multioutput_regression_invariance_to_dimension_shuffling(name):
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_allclose(
metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling invariant" % (name),
)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(
n_features=1,
n_classes=n_classes,
random_state=0,
n_samples=n_samples,
allow_unlabeled=True,
)
_, y2 = make_multilabel_classification(
n_features=1,
n_classes=n_classes,
random_state=1,
n_samples=n_samples,
allow_unlabeled=True,
)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
y1_list_array_indicator = list(y1)
y2_list_array_indicator = list(y2)
y1_list_list_indicator = [list(a) for a in y1_list_array_indicator]
y2_list_list_indicator = [list(a) for a in y2_list_array_indicator]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = "tmp"
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_allclose(
metric(y1_sparse_indicator, y2_sparse_indicator),
measure,
err_msg=(
"%s failed representation invariance between "
"dense and sparse indicator formats."
)
% name,
)
assert_almost_equal(
metric(y1_list_list_indicator, y2_list_list_indicator),
measure,
err_msg=(
"%s failed representation invariance "
"between dense array and list of list "
"indicator formats."
)
% name,
)
assert_almost_equal(
metric(y1_list_array_indicator, y2_list_array_indicator),
measure,
err_msg=(
"%s failed representation invariance "
"between dense and list of array "
"indicator formats."
)
% name,
)
@pytest.mark.parametrize("name", sorted(MULTILABELS_METRICS))
def test_raise_value_error_multilabel_sequences(name):
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype="object"),
]
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
with pytest.raises(ValueError):
metric(seq, seq)
@pytest.mark.parametrize("name", sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_binary_classification(name):
# Test in the binary case
n_classes = 2
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples,))
y_pred = random_state.randint(0, n_classes, size=(n_samples,))
y_score = random_state.normal(size=y_true.shape)
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(
-1.0 * measure_normalized,
0,
err_msg="We failed to test correctly the normalize option",
)
assert_allclose(
measure_normalized,
measure_not_normalized / n_samples,
err_msg=f"Failed with {name}",
)
@pytest.mark.parametrize("name", sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_multiclass_classification(name):
# Test in the multiclass case
n_classes = 4
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples,))
y_pred = random_state.randint(0, n_classes, size=(n_samples,))
y_score = random_state.uniform(size=(n_samples, n_classes))
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(
-1.0 * measure_normalized,
0,
err_msg="We failed to test correctly the normalize option",
)
assert_allclose(
measure_normalized,
measure_not_normalized / n_samples,
err_msg=f"Failed with {name}",
)
@pytest.mark.parametrize(
"name", sorted(METRICS_WITH_NORMALIZE_OPTION.intersection(MULTILABELS_METRICS))
)
def test_normalize_option_multilabel_classification(name):
# Test in the multilabel case
n_classes = 4
n_samples = 100
random_state = check_random_state(0)
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(
n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples,
)
_, y_pred = make_multilabel_classification(
n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples,
)
y_score = random_state.uniform(size=y_true.shape)
# To make sure at least one empty label is present
y_true += [0] * n_classes
y_pred += [0] * n_classes
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(
-1.0 * measure_normalized,
0,
err_msg="We failed to test correctly the normalize option",
)
assert_allclose(
measure_normalized,
measure_not_normalized / n_samples,
err_msg=f"Failed with {name}",
)
@ignore_warnings
def _check_averaging(
metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel
):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(
label_measure,
[
metric(y_true_binarize[:, i], y_pred_binarize[:, i])
for i in range(n_classes)
],
)
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_allclose(
micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel())
)
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_allclose(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure, np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_allclose(
sample_measure,
np.mean(
[
metric(y_true_binarize[i], y_pred_binarize[i])
for i in range(n_samples)
]
),
)
with pytest.raises(ValueError):
metric(y_true, y_pred, average="unknown")
with pytest.raises(ValueError):
metric(y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(
metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel
)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(
metric, y_true, y_score, y_true_binarize, y_score, is_multilabel
)
else:
raise ValueError("Metric is not recorded as having an average option")
@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING))
def test_averaging_multiclass(name):
n_samples, n_classes = 50, 3
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples,))
y_pred = random_state.randint(0, n_classes, size=(n_samples,))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize(
"name", sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING)
)
def test_averaging_multilabel(name):
n_samples, n_classes = 40, 5
_, y = make_multilabel_classification(
n_features=1,
n_classes=n_classes,
random_state=5,
n_samples=n_samples,
allow_unlabeled=False,
)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_zeroes(name):
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_binary_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
# Test _average_binary_score for weight.sum() == 0
binary_metric = lambda y_true, y_score, average="macro": _average_binary_score(
precision_score, y_true, y_score, average
)
_check_averaging(
binary_metric,
y_true,
y_pred,
y_true_binarize,
y_pred_binarize,
is_multilabel=True,
)
@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_ones(name):
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # top_k_accuracy_score always leads to a perfect score for k > 1 in the
    # binary case
metric = partial(metric, k=1) if name == "top_k_accuracy_score" else metric
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_allclose(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to sample_weight=ones"
% name,
)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
# use context manager to supply custom error message
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError(
"Unweighted and weighted scores are unexpectedly "
"almost equal (%s) and (%s) "
"for %s" % (unweighted_score, weighted_score, name)
)
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2, sample_weight=sample_weight.tolist())
assert_allclose(
weighted_score,
weighted_score_list,
err_msg=(
"Weighted scores for array and list "
"sample_weight input are not equal (%s != %s) for %s"
)
% (weighted_score, weighted_score_list, name),
)
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0),
sample_weight=None,
)
assert_allclose(
weighted_score,
repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name,
)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(
y1_subset, y2_subset, sample_weight=sample_weight_subset
)
weighted_score_zeroed = metric(y1, y2, sample_weight=sample_weight_zeroed)
assert_allclose(
weighted_score_subset,
weighted_score_zeroed,
err_msg=(
"Zeroing weights does not give the same result as "
"removing the corresponding samples (%s != %s) for %s"
)
% (weighted_score_zeroed, weighted_score_subset, name),
)
if not name.startswith("unnormalized"):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_allclose(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant under scaling" % name,
)
# Check that if number of samples in y_true and sample_weight are not
# equal, meaningful error is raised.
error_message = (
r"Found input variables with inconsistent numbers of "
r"samples: \[{}, {}, {}\]".format(
_num_samples(y1), _num_samples(y2), _num_samples(sample_weight) * 2
)
)
with pytest.raises(ValueError, match=error_message):
metric(y1, y2, sample_weight=np.hstack([sample_weight, sample_weight]))
@pytest.mark.parametrize(
"name",
sorted(
set(ALL_METRICS).intersection(set(REGRESSION_METRICS))
- METRICS_WITHOUT_SAMPLE_WEIGHT
),
)
def test_regression_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
"name",
sorted(
set(ALL_METRICS)
- set(REGRESSION_METRICS)
- METRICS_WITHOUT_SAMPLE_WEIGHT
- METRIC_UNDEFINED_BINARY
),
)
def test_binary_sample_weight_invariance(name):
# binary
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples,))
y_pred = random_state.randint(0, 2, size=(n_samples,))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
"name",
sorted(
set(ALL_METRICS)
- set(REGRESSION_METRICS)
- METRICS_WITHOUT_SAMPLE_WEIGHT
- METRIC_UNDEFINED_BINARY_MULTICLASS
),
)
def test_multiclass_sample_weight_invariance(name):
# multiclass
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples,))
y_pred = random_state.randint(0, 5, size=(n_samples,))
y_score = random_state.random_sample(size=(n_samples, 5))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
# softmax
temp = np.exp(-y_score)
y_score_norm = temp / temp.sum(axis=-1).reshape(-1, 1)
check_sample_weight_invariance(name, metric, y_true, y_score_norm)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
"name",
sorted(
(MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS)
- METRICS_WITHOUT_SAMPLE_WEIGHT
),
)
def test_multilabel_sample_weight_invariance(name):
# multilabel indicator
random_state = check_random_state(0)
_, ya = make_multilabel_classification(
n_features=1, n_classes=10, random_state=0, n_samples=50, allow_unlabeled=False
)
_, yb = make_multilabel_classification(
n_features=1, n_classes=10, random_state=1, n_samples=50, allow_unlabeled=False
)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [
[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel],
]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
@pytest.mark.parametrize(
"name", sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"})
)
def test_multilabel_label_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.randint(0, 2, size=(n_samples, n_classes))
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
"name", sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS)
)
def test_thresholded_multilabel_multioutput_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.normal(size=y_true.shape)
# Makes sure all samples have at least one label. This works around errors
# when running metrics where average="sample"
y_true[y_true.sum(1) == 4, 0] = 0
y_true[y_true.sum(1) == 0, 0] = 1
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
if metric == mean_absolute_percentage_error:
assert np.isfinite(current_score)
assert current_score > 1e6
# Here we are not comparing the values in case of MAPE because
# whenever y_true value is exactly zero, the MAPE value doesn't
# signify anything. Thus, in this case we are just expecting
# very large finite value.
else:
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
"name", sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_thresholded_metric_permutation_invariance(name):
n_samples, n_classes = 100, 3
random_state = check_random_state(0)
y_score = random_state.rand(n_samples, n_classes)
temp = np.exp(-y_score)
y_score = temp / temp.sum(axis=-1).reshape(-1, 1)
y_true = random_state.randint(0, n_classes, size=n_samples)
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
inverse_perm = np.zeros(n_classes, dtype=int)
inverse_perm[list(perm)] = np.arange(n_classes)
y_score_perm = y_score[:, inverse_perm]
y_true_perm = np.take(perm, y_true)
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
@pytest.mark.parametrize("metric_name", CLASSIFICATION_METRICS)
def test_metrics_consistent_type_error(metric_name):
    # check that an understandable message is raised when the types of y_true
    # and y_pred mismatch
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=object)
y2 = rng.randint(0, 2, size=y1.size)
err_msg = "Labels in y_true and y_pred should be of the same type."
with pytest.raises(TypeError, match=err_msg):
CLASSIFICATION_METRICS[metric_name](y1, y2)
@pytest.mark.parametrize(
"metric, y_pred_threshold",
[
(average_precision_score, True),
(brier_score_loss, True),
(f1_score, False),
(partial(fbeta_score, beta=1), False),
(jaccard_score, False),
(precision_recall_curve, True),
(precision_score, False),
(recall_score, False),
(roc_curve, True),
],
)
@pytest.mark.parametrize("dtype_y_str", [str, object])
def test_metrics_pos_label_error_str(metric, y_pred_threshold, dtype_y_str):
    # check the error message raised when `pos_label` is not specified and the
    # targets are made of strings.
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str)
y2 = rng.randint(0, 2, size=y1.size)
if not y_pred_threshold:
y2 = np.array(["spam", "eggs"], dtype=dtype_y_str)[y2]
err_msg_pos_label_None = (
"y_true takes value in {'eggs', 'spam'} and pos_label is not "
"specified: either make y_true take value in {0, 1} or {-1, 1} or "
"pass pos_label explicit"
)
err_msg_pos_label_1 = (
r"pos_label=1 is not a valid label. It should be one of " r"\['eggs', 'spam'\]"
)
pos_label_default = signature(metric).parameters["pos_label"].default
err_msg = err_msg_pos_label_1 if pos_label_default == 1 else err_msg_pos_label_None
with pytest.raises(ValueError, match=err_msg):
metric(y1, y2)
|
py
|
1a564c419ff25496a11840753331abb5bfadc588
|
from sqlalchemy import Column, ForeignKey, Integer
from app.db.settings import Base
class ModelAssignment(Base):
__tablename__ = "assignments"
user_id = Column(Integer, ForeignKey("users.id"), primary_key=True)
task_id = Column(Integer, ForeignKey("tasks.id"), primary_key=True)
|
py
|
1a564d58800699cafb3cea02ab789666465ffb91
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Federated version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = '0.9.0'
|
py
|
1a564e55e3a271f1369c3d16412ce642a79acdec
|
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from oslo.config import cfg
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base
from cloudbaseinit.plugins.common import constants
from cloudbaseinit.utils import crypt
opts = [
cfg.BoolOpt('inject_user_password', default=True, help='Set the password '
'provided in the configuration. If False or no password is '
'provided, a random one will be set'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.import_opt('username', 'cloudbaseinit.plugins.common.createuser')
LOG = logging.getLogger(__name__)
class SetUserPasswordPlugin(base.BasePlugin):
def _encrypt_password(self, ssh_pub_key, password):
cm = crypt.CryptManager()
with cm.load_ssh_rsa_public_key(ssh_pub_key) as rsa:
enc_password = rsa.public_encrypt(password.encode())
return base64.b64encode(enc_password)
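    # Illustrative sketch only (not used by the plugin): whoever reads the
    # posted password back is expected to reverse this transformation with the
    # matching RSA private key, conceptually:
    #
    #   enc_password = base64.b64decode(enc_password_b64)
    #   password = private_key.private_decrypt(enc_password)
    #
    # The concrete decryption API depends on the client tooling; the names
    # above are hypothetical.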
def _get_ssh_public_key(self, service):
public_keys = service.get_public_keys()
if public_keys:
return list(public_keys)[0]
def _get_password(self, service, shared_data):
injected = False
if CONF.inject_user_password:
password = service.get_admin_password()
else:
password = None
if password:
injected = True
LOG.warn('Using admin_pass metadata user password. Consider '
'changing it as soon as possible')
else:
password = shared_data.get(constants.SHARED_DATA_PASSWORD)
return password, injected
def _set_metadata_password(self, password, service):
if service.is_password_set:
LOG.debug('User\'s password already set in the instance metadata '
'and it cannot be updated in the instance metadata')
return True
else:
ssh_pub_key = self._get_ssh_public_key(service)
if ssh_pub_key:
enc_password_b64 = self._encrypt_password(ssh_pub_key,
password)
return service.post_password(enc_password_b64)
else:
LOG.info('No SSH public key available for password encryption')
return True
def _set_password(self, service, osutils, user_name, shared_data):
"""Change the password for the received username if it is required.
The used password can be the one received from the metadata provider,
if it does exist, or a random one will be generated.
.. notes:
            This method has a different behaviour depending on the value of
            :meth:`~can_update_password`: if this is True, the password will
            be set only if :meth:`~is_password_changed` is also True.
"""
if service.can_update_password and not service.is_password_changed():
LOG.info('Updating password is not required.')
return None
password, injected = self._get_password(service, shared_data)
if not password:
LOG.debug('Generating a random user password')
maximum_length = osutils.get_maximum_password_length()
password = osutils.generate_random_password(
maximum_length)
osutils.set_user_password(user_name, password)
self.post_set_password(user_name, password,
password_injected=injected)
return password
def post_set_password(self, username, password, password_injected=False):
"""Executes post set password logic.
This is called by :meth:`execute` after the password was set.
"""
def execute(self, service, shared_data):
# TODO(alexpilotti): The username selection logic must be set in the
# CreateUserPlugin instead if using CONF.username
user_name = shared_data.get(constants.SHARED_DATA_USERNAME,
CONF.username)
osutils = osutils_factory.get_os_utils()
if osutils.user_exists(user_name):
password = self._set_password(service, osutils,
user_name, shared_data)
if password:
                LOG.info('Password successfully updated for user %s' %
user_name)
# TODO(alexpilotti): encrypt with DPAPI
shared_data[constants.SHARED_DATA_PASSWORD] = password
if not service.can_post_password:
LOG.info('Cannot set the password in the metadata as it '
'is not supported by this service')
else:
self._set_metadata_password(password, service)
if service.can_update_password:
# If the metadata provider can update the password, the plugin
# must run at every boot in order to update the password if
# it was changed.
return base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False
else:
return base.PLUGIN_EXECUTION_DONE, False
|
py
|
1a564eef6e456df37af4ea2a695d9d5f986cff2d
|
"""A class to represent a chemical species."""
# The MIT License (MIT)
#
# Copyright (c) 2018 Institute for Molecular Systems Biology, ETH Zurich.
# Copyright (c) 2019 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import (
Dict,
TypeVar
)
from logging import (
Logger,
getLogger
)
from copy import deepcopy
from chemlite import Compound
from rplibs.rpObject import rpObject
class rpCompound(Compound, rpObject):
"""A class to implement a chemical species
    enriched with FBA and thermodynamics information.
"""
__thermo_str = 'standard_dg_formation'
__fba_str = 'shadow_price'
def __init__(
self,
id: str,
smiles: str = '',
inchi: str = '',
inchikey: str = '',
formula: str = '',
name: str = '',
compartment_id: str = 'c',
logger: Logger = getLogger(__name__)
):
"""Create a rpCompound object with default settings.
Parameters
----------
id: str
smiles: str, optional
inchi: str, optional
inchikey: str, optional
formula: str, optional
name: str, optional
compartment_id: str, optional
logger : Logger, optional
"""
Compound.__init__(
self,
id=id,
smiles=smiles,
inchi=inchi,
inchikey=inchikey,
formula=formula,
name=name,
logger=logger
)
rpObject.__init__(self)
self.set_compartment(compartment_id)
def _to_dict(
self,
full: bool = True
) -> Dict:
"""Get attributes as a dictionary.
Parameters
----------
full: bool, optional
If set to False, the returned dictionary will not
contain attributes inherited from Compound class
(default: True).
"""
if full:
return {
**Compound._to_dict(self),
**rpObject._to_dict(self),
**self.__to_dict()
}
else:
return {
**self.__to_dict(),
**rpObject._to_dict(self)
}
def __to_dict(self) -> Dict:
"""Returns a dictionary which contains attributes
only from rpCompound class excluding inherited ones."""
return {
# 'compartment': self.get_compartment()
}
# def __eq__(self, other) -> bool:
# """Returns the equality between two rpCompound objects."""
# return super(Compound, self).__eq__(other)
def get_thermo_standard_dg_formation(self) -> TypeVar:
"""Get thermodynamics dG formation cost."""
return self.get_thermo_info(rpCompound.__thermo_str)
def get_fba_biomass_shadow_price(self) -> TypeVar:
"""Get flux shadow price during biomass production."""
return self.get_fba_info(f'biomass_{rpCompound.__fba_str}')
def get_fba_fraction_shadow_price(self) -> TypeVar:
"""Get flux shadow price during fraction of reaction analysis."""
return self.get_fba_info(f'fraction_{rpCompound.__fba_str}')
def get_fba_fba_shadow_price(self) -> TypeVar:
"""Get flux shadow price during balance analysis."""
return self.get_fba_info(f'fba_{rpCompound.__fba_str}')
def get_fba_pfba_shadow_price(self) -> TypeVar:
"""Get flux shadow price during parcimonious balance analysis."""
return self.get_fba_info(f'pfba_{rpCompound.__fba_str}')
def get_compartment(self) -> str:
"""Get compound compartment ID."""
return self.__compartment
def set_thermo_standard_dg_formation(self, value: float) -> None:
"""Set dG formation cost.
Parameters
----------
value: float
"""
self.add_thermo_info(rpCompound.__thermo_str, value)
def set_fba_biomass_shadow_price(self, value: float) -> None:
"""Set flux shadow price during biomass production.
Parameters
----------
value: float
"""
self.add_fba_info(f'biomass_{rpCompound.__fba_str}', value)
def set_fba_fraction_shadow_price(self, value: float) -> None:
"""Set flux shadow price during fraction of reaction analysis.
Parameters
----------
value: float
"""
self.add_fba_info(f'fraction_{rpCompound.__fba_str}', value)
def set_fba_fba_shadow_price(self, value: float) -> None:
"""Set flux shadow price during balance analysis..
Parameters
----------
value: float
"""
self.add_fba_info(f'fba_{rpCompound.__fba_str}', value)
def set_fba_pfba_shadow_price(self, value: float) -> None:
"""Set flux shadow price during parcimonious balance analysis.
Parameters
----------
value: float
"""
self.add_fba_info(f'pfba_{rpCompound.__fba_str}', value)
def set_compartment(self, compartment: str) -> None:
"""Set compartment ID of the compound.
Parameters
----------
compartment: str
"""
self.__compartment = compartment
@staticmethod
def from_compound(
compound:Compound,
compartment_id:str='c',
logger: Logger = getLogger(__name__)
) -> 'rpCompound':
'''Create a rpCompound object from a Compound object
:param compound: A Compound object
:param compartment_id: A compartment id (Default: 'c')
:param logger: A logging object (Default: create one)
:type compound: Compound
:type compartment_id: str
:type logger: logging
:return: An rpCompound object
:rtype: rpCompound
'''
return rpCompound(
id=compound.get_id(),
smiles=compound.get_smiles(),
inchi=compound.get_inchi(),
inchikey=compound.get_inchikey(),
formula=compound.get_formula(),
name=compound.get_name(),
compartment_id=compartment_id,
logger=logger
)
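# Minimal usage sketch (illustrative only; whether Compound can be built from
# just an id depends on the installed chemlite version, and 'MNXM23' is a
# hypothetical identifier):
#
#   c = Compound(id='MNXM23')
#   rp_c = rpCompound.from_compound(c, compartment_id='c')
#   rp_c.set_thermo_standard_dg_formation(-361.0)
#   rp_c.get_thermo_standard_dg_formation()  # -> -361.0
#   rp_c.get_compartment()                   # -> 'c'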
|
py
|
1a564f496530c85118cb945044272ea143456fbb
|
from setuptools import setup, find_packages
import codecs
import os
def get_lookup():
"""get version by way of sourcecred.version, returns a
lookup dictionary with several global variables without
    needing to import sourcecred
"""
lookup = dict()
version_file = os.path.join("sourcecred", "version.py")
with open(version_file) as filey:
exec(filey.read(), lookup)
return lookup
# Read in requirements
def get_reqs(lookup=None, key="INSTALL_REQUIRES"):
"""get requirements, mean reading in requirements and versions from
the lookup obtained with get_lookup
"""
if lookup == None:
lookup = get_lookup()
install_requires = []
for module in lookup[key]:
module_name = module[0]
module_meta = module[1]
if "exact_version" in module_meta:
dependency = "%s==%s" % (module_name, module_meta["exact_version"])
elif "min_version" in module_meta:
if module_meta["min_version"] == None:
dependency = module_name
else:
dependency = "%s>=%s" % (module_name, module_meta["min_version"])
install_requires.append(dependency)
return install_requires
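# Illustrative sketch of the lookup structure get_reqs() expects (hypothetical
# entries; the real values live in sourcecred/version.py):
#
#   INSTALL_REQUIRES = [
#       ("requests", {"min_version": "2.20.0"}),   # -> "requests>=2.20.0"
#       ("pyaml", {"min_version": None}),          # -> "pyaml"
#       ("six", {"exact_version": "1.15.0"}),      # -> "six==1.15.0"
#   ]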
# Make sure everything is relative to setup.py
install_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(install_path)
# Get version information from the lookup
lookup = get_lookup()
VERSION = lookup["__version__"]
NAME = lookup["NAME"]
AUTHOR = lookup["AUTHOR"]
AUTHOR_EMAIL = lookup["AUTHOR_EMAIL"]
PACKAGE_URL = lookup["PACKAGE_URL"]
KEYWORDS = lookup["KEYWORDS"]
DESCRIPTION = lookup["DESCRIPTION"]
LICENSE = lookup["LICENSE"]
with open("README.md") as filey:
LONG_DESCRIPTION = filey.read()
################################################################################
# MAIN #########################################################################
################################################################################
if __name__ == "__main__":
INSTALL_REQUIRES = get_reqs(lookup)
TESTS_REQUIRES = get_reqs(lookup, "TESTS_REQUIRES")
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=AUTHOR,
maintainer_email=AUTHOR_EMAIL,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
url=PACKAGE_URL,
license=LICENSE,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
keywords=KEYWORDS,
setup_requires=["pytest-runner"],
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRES,
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Operating System :: Unix",
"Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["sourcecred=sourcecred.cli:main"]},
)
|
py
|
1a564f934b3f6dec1901c6f36e70d4d471a884fe
|
# path: lib/elements/
# filename: markdown_.py
# description: Markdown support for docs
'''
Copyright 2017 Mark Madere
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# make python2 strings and dictionaries behave like python3
from __future__ import unicode_literals
try:
    from builtins import dict, str
except ImportError:
    from __builtin__ import dict, str
''' external imports
'''
import markdown
''' internal imports
'''
import classes.element
class Markdown(classes.element.Element):
def render(self):
extensions = self.conf.get('extensions',[])
return markdown.markdown(self.conf.get('value',''), extensions=extensions)
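# Hypothetical usage sketch; the exact way `conf` is populated depends on
# classes.element.Element, which is not shown here:
#
#   element = Markdown()
#   element.conf = {
#       'value': '# Title\n\nSome *markdown* text.',
#       'extensions': ['markdown.extensions.tables'],
#   }
#   html = element.render()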
|
py
|
1a564f9f8c341e3983cb90adc864ed908963598c
|
from random import randint
import codecs
import time
compteur = False
def traitement(mot):
liste = []
for x in range(1, len(mot)-1):
liste.append(mot[x])
res = mot
while res == mot:
alea = []
cp_liste = liste.copy()
n = None
for x in range(0, len(cp_liste)):
n = randint(0, len(cp_liste)-1)
alea.append(cp_liste[n])
cp_liste.remove(cp_liste[n])
alea = ''.join(alea)
res = mot[0]+alea+mot[-1]
return res
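# Note: traitement() shuffles the interior letters of a word while keeping the
# first and last letters fixed, redrawing until the result differs from the
# input. Illustrative example (output is random):
#
#   traitement("bonjour")  # -> e.g. "bnuojor"
#
# The caller below only applies it to words of length >= 4.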
sortie = []
i = 0
print(f"Traitement en cours...")
try:
if compteur:
print("")
fichier = codecs.open("input.txt", "r", "utf-8")
texte = fichier.read()
texte2 = texte
lignes = texte.split("\n")
nb_mots = len(texte2.split(" ")) + len(lignes) - 1
fichier.close()
tps1 = time.time()
for x in range(0, len(lignes)):
lignes[x] = lignes[x].split(" ")
sortie_lignes = []
for mot in lignes[x]:
if len(mot.split("-")) > 1:
tirets = []
for tiret in mot.split("-"):
if len(tiret) >= 4:
tirets.append(f"{traitement(tiret)}")
else:
tirets.append(f"{tiret}")
tirets = '-'.join(tirets)
sortie.append(tirets)
else:
if len(mot) >= 4:
sortie.append(traitement(mot))
else:
sortie.append(mot)
if compteur:
i = i + 1
p = "{:06.2F}".format(i * 100 / nb_mots)
print(f"[{p}%] {i} sur {nb_mots} Traité")
sortie.append("\n")
tps2 = time.time()
with codecs.open("output.txt","w", "utf-8") as fichier:
for x in range(0, len(lignes)):
sortie_lignes = " ".join(sortie).split("\n")
sortie_lignes[x] = sortie_lignes[x].strip()
fichier.write(f"{sortie_lignes[x]}\n")
fichier.close()
print(f"\n {nb_mots} mots en {(tps2 - tps1)} seconde(s)")
print(f" {int(nb_mots/(tps2 - tps1))} Mots/s\n")
except Exception as e:
print(f" /!\\ Erreur: {e}\n")
|
py
|
1a564fe8fffd0ddafc71e282dab2ec92ebe47992
|
#The provided code stub reads two integers, a and b, from STDIN. Add code to print three lines where:
#The first line contains the sum of the two numbers.
#The second line contains the difference of the two numbers (first - second).
#The third line contains the product of the two numbers.
#Input Format
#The first line contains the first integer, a.
#The second line contains the second integer, b.
#Sample Input 0
#3
#2
#Sample Output 0
#5
#1
#6
if __name__ == '__main__':
a = int(input())
b = int(input())
print(a+b)
print(a-b)
print(a*b)
|
py
|
1a5650b9a85e79aeff38209e4fedcb47c5e8b1b5
|
#!/usr/bin/python
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import matplotlib.ticker as ticker
from matplotlib import cm
import sys
sys.path.append('../../python')
from instance import Instance
import visualizer
import histogram
#------------------------------------------------------------------------------
# Load the gene clusters from text files
#------------------------------------------------------------------------------
def load_clusters(filename):
file = open(filename, 'r')
content = file.read().splitlines()
file.close()
gene_clusters = []
for line in content:
gene_cluster = []
atoms_str = line.split(' ')
for elt in atoms_str:
tokens = elt.split('_')
if len(tokens) == 2:
atom = (tokens[0], int(tokens[1]))
gene_cluster.append(atom)
gene_clusters.append(gene_cluster)
return gene_clusters
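# Expected input format (inferred from the parsing above): one gene cluster per
# line, atoms separated by spaces, each atom written as "<gene>_<value>", e.g.
#
#   CD3E_1 CD8A_1 GZMB_0
#   LYZ_1 S100A8_1
#
# would be loaded as [[('CD3E', 1), ('CD8A', 1), ('GZMB', 0)],
#                     [('LYZ', 1), ('S100A8', 1)]]  (gene names illustrative).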
#------------------------------------------------------------------------------
# Print the gene clusters formatted in LaTex
#------------------------------------------------------------------------------
def print_cluster_latex(instance, cluster):
print(cluster)
cluster_values = { (0,3):0, (0,2):1, (0,1):2, (1,3):3, (1,2):4, (2,3):5, (1,1):6, (2,2):7, (3,3):9}
ordered_cluster = cluster.copy()
ordered_cluster.sort(key = lambda atom: cluster_values[(atom[1], instance.n_values[atom[0]]-1)])
# output the clusters formatted in LaTex
res = ''
for elt in ordered_cluster:
atom_index = instance.get_atom_index(elt)
val = elt[1]
nval = instance.n_values[elt[0]]
color_str = None
if nval == 2:
if val == 0:
color_str = 'BrickRed'
elif val == 1:
color_str = 'OliveGreen'
elif nval == 3:
if val == 0:
color_str = 'BrickRed'
elif val == 1:
color_str = 'Goldenrod'
elif val == 2:
color_str = 'OliveGreen'
elif nval == 4:
if val == 0:
color_str = 'BrickRed'
elif val == 1:
color_str = 'Orange'
elif val == 2:
color_str = 'SpringGreen'
elif val == 3:
color_str = 'OliveGreen'
res += '\\textcolor{' + color_str + '}{$\\boldsymbol{\\textit{' + elt[0] + '}' + '_{' + str(elt[1]) + '/' + str(nval-1) + '}}$}, '
print(res)
return
#------------------------------------------------------------------------------
# Print the gene clusters
#------------------------------------------------------------------------------
def print_gene_clusters(gene_clusters):
for ind_cluster in range(len(gene_clusters)):
print('gene cluster ', ind_cluster, ': ')
str_clusters = ''
for atom in gene_clusters[ind_cluster]:
str_clusters += atom[0] + '_' + str(atom[1]) + ', '
print(str_clusters)
print('\n\n')
return
#------------------------------------------------------------------------------
# Plot the rule matching error for a body
#------------------------------------------------------------------------------
def plot_rule_error(df_discrete, df_coordinates, instance, body, ax):
histo = histogram.Histogram(instance, body, histogram.Histogram_type.GLOBAL)
plasma = cm.get_cmap('plasma_r', 256)
cnorm = mcolors.Normalize(vmin=0, vmax=len(histo.positive_histogram))
cell_score = {barcode : error for error in range(len(histo.positive_histogram)) for barcode in histo.positive_histogram[error]}
# sort all the cells according to the score
barcodes = [index for index in df_coordinates.index]
barcodes.sort(key = lambda elt: cell_score[elt], reverse=True)
df_coordinates_sorted = df_coordinates.loc[barcodes]
col = [cell_score[barcode] for barcode in df_coordinates_sorted.index]
ax.scatter(df_coordinates_sorted['UMAP_1'].values, df_coordinates_sorted['UMAP_2'].values, c=col, cmap=plasma, norm=cnorm, s=3)
# remove UMAP coordinates
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.set_xlabel('UMAP 1')
ax.set_ylabel('UMAP 2')
# squared plots
# ax.set_aspect((ax.get_ylim()[1]-ax.get_ylim()[0])/(ax.get_xlim()[1]-ax.get_xlim()[0]))
ax.set_aspect((ax.get_xlim()[1]-ax.get_xlim()[0])/(ax.get_ylim()[1]-ax.get_ylim()[0]))
cbar = ax.get_figure().colorbar(cm.ScalarMappable(norm=cnorm, cmap=plasma), ax=ax)
cbar.set_label('Matching error')
# cbar.set_label('Erreur de couv.')
return
#------------------------------------------------------------------------------
# Plot the cell scores for each cluster
#------------------------------------------------------------------------------
def plot_cell_scores(df_discrete, df_coordinates, gene_clusters, cluster_selection = None, n_col = 3, title=None):
instance = Instance.create_random_instance(df_discrete.copy(deep=False), 0.5)
if cluster_selection == None:
cluster_indexes = [ind for ind in range(len(gene_clusters))]
else:
cluster_indexes = cluster_selection
n_line = math.ceil(len(cluster_indexes)/n_col)
fig, axs = plt.subplots(n_line, n_col)
# fig.tight_layout()
ind_plot = 0
for ind_line in range(n_line):
for ind_col in range(n_col):
if ind_plot < len(cluster_indexes):
ax = axs[ind_line, ind_col]
ind_cluster = cluster_indexes[ind_plot]
body = [instance.get_atom_index(atom) for atom in gene_clusters[ind_cluster] ]
plot_rule_error(df_discrete, df_coordinates, instance, body, ax)
ax.set_title('cluster ' + str(ind_cluster))
ind_plot += 1
# title = None
if title != None:
fig.suptitle(title)
return
#------------------------------------------------------------------------------
# create a dataframe with the gene clusters identity
#------------------------------------------------------------------------------
def create_dataframe_clusters(filename, gene_clusters):
genes = np.concatenate(gene_clusters)
data = [ [int(elt[1]) ] for elt in genes]
for ind_gene in range(len(genes)):
for ind_cluster in range(len(gene_clusters)):
gene_cluster = gene_clusters[ind_cluster]
if (genes[ind_gene][0], int(genes[ind_gene][1])) in gene_cluster:
data[ind_gene].append(1)
else:
data[ind_gene].append(0)
df_gene_clusters = pd.DataFrame(data, index=[elt[0] for elt in genes], columns=['gene_value'] + ['cluster_'+str(ind) for ind in range(len(gene_clusters))])
df_gene_clusters.to_csv(filename)
return
#------------------------------------------------------------------------------
# create a dataframe containing the matching error for all cells on the clusters
#------------------------------------------------------------------------------
def create_cell_score_dataframe(filename, df_discrete, gene_clusters):
instance = Instance.create_random_instance(df_discrete.copy(deep=False), 0.5)
data = [[] for _ in df_discrete.index]
barcode_to_ind = { df_discrete.index[ind]:ind for ind in range(len(df_discrete.index))}
for ind_cluster in range(len(gene_clusters)):
print('gene cluster index: ', ind_cluster)
body = [instance.get_atom_index(atom) for atom in gene_clusters[ind_cluster] ]
histo = histogram.Histogram(instance, body, histogram.Histogram_type.GLOBAL)
for error in range(len(body)+1):
cell_score = 1.-error/len(body)
for barcode in histo.positive_histogram[error]:
data[barcode_to_ind[barcode]].append(cell_score)
df_cell_score = pd.DataFrame(data, index=df_discrete.index, columns=['gene_cluster_' + str(ind) for ind in range(len(gene_clusters))])
df_cell_score.to_csv(filename)
return
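# The CSV written by create_cell_score_dataframe() has one row per cell barcode
# and one 'gene_cluster_<i>' column per cluster; each entry is
# 1 - error/len(body), so 1.0 means the cell matches every atom of the cluster
# and 0.0 means it matches none of them.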
#------------------------------------------------------------------------------
# create a discrete dataframe with a subset of the cells
#------------------------------------------------------------------------------
def create_sub_dataframe(filename, df_discrete, selected_cells):
df_sub_discrete = df_discrete.copy()
df_sub_discrete = df_sub_discrete.loc[selected_cells]
df_sub_discrete = df_sub_discrete.loc[:, (df_sub_discrete != df_sub_discrete.iloc[0]).any()] # remove potential constant columns
# print(df_sub_discrete)
# print('filename: ', filename)
df_sub_discrete.to_csv(filename)
return
#------------------------------------------------------------------------------
# decompose the cells into two subsets based on the cluster matching error
#------------------------------------------------------------------------------
def decompose_cluster(df_coordinates, instance, gene_clusters, cluster_index, error_threshold, all_cells = False):
# plot global histogram from cluster 6 and select cells with a small matching error
body = [instance.get_atom_index(atom) for atom in gene_clusters[cluster_index] ]
histo = histogram.Histogram(instance, body, histogram.Histogram_type.GLOBAL)
fig,ax = plt.subplots()
visualizer.plot_global_histogram(ax, histo)
ax.set_ylabel('cell proportion')
title = 'Matching error of cluster ' + str(cluster_index)
if all_cells:
title += ' on all cells'
else:
title += ' on the myeloids'
ax.set_title(title)
selected_cells = [cell for error in range(error_threshold+1) for cell in histo.positive_histogram[error]]
selected_cells
remaining_cells = [barcode for barcode in df_coordinates.index if not barcode in selected_cells]
# plot the selected cells from cluster 6 in the UMAP
fig, ax = plt.subplots()
ax.scatter(df_coordinates.loc[selected_cells]['UMAP_1'].values, df_coordinates.loc[selected_cells]['UMAP_2'].values, s=3, c='red', zorder=1, label='selected cells')
ax.scatter(df_coordinates.loc[remaining_cells]['UMAP_1'].values, df_coordinates.loc[remaining_cells]['UMAP_2'].values, s=3, zorder=0, c='black', label='other cell')
ax.legend()
title = 'Cells selected from the matching error on cluster ' + str(cluster_index)
if all_cells:
title += ' (on all cells)'
ax.set_title(title)
# export the cells corresponding to cluster 6 in a csv file
df_selection = pd.DataFrame([1 for _ in selected_cells] + [0 for _ in remaining_cells], index=selected_cells+remaining_cells, columns=['selection'])
filename = '../../dataset/Imagine/coexpression/myeloid_c' + str(cluster_index) + '_selection'
if all_cells:
filename += '_all_cells'
filename += '.csv'
df_selection.to_csv(filename)
return
#------------------------------------------------------------------------------
# process the global clusters
#------------------------------------------------------------------------------
def process_global_clusters():
# read the discrete matrix
filename = '../../dataset/Imagine/discrete_matrix.csv'
df_discrete = pd.read_csv(filename, index_col=0)
# print(df_discrete)
# read the embedding coordinates
df_coordinates = pd.read_csv('../../dataset/Imagine/umap_coordinates.csv', index_col=0)
# read the gene clusters
gene_clusters = load_clusters('../../dataset/Imagine/coexpression/gene_clusters.txt')
print('number of gene clusters: ', len(gene_clusters))
    # create an artificial instance
instance = Instance.create_random_instance(df_discrete.copy(deep=False), 0.5)
# output the clusters formatted in LaTex
# for cluster_index in range(len(gene_clusters)):
# cluster = gene_clusters[cluster_index]
# print('gene cluster ', cluster_index, '(', len(cluster), ' atoms)')
# print_cluster_latex(instance, gene_clusters[cluster_index])
# print('\n\n')
# Display the cell score for each gene cluster on the UMAP
plot_cell_scores(df_discrete, df_coordinates, gene_clusters, None, 2, 'Matching error of the gene clusters on the cells')
# display gene cluster 5 cell score in a histogram
ind_cluster = 5
body = [instance.get_atom_index(atom) for atom in gene_clusters[ind_cluster] ]
histo = histogram.Histogram(instance, body, histogram.Histogram_type.GLOBAL)
# visualization of the matching error histogram of cluster #4 on all the cells
fig,ax = plt.subplots()
values = [ error for error in range(len(histo.positive_histogram)) for _ in histo.positive_histogram[error] ]
ax.hist(values, 50, density=True, edgecolor='black')
# print(ax.get_ylim())
temp_ylim = ax.get_ylim()
ax.plot([193, 193], [ax.get_ylim()[0], ax.get_ylim()[1]], '--', color='red')
ax.set_ylim(temp_ylim)
# print(ax.get_ylim())
ax.set_ylabel('cell proportion')
ax.set_xlabel('matching error')
ax.set_title('Matching error of gene cluster ' + str(ind_cluster) + ' on all cells')
# ax.set_ylabel('proportion de cellules')
# ax.set_xlabel('erreur de couverture')
# select the cells that have the lower score
threshold = 193
selected_cells = []
other_cells = []
for error in range(threshold+1):
selected_cells += histo.positive_histogram[error]
for error in range(threshold+1,len(histo.positive_histogram)):
other_cells += histo.positive_histogram[error]
# plot a UMAP with the selected cells
fig, ax = plt.subplots()
ax.set_xlabel('UMAP 1')
ax.set_ylabel('UMAP 2')
# ax.set_title('Selected cells from gene cluster ' + str(ind_cluster))
colors = ['red' if barcode in selected_cells else 'black' for barcode in df_coordinates.index]
ax.scatter(df_coordinates.loc[selected_cells]['UMAP_1'], df_coordinates.loc[selected_cells]['UMAP_2'], c='red', s=3, label='selected cells', zorder=1)
# ax.scatter(df_coordinates.loc[selected_cells]['UMAP_1'], df_coordinates.loc[selected_cells]['UMAP_2'], c='red', s=3, label='cellules sélectionnées', zorder=1)
ax.scatter(df_coordinates.loc[other_cells]['UMAP_1'], df_coordinates.loc[other_cells]['UMAP_2'], c='black', s=3, label='other cells', zorder=0)
# ax.scatter(df_coordinates.loc[other_cells]['UMAP_1'], df_coordinates.loc[other_cells]['UMAP_2'], c='black', s=3, label='autres cellules', zorder=0)
ax.set_aspect((ax.get_xlim()[1]-ax.get_xlim()[0])/(ax.get_ylim()[1]-ax.get_ylim()[0]))
ax.set_title('Cells selected through the matching error from cluster ' + str(ind_cluster))
ax.legend(loc='upper right')
# print the gene clusters
print('\n\nList of the gene clusters:\n\n')
print_gene_clusters(gene_clusters)
# creation of a DataFrame with the gene clusters
#create_dataframe_clusters('../../coexpression/gene_clusters.csv', gene_clusters)
# creation of a dataframe with the cell scores (between 0 and 1)
#create_cell_score_dataframe('../../coexpression/gene_cluster_cell_scores.csv', df_discrete, gene_clusters)
# create a sub dataset with the selected cells (presumably Myeloids)
create_sub_dataframe('../../dataset/Imagine/sub_dataset_discrete.csv', df_discrete, selected_cells)
plt.show()
return
#------------------------------------------------------------------------------
# process the sub network clusters
#------------------------------------------------------------------------------
def process_sub_network_clusters():
# read the sub discrete matrix (with only cells selected from the first network)
filename = '../../dataset/Imagine/sub_dataset_discrete.csv'
df_discrete_sub = pd.read_csv(filename, index_col=0)
# print(df_discrete_sub.head())
# print(df_discrete_sub.shape)
# read the complete discrete matrix (to visualize the cluster matching error on all the cells)
filename = '../../dataset/Imagine/discrete_matrix.csv'
df_discrete = pd.read_csv(filename, index_col=0)
# read the embedding coordinates
df_coordinates = pd.read_csv('../../dataset/Imagine/umap_coordinates.csv', index_col=0)
df_coordinates_sub = df_coordinates.loc[df_discrete_sub.index] # select only cells in the sub dataset
# read the gene clusters
gene_clusters = load_clusters('../../dataset/Imagine/coexpression/sub_network_gene_clusters.txt')
###########################################################################
# Manual sub selection of the myeloids
selected_indexes = []
for index in df_coordinates_sub.index:
pos = (df_coordinates_sub['UMAP_1'][index], df_coordinates_sub['UMAP_2'][index])
if pos[0] >= -8 and pos[0] <= 8.0:
if pos[1] >= -16 and pos[1] <= -11:
selected_indexes.append(index)
df_coordinates_sub_manual = df_coordinates_sub.loc[selected_indexes]
print(df_coordinates_sub.shape[0]-df_coordinates_sub_manual.shape[0], ' cells are discarded for the visualization')
###########################################################################
print('number of clusters: ', len(gene_clusters))
print('\n\nList of the gene clusters:\n\n')
print_gene_clusters(gene_clusters)
    # create an artificial instance to compute the matching error
instance = Instance.create_random_instance(df_discrete_sub.copy(deep=False), 0.5)
selected_clusters = [2,3,5,6]
print('Selection of clusters: ', selected_clusters)
# Display the cell score for each gene cluster on the UMAP
plot_cell_scores(df_discrete_sub, df_coordinates_sub_manual, gene_clusters, selected_clusters, 2, 'Clusters matching error (sub graph) on the myeloids')
# Same visualization on all the cells this time
plot_cell_scores(df_discrete, df_coordinates, gene_clusters, selected_clusters, 2, 'Clusters matching error (sub graph) on all the cells')
# analysis of cluster 5 matching error
# cluster_index = 5
# error_threshold = 6
# decompose_cluster(df_coordinates_sub, instance, gene_clusters, cluster_index, error_threshold, False)
    instance_global = Instance.create_random_instance(df_discrete.copy(deep=False), 0.5) # create an artificial instance on all cells
# output the clusters formatted in LaTex
# for cluster_index in selected_clusters:
# cluster = gene_clusters[cluster_index]
# print('gene cluster ', cluster_index, '(', len(cluster), ' atoms)')
# # print_cluster_latex(instance, gene_clusters[cluster_index])
# print_cluster_latex(instance_global, gene_clusters[cluster_index])
# print('\n\n')
# analysis of cluster 5 matching error on all cells
# cluster_index = 5
# error_threshold = 8
# decompose_cluster(df_coordinates, instance_global, gene_clusters, cluster_index, error_threshold, True)
# cluster 5: inflammed cells
# analysis of cluster 5: selection of cells and decomposition into two groups: cells in the sub dataset and other cells
cluster_index = 5
# visualization of the matching error histogram of cluster #5 on all the cells
body = [instance.get_atom_index(atom) for atom in gene_clusters[cluster_index] ]
histo = histogram.Histogram(instance_global, body, histogram.Histogram_type.GLOBAL)
fig,ax = plt.subplots()
visualizer.plot_global_histogram(ax, histo)
# values = [ error for error in range(len(histo.positive_histogram)) for _ in histo.positive_histogram[error] ]
# print('n values: ', len(values))
# print('len body: ', len(body))
# print(values[:50])
# ax.hist(values, 10, density=True, edgecolor='black')
ax.set_ylabel('cell proportion')
ax.set_xlabel('matching error')
ax.set_title('Matching error of cluster ' + str(cluster_index) + ' on all cells')
    # visualization of the selected cells on the UMAP
error_threshold = 5
body = [instance_global.get_atom_index(atom) for atom in gene_clusters[cluster_index] ]
histo = histogram.Histogram(instance_global, body, histogram.Histogram_type.GLOBAL)
selected_cells = [cell for error in range(error_threshold+1) for cell in histo.positive_histogram[error]]
other_cells = [barcode for barcode in df_discrete.index if not barcode in selected_cells]
positive_cells = [barcode for barcode in selected_cells if barcode in df_discrete_sub.index]
negative_cells = [barcode for barcode in selected_cells if not barcode in positive_cells]
fig,ax = plt.subplots()
ax.scatter(df_coordinates.loc[other_cells]['UMAP_1'].values, df_coordinates.loc[other_cells]['UMAP_2'].values, c='k', s=3, label='other cells')
ax.scatter(df_coordinates.loc[positive_cells]['UMAP_1'].values, df_coordinates.loc[positive_cells]['UMAP_2'].values, c='r', s=3, label='selected myeloids')
    ax.scatter(df_coordinates.loc[negative_cells]['UMAP_1'].values, df_coordinates.loc[negative_cells]['UMAP_2'].values, c='b', s=3, label='selected non-myeloids')
ax.set_title('Cells selected from cluster 5 (red and blue)' + ' threshold = ' + str(error_threshold))
ax.set_xlabel('UMAP_1')
ax.set_ylabel('UMAP_2')
ax.legend()
df_selection = pd.DataFrame([1 for _ in positive_cells] + [0 for _ in negative_cells], index=positive_cells+negative_cells, columns=['selection'])
filename = '../../dataset/Imagine/coexpression/myeloid_c5_selection_myeloid_vs_others.csv'
df_selection.to_csv(filename)
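    # the saved CSV has one row per selected cell and a single 'selection' column:
    # 1 for cells also present in the myeloid sub dataset (red above), 0 for the others (blue above)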
# analysis of cluster 6 (CD8) matching error
cluster_index = 6
body = [instance.get_atom_index(atom) for atom in gene_clusters[cluster_index] ]
body_global = [instance_global.get_atom_index(atom) for atom in gene_clusters[cluster_index]]
# print(body)
histo_global = histogram.Histogram(instance_global, body_global, histogram.Histogram_type.GLOBAL)
fig,ax = plt.subplots()
visualizer.plot_global_histogram(ax, histo_global)
ax.set_ylabel('cell proportion')
ax.set_xlabel('matching error')
ax.set_title('Matching error of cluster ' + str(cluster_index) + ' on all cells')
error_threshold = 9
decompose_cluster(df_coordinates_sub, instance, gene_clusters, cluster_index, error_threshold, False)
# display the selected cells
    selected_cells = [elt for error in range(error_threshold) for elt in histo_global.positive_histogram[error]] # histogram of cluster 6 computed on all cells
other_cells = [barcode for barcode in df_discrete.index if not barcode in selected_cells]
fig, ax = plt.subplots()
ax.scatter(df_coordinates.loc[other_cells]['UMAP_1'].values, df_coordinates.loc[other_cells]['UMAP_2'].values, c='k', s=3, label = 'other cells')
ax.scatter(df_coordinates.loc[selected_cells]['UMAP_1'].values, df_coordinates.loc[selected_cells]['UMAP_2'].values, c='red', s=3, label='selected cells')
ax.set_title('Cells selected from the matching error of cluster ' + str(cluster_index) + ' (threshold = ' + str(error_threshold) + ')')
ax.set_xlabel('UMAP_1')
ax.set_ylabel('UMAP_2')
ax.legend()
# fig, ax = plt.subplots()
# plot_rule_error(df_discrete, df_coordinates, instance_global, body_global, ax)
# print the gene clusters
# print_gene_clusters(gene_clusters)
# creation of a DataFrame with the gene clusters
# create_dataframe_clusters('../../dataset/Imagine/coexpression/sub_network_gene_clusters.csv', gene_clusters)
# creation of a dataframe with the cell scores (between 0 and 1)
# create_cell_score_dataframe('../../dataset/Imagine/coexpression/sub_network_gene_cluster_cell_scores.csv', df_discrete, gene_clusters)
plt.show()
return
process_global_clusters()
process_sub_network_clusters()
|
py
|
1a565343903c9d0ae84e67cfcccdb6a1fc1e7e4f
|
# -*- coding: utf-8 -*-
from anima import logger
from anima.ui.lib import QtGui, QtWidgets
class VersionsTableWidget(QtWidgets.QTableWidget):
"""A QTableWidget derivative specialized to hold version data
"""
def __init__(self, parent=None, *args, **kwargs):
QtWidgets.QTableWidget.__init__(self, parent, *args, **kwargs)
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
# self.setAlternatingRowColors(True)
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.setShowGrid(False)
        self.setColumnCount(5)
        self.setObjectName("previous_versions_table_widget")
self.setRowCount(0)
self.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem())
self.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem())
self.setHorizontalHeaderItem(2, QtWidgets.QTableWidgetItem())
self.setHorizontalHeaderItem(3, QtWidgets.QTableWidgetItem())
self.setHorizontalHeaderItem(4, QtWidgets.QTableWidgetItem())
self.horizontalHeader().setStretchLastSection(True)
self.verticalHeader().setStretchLastSection(False)
tool_tip_html = \
"<html><head/><body><p>Right click to:</p><ul style=\"" \
"margin-top: 0px; margin-bottom: 0px; margin-left: 0px; " \
"margin-right: 0px; -qt-list-indent: 1;\"><li><span style=\" " \
"font-weight:600;\">Copy Path</span></li><li><span style=\" " \
"font-weight:600;\">Browse Path</span></li><li><span style=\" " \
"font-weight:600;\">Change Description</span></li></ul>" \
"<p>Double click to:</p><ul style=\"margin-top: 0px; " \
"margin-bottom: 0px; margin-left: 0px; margin-right: 0px; " \
"-qt-list-indent: 1;\"><li style=\" margin-top:12px; " \
"margin-bottom:12px; margin-left:0px; margin-right:0px; " \
"-qt-block-indent:0; text-indent:0px;\"><span style=\" " \
"font-weight:600;\">Open</span></li></ul></body></html>"
try:
self.setToolTip(
QtWidgets.QApplication.translate(
"Dialog",
tool_tip_html,
None,
QtWidgets.QApplication.UnicodeUTF8
)
)
except AttributeError:
self.setToolTip(
QtWidgets.QApplication.translate(
"Dialog",
tool_tip_html,
None
)
)
self.versions = []
self.labels = [
'#',
'App',
'Created By',
'Updated By',
'Size',
'Date',
'Description',
]
self.setColumnCount(len(self.labels))
def clear(self):
"""overridden clear method
"""
QtWidgets.QTableWidget.clear(self)
self.versions = []
# reset the labels
self.setHorizontalHeaderLabels(self.labels)
def select_version(self, version):
"""selects the given version in the list
"""
# select the version in the previous version list
index = -1
        for i, prev_version in enumerate(self.versions):
            if prev_version.id == version.id:
index = i
break
logger.debug('current index: %s' % index)
# select the row
if index != -1:
item = self.item(index, 0)
logger.debug('item : %s' % item)
self.setCurrentItem(item)
@property
def current_version(self):
"""returns the current selected version from the table
"""
index = self.currentRow()
try:
version = self.versions[index]
return version
except IndexError:
return None
def update_content(self, versions):
"""updates the content with the given versions data
"""
import os
import datetime
logger.debug('VersionsTableWidget.update_content() is started')
self.clear()
self.versions = versions
self.setRowCount(len(versions))
def set_published_font(item):
"""sets the font for the given item
            :param item: a QTableWidgetItem
"""
my_font = item.font()
my_font.setBold(True)
item.setFont(my_font)
foreground = item.foreground()
foreground.setColor(QtGui.QColor(0, 192, 0))
item.setForeground(foreground)
# update the previous versions list
from anima import defaults
for i, version in enumerate(versions):
is_published = version.is_published
absolute_full_path = os.path.normpath(
os.path.expandvars(version.full_path)
).replace('\\', '/')
version_file_exists = os.path.exists(absolute_full_path)
c = 0
# ------------------------------------
# version_number
item = QtWidgets.QTableWidgetItem(str(version.version_number))
# align to center and vertical center
item.setTextAlignment(0x0004 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
# created_with
item = QtWidgets.QTableWidgetItem()
if version.created_with:
from anima.ui import utils as ui_utils
app_icon = ui_utils.get_icon(version.created_with.lower())
if app_icon:
item.setIcon(app_icon)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
            # created_by user name
created_by = ''
if version.created_by_id:
created_by = defaults.user_names_lut[version.created_by_id]
item = QtWidgets.QTableWidgetItem(created_by)
# align to left and vertical center
item.setTextAlignment(0x0001 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
            # updated_by user name
updated_by = ''
if version.updated_by_id:
updated_by = defaults.user_names_lut[version.updated_by_id]
item = QtWidgets.QTableWidgetItem(updated_by)
# align to left and vertical center
item.setTextAlignment(0x0001 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
# file size
# get the file size
# file_size_format = "%.2f MB"
file_size = -1
if version_file_exists:
file_size = float(
os.path.getsize(absolute_full_path)) / 1048576
item = QtWidgets.QTableWidgetItem(
defaults.file_size_format % file_size
)
# align to left and vertical center
item.setTextAlignment(0x0001 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
# date
# get the file date
file_date = datetime.datetime.today()
if version_file_exists:
file_date = datetime.datetime.fromtimestamp(
os.path.getmtime(absolute_full_path)
)
item = QtWidgets.QTableWidgetItem(
file_date.strftime(defaults.date_time_format)
)
# align to left and vertical center
item.setTextAlignment(0x0001 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
# ------------------------------------
# description
item = QtWidgets.QTableWidgetItem(version.description)
# align to left and vertical center
item.setTextAlignment(0x0001 | 0x0080)
if is_published:
set_published_font(item)
if not version_file_exists:
item.setBackground(QtGui.QColor(64, 0, 0))
self.setItem(i, c, item)
c += 1
# ------------------------------------
        # resize rows and columns to fit their contents
self.resizeRowsToContents()
self.resizeColumnsToContents()
self.resizeRowsToContents()
logger.debug('VersionsTableWidget.update_content() is finished')
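# Minimal usage sketch (illustrative only; assumes a running Qt application and a
# list of version objects shaped like the ones handled above):
#
#   app = QtWidgets.QApplication([])
#   table = VersionsTableWidget()
#   table.update_content(versions)  # `versions` queried from the production database
#   table.select_version(versions[0])
#   table.show()
#   app.exec_()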
|
py
|
1a56550003a685d5b5ced64aed985b4a88c7f89a
|
"""
AWR + SAC from demo experiment
"""
from railrl.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from railrl.demos.source.mdp_path_loader import MDPPathLoader
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
num_epochs=100,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
replay_buffer_size=int(1E6),
algorithm="SAC",
version="normal",
collection_mode='batch',
layer_size=256,
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
policy_weight_decay=1e-4,
bc_loss_type="mle",
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_path=["demos/icml2020/hand/door.npy"],
demo_off_policy_path=[
"/home/ashvin/data/s3doodad/ashvin/icml2020/hand/door/demo-bc5/run0/id*/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run4/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run5/video_*.p",
],
),
logger_variant=dict(
tensorboard=True,
),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
)
search_space = {
'env': ["door-v0", ],
'seedid': range(5),
'trainer_kwargs.beta': [10, ],
}
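    # the deterministic sweep below expands search_space into its cross product
    # (1 env x 5 seeds x 1 beta value = 5 variants), each combined with the defaults in `variant`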
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
|
py
|
1a56574d93437e8b8746d2bb5842561a8db12eae
|
#!/usr/bin/python
import math
import sys
from PIL import Image
from util import Helper, ImageHelper
from util.CharType import CharType, maps as char_maps
from util.ImageType import ImageType
BRAILLE_BASE = int('0x2800', 16)
def convert(pixel, char_type=CharType.ASCII):
    # clamp the index so ramps whose length does not divide 256 evenly cannot overflow on the brightest pixels
    ramp = char_maps[char_type]
    return ramp[min(pixel // (256 // len(ramp)), len(ramp) - 1)]
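# e.g. with a 16-character ramp (an assumed length, purely for illustration),
# 256 // 16 == 16, so pixels 0-15 map to the first character and 240-255 to the last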
def convert_to_braille(image, threshold=128, use_dot_spacing=False):
"""
convert sets of 2x4 pixels into their braille equivalent by checking if a given pixel should be displayed or not
and setting its respective bit
see https://en.wikipedia.org/wiki/Braille_Patterns#Block for more info
"""
width, height = image.size
text = [[0] * math.ceil(width / 2) for _ in range(math.ceil(height / 4))]
data = image.load()
# convert every 2x4 area in its braille equivalent
for y in range(0, height, 4):
for x in range(0, width, 2):
unicode_offset = 0
for i in range(2):
for j in range(4):
if x + i < width and y + j < height:
                        # set the dot bit when the pixel is dark; per the Unicode block,
                        # dots 1-3/4-6 are bits 0-5 column-wise and dots 7-8 are bits 6-7
                        bit_index = (i * 3) + j if j < 3 else 6 + i
                        unicode_offset += (data[x + i, y + j] <= threshold) << bit_index
if use_dot_spacing and unicode_offset == 0:
unicode_offset = 4 # blank braille char has kerning issues for some fonts
text[y // 4][x // 2] = chr(BRAILLE_BASE + unicode_offset)
return text
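# Worked example of the mapping above (for illustration): a fully dark 2x4 block sets
# all eight dot bits, so chr(BRAILLE_BASE + 0xFF) == '⣿', while a block whose only dark
# pixel is the top-left one sets dot 1 alone, giving chr(BRAILLE_BASE + 0x01) == '⠁'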
def convert_to_text(image, char_type=CharType.ASCII):
if char_type == CharType.BRAILLE:
return convert_to_braille(image)
text = []
data = image.load()
for y in range(image.height):
text.append([])
for x in range(image.width):
text[y].append(convert(data[x, y], char_type))
return text
def setup_text_image(text):
image = ''
for i in range(len(text)):
image += ''.join(text[i]) + '\n'
return image
def store_text(text, input_filename):
with open(ImageHelper.resource_folder(f'output/{input_filename}.txt'), 'w', encoding='utf-8') as output_file:
output_file.write(text)
def gray_scale_image():
image = Image.new('L', [255, 255])
data = image.load()
for x in range(image.width):
for y in range(image.height):
data[x, y] = x
return image
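# One way to preview a character ramp (illustrative sketch, not part of the original flow):
# render the gradient image defined above, e.g.
#   print(to_ascii_from_image(gray_scale_image(), 'gradient', invert=False))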
def to_ascii_from_image(image, name='image', invert=True, char_type=CharType.BRAILLE, image_type=ImageType.DITHER):
"""
convert to text via the following steps:
1. fit image to terminal screen size
2. invert image if needed
3. convert image based on given ImageType
4. convert pixels in image to their given CharType equivalent
5. join the 2d image array into a single string
"""
image = Helper.fit_image_to_terminal(image, char_type == CharType.BRAILLE)
if invert:
image = ImageHelper.invert(image)
image = ImageHelper.convert_image(image, image_type)
text_array = convert_to_text(image, char_type)
ascii_text = setup_text_image(text_array)
if ImageHelper.DEBUG:
store_text(ascii_text, name)
return ascii_text
def to_ascii(input_filename):
return to_ascii_from_image(Image.open(input_filename).convert('RGB'), input_filename)
def main():
if len(sys.argv) < 2:
raise RuntimeError('Usage: this_script.py <input file>')
input_filename = sys.argv[1]
text = to_ascii(input_filename)
print(text)
if __name__ == '__main__':
main()
# input('Press enter to exit')
|