ext | sha | content
---|---|---|
py | 1a5b80bf6d7c68f9192cf3b6e36fe6be7ba1d80d |
from .backend_template import BackendTemplate
import warnings
try:
import pandas as pd
# WHEN CHECKING FOR THE TYPE OF AN OBJECT IN A SERIES BEWARE THAT:
#
# series = pd.Series([1, 2, 3, 4])
#
# for s in series:
# print(str(type(s)))
    # outputs:
    # `<class 'int'>`
    # `<class 'int'>`
    # `<class 'int'>`
    # `<class 'int'>`
#
# str(type(series[2]))
# outputs:
# "<class 'numpy.int64'>"
def is_consistent(series: pd.Series) -> bool:
"""Check that all the values in the series are of the same type."""
if series.dtype != "object":
return True
expected_type = str(type(series.values[0]))
return all(
expected_type == str(type(s))
for s in series
)
def get_vector_dtype(series: pd.Series) -> str:
"""Get which type to use to serialize the type of the series"""
t = str(series.dtype)
if t == "object":
return "str"
return t
    common_message = (
        " contains values of multiple types, therefore the data will be saved as requested"
        " but we cannot guarantee that"
        " they will be loaded with the same types, as pandas does not support this.\n"
        "Consider using pickle (.pkl) or compress pickle (.pkl.gz, ...) to cache this complex type"
        " in a consistent manner."
    )
class PandasCsvBackend(BackendTemplate):
SUPPORTED_EXTENSIONS = {
".csv":",",
".csv.gz":",",
".csv.bz2":",",
".csv.xz":",",
".csv.zip":",",
".tsv":"\t",
".tsv.gz":"\t",
".tsv.bz2":"\t",
".tsv.xz":"\t",
".tsv.zip":"\t",
}
def __init__(self, load_kwargs, dump_kwargs):
load_kwargs = load_kwargs.copy()
load_kwargs.setdefault("index_col", 0)
super(PandasCsvBackend, self).__init__(load_kwargs, dump_kwargs)
@staticmethod
def support_path(path:str) -> bool:
return any(
path.endswith(extension)
for extension in PandasCsvBackend.SUPPORTED_EXTENSIONS
)
@staticmethod
def can_deserialize(metadata: dict, path:str) -> bool:
return PandasCsvBackend.support_path(path) and metadata.get("type", None) == "pandas"
@staticmethod
def can_serialize(obj_to_serialize: object, path:str) -> bool:
return PandasCsvBackend.support_path(path) and isinstance(obj_to_serialize, pd.DataFrame)
def dump(self, obj_to_serialize: pd.DataFrame, path:str) -> dict:
for column in obj_to_serialize.columns:
if not is_consistent(obj_to_serialize[column]):
warnings.warn("The column '{}'".format(column) + common_message )
if not is_consistent(obj_to_serialize.index):
warnings.warn("The index" + common_message)
if not is_consistent(obj_to_serialize.columns):
warnings.warn("The column names" + common_message)
obj_to_serialize.to_csv(
path,
sep=self.SUPPORTED_EXTENSIONS[
next(
x
for x in self.SUPPORTED_EXTENSIONS
if path.endswith(x)
)
],
**self._dump_kwargs
)
# Return the types of the columns to be saved as metadata
return {
"type":"pandas",
"columns_types":{
column:get_vector_dtype(obj_to_serialize[column])
for column in obj_to_serialize.columns
},
"index_type":get_vector_dtype(obj_to_serialize.index),
"columns_names_type":get_vector_dtype(obj_to_serialize.columns),
}
def load(self, metadata:dict, path:str) -> object:
df = pd.read_csv(
path,
sep=self.SUPPORTED_EXTENSIONS[
next(
x
for x in self.SUPPORTED_EXTENSIONS
if path.endswith(x)
)
],
**self._load_kwargs
)
# Convert back the types of the columns to the original ones
df = df.astype(metadata["columns_types"])
df.index = df.index.astype(metadata["index_type"])
df.columns = df.columns.astype(metadata["columns_names_type"])
return df
except ModuleNotFoundError:
PandasCsvBackend = None
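
# A minimal usage sketch (not part of the original backend). It assumes only what the code
# above implies, i.e. that BackendTemplate stores the two kwarg dicts passed to __init__ as
# self._load_kwargs and self._dump_kwargs. The path and data below are placeholders.
if __name__ == "__main__" and PandasCsvBackend is not None:
    df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    backend = PandasCsvBackend(load_kwargs={}, dump_kwargs={})
    if backend.can_serialize(df, "example.csv"):
        metadata = backend.dump(df, "example.csv")        # writes the CSV and returns dtype metadata
        restored = backend.load(metadata, "example.csv")  # re-applies the stored dtypes on load
        print(restored.dtypes)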
|
py | 1a5b827f17036c831b99a3b557b7ea9d9fba7791 |
import psycopg2 as psy
import sqlalchemy
import datetime as dt
from sqlalchemy import (
Table,
Column,
Index,
Integer,
String,
Text,
Boolean,
ForeignKey,
UniqueConstraint,
)
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm import (
relationship,
)
Base = declarative_base()
###### Table defs ###############################
# association between curators and studies
curator_study_table = Table('curator_study_map', Base.metadata,
Column('study_id', String, ForeignKey('study.id'), primary_key=True),
Column('curator_id', Integer, ForeignKey('curator.id'), primary_key=True)
)
# association between trees and otus
tree_otu_table = Table('tree_otu_map', Base.metadata,
Column('ott_id', Integer, ForeignKey('taxonomy.id'), primary_key=True),
Column('tree_id', Integer, ForeignKey('tree.id'), primary_key=True)
)
class Study(Base):
__tablename__ = 'study'
# The studyID is of the form prefix_id, so String, not Int.
id = Column(String, primary_key=True, index=True)
year = Column(Integer)
data = Column(JSONB)
#trees = relationship('Tree',backref='study')
# many-to-many study<-->curator relationship
curators = relationship('Curator',
secondary=curator_study_table,
back_populates='studies')
class Tree(Base):
__tablename__ = 'tree'
__table_args__ = (
UniqueConstraint('id','study_id'),
)
id = Column(Integer,primary_key=True)
tree_id = Column(String, nullable=False)
data = Column(JSONB)
study_id = Column(String, ForeignKey("study.id"), nullable=False)
ntips = Column(Integer)
proposed = Column(Boolean)
treebase_id = Column(String)
# many-to-many tree<-->otu relationship
otus = relationship('Taxonomy',
secondary=tree_otu_table,
back_populates='trees')
class Curator(Base):
__tablename__ = 'curator'
id = Column(Integer,primary_key=True)
name = Column(String,nullable=False,unique=True)
# many-to-many study<-->curator relationship
studies = relationship('Study',
secondary=curator_study_table,
back_populates='curators')
class Taxonomy(Base):
__tablename__ = 'taxonomy'
id = Column(Integer, primary_key=True)
name = Column(String,nullable=False)
parent = Column(Integer)
trees = relationship('Tree',
secondary=tree_otu_table,
back_populates='otus')
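
# A minimal sketch (not part of the original module) of how these models could be wired to a
# database. The connection URL and the sample row are placeholders; the calls used
# (create_engine, Base.metadata.create_all, sessionmaker) are standard SQLAlchemy.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('postgresql://user:password@localhost/otindex')  # hypothetical URL
    Base.metadata.create_all(engine)  # creates study, tree, curator, taxonomy and the association tables
    session = sessionmaker(bind=engine)()
    session.add(Curator(name='example-curator'))  # hypothetical row
    session.commit()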
|
py | 1a5b845a5cb379e8ea7881deca0bc3f11fd80b75 |
# -*- coding: utf-8 -*-
"""The filter file CLI arguments helper."""
from __future__ import unicode_literals
import os
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class FilterFileArgumentsHelper(interface.ArgumentsHelper):
"""Filter file CLI arguments helper."""
NAME = 'filter_file'
DESCRIPTION = 'Filter file command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'-f', '--file_filter', '--file-filter', dest='file_filter',
action='store', type=str, default=None, help=(
'List of files to include for targeted collection of files to '
'parse, one line per file path, setup is /path|file - where each '
'element can contain either a variable set in the preprocessing '
'stage or a regular expression.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
filter_file = cls._ParseStringOption(options, 'file_filter')
# Search the data location for the filter file.
if filter_file and not os.path.isfile(filter_file):
data_location = getattr(configuration_object, '_data_location', None)
if data_location:
filter_file_basename = os.path.basename(filter_file)
filter_file_path = os.path.join(data_location, filter_file_basename)
if os.path.isfile(filter_file_path):
filter_file = filter_file_path
if filter_file and not os.path.isfile(filter_file):
raise errors.BadConfigOption(
'No such collection filter file: {0:s}.'.format(filter_file))
setattr(configuration_object, '_filter_file', filter_file)
manager.ArgumentHelperManager.RegisterHelper(FilterFileArgumentsHelper)
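
# A minimal sketch (not part of plaso) of how this helper is typically exercised: the helper
# adds its flag to an argparse parser and then configures a CLITool instance. The filter file
# path below is a placeholder.
if __name__ == '__main__':
  import argparse

  parser = argparse.ArgumentParser()
  FilterFileArgumentsHelper.AddArguments(parser)
  options = parser.parse_args(['--file_filter', 'filter.txt'])
  tool = tools.CLITool()
  try:
    FilterFileArgumentsHelper.ParseOptions(options, tool)
  except errors.BadConfigOption as exception:
    print(exception)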
|
py | 1a5b84883910b659d2da987d642095ff8d96d585 |
from ethereumetl.domain.receipt_log import EthReceiptLog
from ethereumetl.service.token_transfer_v2_extractor import EthTokenTransferV2Extractor, word_to_address
from ethereumetl.service.token_transfer_v2_extractor import TRANSFER_EVENT_TOPICS, ERC1155_TRANSFER_SINGLE_TOPIC, ERC721_ERC_20_TRANSFER_TOPIC, ERC1155_TRANSFER_BATCH_TOPIC
from ethereumetl.utils import to_normalized_address
token_transfer_extractor = EthTokenTransferV2Extractor()
#https://etherscan.io/tx/0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8#eventlog
def test_extract_transfer_from_receipt_log_erc20():
log = EthReceiptLog()
log.address = '0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48'
log.block_number = 14051054
log.log_index = 0
log.topics = [ERC721_ERC_20_TRANSFER_TOPIC,
'0x0000000000000000000000007a686933fc67023aabd424f35ad0b883332e2222',
'0x00000000000000000000000016011b51e022766c352b29b0c1ed423489f4d3ca']
log.data = '0x0000000000000000000000000000000000000000000000000000000002faf080'
log.transaction_hash = '0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8'
token_transfers = token_transfer_extractor.extract_transfer_from_log(log)
assert len(token_transfers) == 1
assert token_transfers[0].token_id == '0x0000000000000000000000000000000000000000000000000000000000000001'
assert token_transfers[0].amount == '0x0000000000000000000000000000000000000000000000000000000002faf080'
assert token_transfers[0].block_number == 14051054
assert token_transfers[0].from_address == word_to_address('0x0000000000000000000000007a686933fc67023aabd424f35ad0b883332e2222')
assert token_transfers[0].to_address == word_to_address('0x00000000000000000000000016011b51e022766c352b29b0c1ed423489f4d3ca')
assert token_transfers[0].token_type == "ERC20"
assert token_transfers[0].contract_address == to_normalized_address('0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48')
assert token_transfers[0].transaction_hash == '0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8'
assert token_transfers[0].log_index == 0
#https://etherscan.io/tx/0x9fb4dd639dd74a24c8b1253a6199da294d08ce7587ada810c72fe89bc2225510#eventlog
def test_extract_transfer_from_receipt_log_erc721():
log = EthReceiptLog()
log.address = '0x716039ab9ce2780e35450b86dc6420f22460c380'
log.block_number = 14051620
log.log_index = 0
log.topics = [ERC721_ERC_20_TRANSFER_TOPIC,
'0x000000000000000000000000b5fdfbbddc872d08d0203cd6d69d5ce67eb4c761',
'0x00000000000000000000000040b060a0ac95db3d5211b687511632b46c5d3bb7',
'0x0000000000000000000000000000000000000000000000000000000000000735']
log.data = '0x'
log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
token_transfers = token_transfer_extractor.extract_transfer_from_log(log)
assert len(token_transfers) == 1
assert token_transfers[0].token_id == '0x0000000000000000000000000000000000000000000000000000000000000735'
assert token_transfers[0].amount == '0x0000000000000000000000000000000000000000000000000000000000000001'
assert token_transfers[0].block_number == 14051620
assert token_transfers[0].from_address == word_to_address('0x000000000000000000000000b5fdfbbddc872d08d0203cd6d69d5ce67eb4c761')
assert token_transfers[0].to_address == word_to_address('0x00000000000000000000000040b060a0ac95db3d5211b687511632b46c5d3bb7')
assert token_transfers[0].token_type == "ERC721"
assert token_transfers[0].contract_address == to_normalized_address('0x716039ab9ce2780e35450b86dc6420f22460c380')
assert token_transfers[0].transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
assert token_transfers[0].log_index == 0
#https://etherscan.io/tx/0xd72e66497d1614eff8136898043c22ad1d7c88e2831c57866fa5683430ef37c1#eventlog
def test_extract_transfer_from_receipt_log_erc1155_single():
log = EthReceiptLog()
log.address = '0x25c6413359059694A7FCa8e599Ae39Ce1C944Da2'
log.block_number = 1061946
log.log_index = 0
log.topics = [ERC1155_TRANSFER_SINGLE_TOPIC,
'0x0000000000000000000000004fee7b061c97c9c496b01dbce9cdb10c02f0a0be',
'0x000000000000000000000000ab3e5a900663ea8c573b8f893d540d331fbab9f5',
'0x0000000000000000000000006a36f56e0a1bc32e187408f1651195d58cf688bd']
log.data = '0x00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004'
log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
token_transfers = token_transfer_extractor.extract_transfer_from_log(log)
assert len(token_transfers) == 1
assert token_transfers[0].token_id == '0x0000000000000000000000000000000000000000000000000000000000000002'
assert token_transfers[0].amount == '0x0000000000000000000000000000000000000000000000000000000000000004'
assert token_transfers[0].block_number == 1061946
assert token_transfers[0].from_address == word_to_address('0x000000000000000000000000ab3e5a900663ea8c573b8f893d540d331fbab9f5')
assert token_transfers[0].to_address == word_to_address('0x0000000000000000000000006a36f56e0a1bc32e187408f1651195d58cf688bd')
assert token_transfers[0].token_type == "ERC1155"
assert token_transfers[0].contract_address == to_normalized_address('0x25c6413359059694A7FCa8e599Ae39Ce1C944Da2')
assert token_transfers[0].transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
assert token_transfers[0].log_index == 0
#https://etherscan.io/tx/0xca0a113c842a1305a49107ed7b9ebef69ccca9bee2a06d5c8230cedf72284498#eventlog
def test_extract_transfer_from_receipt_log_erc1155_batch():
log = EthReceiptLog()
log.address = '0x6cad6e1abc83068ea98924aef37e996ed02abf1c'
log.block_number = 1061946
log.log_index = 0
log.topics = [ERC1155_TRANSFER_BATCH_TOPIC,
'0x0000000000000000000000005bd25d2f4f26bc82a34de016d34612a28a0cd492',
'0x0000000000000000000000000000000000000000000000000000000000000000',
'0x000000000000000000000000991f3775c81d6f8331b9a812eda34ea48a7ea76d']
log.data = '0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001'
log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
token_transfers = token_transfer_extractor.extract_transfer_from_log(log)
assert len(token_transfers) == 10
    for index in range(len(token_transfers)):
        assert token_transfers[index].token_id == '0x%064x' % (index + 1)
        assert token_transfers[index].amount == '0x0000000000000000000000000000000000000000000000000000000000000001'
        assert token_transfers[index].block_number == 1061946
        assert token_transfers[index].from_address == word_to_address('0x0000000000000000000000000000000000000000000000000000000000000000')
        assert token_transfers[index].to_address == word_to_address('0x000000000000000000000000991f3775c81d6f8331b9a812eda34ea48a7ea76d')
        assert token_transfers[index].token_type == "ERC1155"
        assert token_transfers[index].contract_address == to_normalized_address('0x6cad6e1abc83068ea98924aef37e996ed02abf1c')
        assert token_transfers[index].transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
        assert token_transfers[index].log_index == 0
def word_to_address(param):
if param is None:
return None
elif len(param) >= 40:
return to_normalized_address('0x' + param[-40:])
else:
return to_normalized_address(param)
|
py | 1a5b8496b4d27db4aa672335bcbf7e7d364ac62e |
# (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-doc is a script that displays documentation on modules and other Ansible
# plugins. See https://docs.ansible.com/ for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import textwrap
import traceback
import yaml
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.loader import module_loader, action_loader, lookup_loader, callback_loader, cache_loader, \
vars_loader, connection_loader, strategy_loader, PluginLoader
from ansible.utils import plugin_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
''' displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
and it can create a short "snippet" which can be pasted into a playbook. '''
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.plugin_list = set()
def parse(self):
self.parser = CLI.base_parser(
            usage='usage: %prog [-l|-s] [options] [-t <plugin type>] [plugin]',
module_opts=True,
desc="plugin documentation tool",
epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available plugins')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified plugin(s)')
self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_plugins',
help='**For internal testing only** Show documentation for all plugins.')
self.parser.add_option("-t", "--type", action="store", default='module', dest='type', type='choice',
help='Choose which plugin type (defaults to "module")',
choices=['cache', 'callback', 'connection', 'inventory', 'lookup', 'module', 'strategy', 'vars'])
super(DocCLI, self).parse()
if [self.options.all_plugins, self.options.list_dir, self.options.show_snippet].count(True) > 1:
raise AnsibleOptionsError("Only one of -l, -s or -a can be used at the same time.")
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
plugin_type = self.options.type
# choose plugin type
if plugin_type == 'cache':
loader = cache_loader
elif plugin_type == 'callback':
loader = callback_loader
elif plugin_type == 'connection':
loader = connection_loader
elif plugin_type == 'lookup':
loader = lookup_loader
elif plugin_type == 'strategy':
loader = strategy_loader
elif plugin_type == 'vars':
loader = vars_loader
elif plugin_type == 'inventory':
loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins')
else:
loader = module_loader
# add to plugin path from command line
if self.options.module_path:
for path in self.options.module_path:
if path:
loader.add_directory(path)
# save only top level paths for errors
search_paths = DocCLI.print_paths(loader)
loader._paths = None # reset so we can use subdirs below
# list plugins for type
if self.options.list_dir:
paths = loader._get_paths()
for path in paths:
self.find_plugins(path, plugin_type)
self.pager(self.get_plugin_list_text(loader))
return 0
# process all plugins of type
if self.options.all_plugins:
paths = loader._get_paths()
for path in paths:
self.find_plugins(path, plugin_type)
self.args = sorted(set(self.plugin_list))
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line list
text = ''
for plugin in self.args:
try:
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
continue
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs, metadata = plugin_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.format_exc())
display.error("%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin))
continue
if doc is not None:
# assign from other sections
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
# generate extra data
if plugin_type == 'module':
# is there corresponding action plugin?
if plugin in action_loader:
doc['action'] = True
else:
doc['action'] = False
doc['filename'] = filename
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
if 'docuri' in doc:
doc['docuri'] = doc[plugin_type].replace('_', '-')
if self.options.show_snippet and plugin_type == 'module':
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.format_exc())
raise AnsibleError("%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e)))
if text:
self.pager(text)
return 0
def find_plugins(self, path, ptype):
display.vvvv("Searching %s for plugins" % path)
if not os.path.exists(path):
display.vvvv("%s does not exist" % path)
return
bkey = ptype.upper()
for plugin in os.listdir(path):
display.vvvv("Found %s" % plugin)
full_path = '/'.join([path, plugin])
if plugin.startswith('.'):
continue
elif os.path.isdir(full_path):
continue
elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif plugin.startswith('__'):
continue
elif plugin in C.IGNORE_FILES:
continue
            elif plugin.startswith('_'):
if os.path.islink(full_path): # avoids aliases
continue
plugin = os.path.splitext(plugin)[0] # removes the extension
plugin = plugin.lstrip('_') # remove underscore from deprecated plugins
if plugin not in plugin_docs.BLACKLIST.get(bkey, ()):
self.plugin_list.add(plugin)
display.vvvv("Added %s" % plugin)
def get_plugin_list_text(self, loader):
columns = display.columns
displace = max(len(x) for x in self.plugin_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for plugin in sorted(self.plugin_list):
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
doc = None
try:
doc, plainexamples, returndocs, metadata = plugin_docs.get_docstring(filename)
except:
display.warning("%s has a documentation formatting error" % plugin)
if not doc or not isinstance(doc, dict):
desc = 'UNDOCUMENTED'
display.warning("%s parsing did not produce documentation." % plugin)
else:
desc = self.tty_ify(doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip())
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if plugin.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
except Exception as e:
raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)))
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths(subdirs=False):
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" %s:" % (doc['module']))
pad = 31
subdent = " " * pad
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
if isinstance(opt['description'], string_types):
desc = CLI.tty_ify(opt['description'])
else:
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
desc = "(required) %s" % desc
o = '%s:' % o
text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def _dump_yaml(self, struct, indent):
return CLI.tty_ify('\n'.join([indent + line for line in yaml.dump(struct, default_flow_style=False, Dumper=AnsibleDumper).split('\n')]))
def add_fields(self, text, fields, limit, opt_indent):
for o in sorted(fields):
opt = fields[o]
required = opt.pop('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
for entry in opt['description']:
text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
else:
text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
del opt['description']
aliases = ''
if 'aliases' in opt:
if len(opt['aliases']) > 0:
aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
del opt['aliases']
choices = ''
if 'choices' in opt:
if len(opt['choices']) > 0:
choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
del opt['choices']
default = ''
if 'default' in opt or not required:
default = "[Default: %s" % str(opt.pop('default', '(null)')) + "]"
text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'options' in opt:
text.append("%soptions:\n" % opt_indent)
self.add_fields(text, opt.pop('options'), limit, opt_indent + opt_indent)
if 'spec' in opt:
text.append("%sspec:\n" % opt_indent)
self.add_fields(text, opt.pop('spec'), limit, opt_indent + opt_indent)
conf = {}
for config in ('env', 'ini', 'yaml', 'vars'):
if config in opt and opt[config]:
conf[config] = opt.pop(config)
if conf:
text.append(self._dump_yaml({'set_via': conf}, opt_indent))
for k in sorted(opt):
if k.startswith('_'):
continue
if isinstance(opt[k], string_types):
text.append('%s%s: %s' % (opt_indent, k, textwrap.fill(CLI.tty_ify(opt[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(opt[k], (list, tuple)):
text.append(CLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k]))))
else:
text.append(self._dump_yaml({k: opt[k]}, opt_indent))
text.append('')
@staticmethod
def get_support_block(doc):
# Note: 'curated' is deprecated and not used in any of the modules we ship
support_level_msg = {'core': 'The Ansible Core Team',
'network': 'The Ansible Network Team',
'certified': 'an Ansible Partner',
'community': 'The Ansible Community',
'curated': 'A Third Party',
}
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
return [" * This module is maintained by %s" % support_level_msg[doc['metadata']['supported_by']]]
return []
@staticmethod
def get_metadata_block(doc):
text = []
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
text.append("METADATA:")
text.append('\tSUPPORT LEVEL: %s' % doc['metadata']['supported_by'])
for k in (m for m in doc['metadata'] if m not in ('version', 'metadata_version', 'supported_by')):
if isinstance(k, list):
text.append("\t%s: %s" % (k.capitalize(), ", ".join(doc['metadata'][k])))
else:
text.append("\t%s: %s" % (k.capitalize(), doc['metadata'][k]))
return text
return []
def get_man_text(self, doc):
IGNORE = frozenset(['module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs', self.options.type])
opt_indent = " "
text = []
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> %s (%s)\n" % (doc.get(self.options.type, doc.get('plugin_type')).upper(), doc.pop('filename')))
if isinstance(doc['description'], list):
desc = " ".join(doc.pop('description'))
else:
desc = doc.pop('description')
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n")
if isinstance(doc['deprecated'], dict):
text.append("\tReason: %(why)s\n\tScheduled removal: Ansible %(version)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
else:
text.append("%s" % doc.pop('deprecated'))
text.append("\n")
try:
support_block = self.get_support_block(doc)
if support_block:
text.extend(support_block)
except:
            pass # FIXME: not supported by plugins
if doc.pop('action', False):
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'options' in doc and doc['options']:
text.append("OPTIONS (= is mandatory):\n")
self.add_fields(text, doc.pop('options'), limit, opt_indent)
text.append('')
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
text.append("NOTES:")
for note in doc['notes']:
text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append('')
del doc['notes']
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc.pop('requirements'))
text.append("REQUIREMENTS:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
# Generic handler
for k in sorted(doc):
if k in IGNORE or not doc[k]:
continue
if isinstance(doc[k], string_types):
text.append('%s: %s' % (k.upper(), textwrap.fill(CLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
else:
text.append(self._dump_yaml({k.upper(): doc[k]}, opt_indent))
del doc[k]
text.append('')
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
if isinstance(doc['plainexamples'], string_types):
text.append(doc.pop('plainexamples').strip())
else:
text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
text.append('')
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:\n")
if isinstance(doc['returndocs'], string_types):
text.append(doc.pop('returndocs'))
else:
text.append(yaml.dump(doc.pop('returndocs'), indent=2, default_flow_style=False))
text.append('')
try:
metadata_block = self.get_metadata_block(doc)
if metadata_block:
text.extend(metadata_block)
text.append('')
except:
pass # metadata is optional
return "\n".join(text)
|
py | 1a5b861777ff0526bce4b7197f32dae62077e261 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Storage operator.
"""
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
WILDCARD = '*'
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the
object is. (templated)
:type source_bucket: str
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:type source_object: str
:param destination_bucket: The destination Google cloud storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:type destination_bucket: str
:param destination_object: The destination name of the object in the
destination Google cloud storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
:type destination_object: str
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:type move_object: bool
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param last_modified_time: When specified, the objects will be copied or moved,
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type last_modified_time: datetime.datetime
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='copy_single_file',
source_bucket='data',
source_object='sales/sales-2017/january.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/january-backup.avro',
google_cloud_storage_conn_id=google_cloud_conn_id
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
google_cloud_storage_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='move_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
move_object=True,
google_cloud_storage_conn_id=google_cloud_conn_id
)
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket',
'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
move_object=False,
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
last_modified_time=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.move_object = move_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.last_modified_time = last_modified_time
def execute(self, context):
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to
)
if self.destination_bucket is None:
self.log.warning(
'destination_bucket is None. Defaulting it to source_bucket (%s)',
self.source_bucket)
self.destination_bucket = self.source_bucket
if WILDCARD in self.source_object:
total_wildcards = self.source_object.count(WILDCARD)
if total_wildcards > 1:
error_msg = "Only one wildcard '*' is allowed in source_object parameter. " \
"Found {} in {}.".format(total_wildcards, self.source_object)
raise AirflowException(error_msg)
prefix, delimiter = self.source_object.split(WILDCARD, 1)
objects = hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix,
self.destination_object, 1)
self._copy_single_object(hook=hook, source_object=source_object,
destination_object=destination_object)
else:
self._copy_single_object(hook=hook, source_object=self.source_object,
destination_object=self.destination_object)
def _copy_single_object(self, hook, source_object, destination_object):
if self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket,
source_object,
self.last_modified_time):
self.log.debug("Object has been modified after %s ", self.last_modified_time)
else:
return
self.log.info('Executing copy of gs://%s/%s to gs://%s/%s',
self.source_bucket, source_object,
self.destination_bucket, destination_object)
hook.rewrite(self.source_bucket, source_object,
self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
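
# A minimal DAG sketch (not part of the original module): the dag id, start date and bucket
# names are placeholders, and the operator arguments mirror the docstring examples above.
if __name__ == '__main__':
    import datetime
    from airflow import DAG

    with DAG('gcs_copy_example', start_date=datetime.datetime(2019, 1, 1),
             schedule_interval=None) as dag:
        copy_files = GoogleCloudStorageToGoogleCloudStorageOperator(
            task_id='copy_files',
            source_bucket='data',
            source_object='sales/sales-2017/*.avro',
            destination_bucket='data_backup',
            destination_object='copied_sales/2017/',
        )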
|
py | 1a5b87ca8a51f6e892bfea3d50115930e8b50b3d |
'''
This file writes and compiles the resume LaTeX file.
'''
import json
import os
import sys
from GetLatex import *
'''
Read the init tex file and add content from the pre-defined JSON.
args:
    filename: the output .tex filename for your resume
    js: resume JSON object
'''
def build(filename,js):
with open(filename,'w') as f:
with open("init.tex") as fin:
for l in fin:
if "Education" in l:
f.write(l)
f.write(getEductionLatex(js["education"]))
elif "Skill" in l:
f.write(l)
f.write(getSkillLatex(js["skillset"]))
elif "Internship" in l:
f.write(l)
f.write(getExperience(js["experience"]))
elif "Projects" in l:
f.write(l)
f.write(getProLatex(js["project"],False))
else:
f.write(l)
'''
main program
Read the resume JSON, copy it to the template folder, then build and compile it with LaTeX.
Finally, delete some temporary files and move your .tex and .pdf to the root folder.
args:
    file: the JSON file that defines your resume
'''
def main(file):
jobj = json.load(open(file))
build(jobj["filename"],jobj)
os.system("mv "+jobj["filename"]+" template/")
os.chdir("template")
os.system("xelatex -synctex=1 -interaction=nonstopmode {0}".format(jobj['filename']))
os.system("mv {0} ..".format(jobj["filename"][:-4]+'.pdf'))
os.system("mv {0} ..".format(jobj["filename"][:-4]+'.tex'))
os.system("rm {0}".format(jobj["filename"][:-4]+'.*'))
if __name__ == '__main__':
main(sys.argv[1])
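
# A hypothetical example (not from the original repo) of the JSON shape that build() and
# main() expect, inferred from the key lookups above. The values are placeholders; the item
# structure inside each list depends on the GetLatex helpers.
EXAMPLE_RESUME_JSON = {
    "filename": "resume.tex",
    "education": [],
    "skillset": [],
    "experience": [],
    "project": []
}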
|
py | 1a5b87f069877cc0dc369e1ab36909a6d1aa1527 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bot that scrapes RSS feeds.
Usage:
Run python3 bot.py --help for help.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
import feedparser
import pytz
import argparse
import datetime as dt
import yaml
from sqlitedict import SqliteDict
from telegram.error import TimedOut
from telegram.ext import Updater
from time import mktime
from utils import get_substring_or_empty
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
SGT = pytz.timezone('Asia/Singapore')
RSS_FROM_THIS_TIMESTAMP = dt.datetime.strptime('20 Mar 2019 2:30PM', '%d %b %Y %I:%M%p').astimezone(SGT)
def update_feeds(context):
job = context.job
bot = context.bot
chat_name, chat_id, seen_urls_dict, feeds = job.context
logger.debug("update_feeds called! chat_name = '%s' | chat_id = '%s'" % (chat_name, chat_id))
for feed in feeds: # feeds is a list of dicts with keys {name, url, disabled}
feed_name = feed['name']
if "disabled" in feed and feed['disabled']:
logger.debug("skipping '%s' since 'disabled' detected" % feed_name)
continue
# HTTP access on feed['url'] and grab the entries
try:
NewsFeed = feedparser.parse(feed['url'])
except:
logger.error("Exception when attempting to read and/or parse '%s' at URL %s", feed_name, feed['url'])
continue
for entry in NewsFeed.entries:
# Ignore if the link has already been seen before
if entry.link in seen_urls_dict:
break
# Ignore if any of the ignored_categories are found in the entry.category
if 'ignored_categories' in feed:
ignored = False
for c in feed['ignored_categories']:
if c in entry.category:
logger.info("Ignored because category = '%s': %s | %s | %s", entry.category, feed_name,
entry.published, entry.link)
ignored = True
# Mark as seen
seen_urls_dict[entry.link] = True
break
if ignored: continue
# Ignore if the published datetime is before RSS_FROM_THIS_TIMESTAMP
published_datetime = dt.datetime.fromtimestamp(mktime(entry.published_parsed)).replace(
tzinfo=pytz.utc).astimezone(SGT)
if published_datetime < RSS_FROM_THIS_TIMESTAMP:
break
budget = get_substring_or_empty(entry['summary'], '<b>Budget</b>: ', '<br /><b>')
hourly_rate = get_substring_or_empty(entry['summary'], '<b>Hourly Range</b>: ', '<br /><b>')
country = get_substring_or_empty(entry['summary'], '<b>Country</b>: ', '<br />')
text = """%s (%s)
%s
[%s]
Fixed: %s
Hourly: %s
From: %s
""" % (
entry.title,
published_datetime.strftime("%Y-%m-%d %H:%M"),
entry.link,
feed_name,
budget,
hourly_rate,
country
)
try:
bot.send_message(chat_id, text=text)
logger.info("Sent to chat '%s': %s | %s | %s", chat_name, feed_name, entry.published, entry.link)
# If this line is reached, then the message has been successfully sent
seen_urls_dict[entry.link] = True
except TimedOut as e:
logger.error("Timeout when attempting to send to chat '%s': %s | %s | %s | %s", chat_name, feed_name,
entry.published, entry.link, str(e))
except Exception as e:
logger.error("Exception when attempting to send to chat '%s': %s | %s | %s | %s", chat_name, feed_name,
entry.published, entry.link, str(e))
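
# A hypothetical example (not part of the original bot) of the feeds YAML structure that
# main() loads and update_feeds() consumes, inferred from the keys used above. Chat names,
# chat ids and URLs are placeholders.
EXAMPLE_FEEDS_YAML = """
chats:
  - chat_name: example_chat
    chat_id: "@example_channel"
    feeds:
      - name: Example feed
        url: https://example.com/rss
        disabled: false
        ignored_categories:
          - Some Category
"""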
def error(bot, update, telegram_error):
logger.warning('Update "%s" caused error "%s"', update, telegram_error)
def main():
# Command line parameters
parser = argparse.ArgumentParser(description='RSS Scraping Telegram Bot')
parser.add_argument('bot_token', action='store', default=None, help="Your bot's token")
# parser.add_argument('chat_id', action='store', default=None, help="The destination channel or chat in the format @channelname")
parser.add_argument('--interval', dest='interval', action='store', type=int, default=60,
help="Interval in seconds to refresh the RSS feeds")
parser.add_argument('--feeds', dest='feeds', action='store', type=str, default='feeds.yaml',
help="YAML file containing chats and feeds")
parser.add_argument('--seendb', dest='seendb', action='store', type=str, default='seen_urls.sqlite',
help="SQLite db for storing seen URLs")
parser.add_argument('--runonce', action='store_true', default=False, help="Scrape once and quit")
args = parser.parse_args()
# Open the "feeds.yaml" config file and read the feeds
with open(args.feeds, 'r') as stream:
try:
feeds_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Error while loading %s" % args.feeds)
raise exc
logger.info("RSS Scraping Telegram Bot starting up...")
updater = Updater(args.bot_token, use_context=True)
for chat in feeds_config['chats']:
seen_urls_dict = SqliteDict(args.seendb, autocommit=True, tablename=chat['chat_name'])
if args.runonce:
updater.job_queue.run_once(update_feeds, 0,
context=(chat['chat_name'], chat['chat_id'], seen_urls_dict, chat['feeds']))
else:
updater.job_queue.run_repeating(update_feeds, args.interval,
context=(chat['chat_name'], chat['chat_id'], seen_urls_dict, chat['feeds']))
# Get the dispatcher to register handlers
dp = updater.dispatcher
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
# SIGABRT. This should be used most of the time, since start_polling() is
# non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
|
py | 1a5b87f4e9a65ed44fc4041fa75849aa542397df |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _get_current_replica_id_as_int():
"""Returns the current replica ID as an integer, or `None`."""
replica_context = ds_context.get_replica_context()
if replica_context:
replica_id = replica_context.replica_id_in_sync_group
if not isinstance(replica_id, int):
replica_id = tensor_util.constant_value(replica_id)
else:
replica_id = distribute_lib.get_update_replica_id()
return replica_id
@tf_export("distribute.DistributedValues", v1=[])
class DistributedValues(object):
"""Base class for representing distributed values.
A subclass instance of DistributedValues is created when creating variables
within a distribution strategy, iterating a `tf.Dataset` or through
`strategy.run`. This base class should never be instantiated
directly. DistributedValues contains a value per replica. Depending on
the subclass, the values could either be synced on update, synced on demand,
or never synced.
DistributedValues can be reduced to obtain single value across replicas,
as input into `run` or the per replica values inspected
using `experimental_local_results`.
Example usage:
1. Created from Dataset:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
2. Returned by `run`:
>>> strategy = tf.distribute.MirroredStrategy()
>>> @tf.function
... def run():
... ctx = tf.distribute.get_replica_context()
... return ctx.replica_id_in_sync_group
>>> distributed_values = strategy.run(run)
3. As input into `run`:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> @tf.function
... def run(input):
... return input + 1.0
>>> updated_value = strategy.run(run, args=(distributed_values,))
4. Reduce value:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
... distributed_values,
... axis = 0)
5. Inspect per replica values:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> per_replica_values = strategy.experimental_local_results(
... distributed_values)
>>> per_replica_values
(<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([5., 6.], dtype=float32)>,)
"""
def __init__(self, values):
"""Should only be called by subclass __init__."""
self._values = tuple(values)
def _get(self):
"""Returns the value for the current device or raises a ValueError."""
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
return self._get_cross_replica()
else:
return self._values[replica_id]
def _get_cross_replica(self):
raise NotImplementedError(
"This method should be overridden by sub-classes which support cross-"
"replica accesses.")
def _get_closest(self):
"""Returns value in same replica or device if possible, else the _primary."""
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
# Try to find a value on the current device.
current_device = device_util.canonicalize(device_util.current())
for value in self._values:
if device_util.canonicalize(value.device) == current_device:
return value
return self._primary
else:
return self._values[replica_id]
@property
def _primary(self):
"""Returns a representative component."""
return self._values[0]
@property
def _devices(self):
return tuple(v.device for v in self._values)
def __str__(self):
debug_str = ",\n".join(
" %d: %s" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
debug_repr = ",\n".join(
" %d: %r" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
class DistributedDelegate(DistributedValues):
"""A map from device to values; acts as the same type as the values."""
def __getattr__(self, name):
# The '_use_resource_variables' and the attrs starts with '_self' are used
# for restoring the saved_model proto, and '_attribute_sentinel' is used for
# Layer tracking. At the point these attrs are queried, the variable has not
# been initialized. Thus it should not query those of the underlying
# components.
if name.startswith("_self_") or name in ("_use_resource_variables",
"_attribute_sentinel",
"_distributed_container"):
return super(DistributedDelegate, self).__getattr__(name)
# TODO(priyag): This needs to be made robust against pitfalls from mix use
# __getattr__ and @property. See b/120402273.
return getattr(self._get(), name)
@property
def values(self):
"""Returns the per replica values."""
return self._values
def _get_as_operand(self):
"""Returns the value for operations for the current device.
Some implementations, e.g. `TPUMirroredVariable`, are not able to return the
value type within a replica context. They can, however, return a value that
can be used by the operations below.
"""
return self._get()
# pylint: disable=multiple-statements
def __add__(self, o):
return self._get_as_operand() + o
def __radd__(self, o):
return o + self._get_as_operand()
def __sub__(self, o):
return self._get_as_operand() - o
def __rsub__(self, o):
return o - self._get_as_operand()
def __mul__(self, o):
return self._get_as_operand() * o
def __rmul__(self, o):
return o * self._get_as_operand()
def __truediv__(self, o):
return self._get_as_operand() / o
def __rtruediv__(self, o):
return o / self._get_as_operand()
def __floordiv__(self, o):
return self._get_as_operand() // o
def __rfloordiv__(self, o):
return o // self._get_as_operand()
def __mod__(self, o):
return self._get_as_operand() % o
def __rmod__(self, o):
return o % self._get_as_operand()
def __lt__(self, o):
return self._get_as_operand() < o
def __le__(self, o):
return self._get_as_operand() <= o
def __gt__(self, o):
return self._get_as_operand() > o
def __ge__(self, o):
return self._get_as_operand() >= o
def __and__(self, o):
return self._get_as_operand() & o
def __rand__(self, o):
return o & self._get_as_operand()
def __or__(self, o):
return self._get_as_operand() | o
def __ror__(self, o):
return o | self._get_as_operand()
def __xor__(self, o):
return self._get_as_operand() ^ o
def __rxor__(self, o):
return o ^ self._get_as_operand()
def __getitem__(self, o):
return self._get_as_operand()[o]
def __pow__(self, o, modulo=None):
return pow(self._get_as_operand(), o, modulo)
def __rpow__(self, o):
return pow(o, self._get_as_operand())
def __invert__(self):
return ~self._get_as_operand()
def __neg__(self):
return -self._get_as_operand()
def __abs__(self):
return abs(self._get_as_operand())
def __div__(self, o):
try:
return self._get_as_operand().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._get_as_operand().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._get_as_operand().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._get_as_operand().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues, composite_tensor.CompositeTensor):
"""Holds a map from replica to unsynchronized values."""
@property
def _type_spec(self):
return PerReplicaSpec(
*(type_spec.type_spec_from_value(v) for v in self._values))
@property
def values(self):
"""Returns the per replica values."""
return self._values
class PerReplicaSpec(type_spec.TypeSpec):
"""Type specification for a `PerReplica`."""
__slots__ = ["_value_specs"]
value_type = property(lambda self: PerReplica)
def __init__(self, *value_specs):
self._value_specs = tuple(value_specs)
def _serialize(self):
return self._value_specs
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
replica_context = ds_context.get_replica_context()
if replica_context is not None and replica_context.num_replicas_in_sync > 1:
raise ValueError(
"Flattening a PerReplica to components is not supported in replica "
"context.")
return value._values # pylint: disable=protected-access
def _from_components(self, tensor_list):
return PerReplica(tensor_list)
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
class Mirrored(DistributedDelegate):
"""Holds a map from replica to values which are kept in sync."""
def _get_cross_replica(self):
return self._get_closest()
def _as_graph_element(self):
obj = self._get()
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return obj
def _assign_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign(tensor)
def _assign_add_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign_add(tensor)
def _assign_sub_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign_sub(tensor)
class DistributedVarOp(object):
"""A class that looks like `tf.Operation`."""
def __init__(self, name, graph, traceback, typ):
self.name = name
self.graph = graph
self.traceback = traceback
self.type = typ
def __eq__(self, o):
if not isinstance(o, self.__class__):
raise NotImplementedError
return (self.name == o.name and self.graph == o.graph and
self.traceback == o.traceback and self.type == o.type)
def __hash__(self):
return hash((self.name, self.graph, self.traceback, self.type))
class DistributedVariable(DistributedDelegate, variables_lib.Variable):
"""Holds a map from replica to variables."""
# TODO(josh11b): Support changing the set of variables if e.g. if new
# devices are joining or a device is to leave.
def __init__(self, strategy, values, aggregation):
self._distribute_strategy = strategy
self._aggregation = aggregation
super(DistributedVariable, self).__init__(values)
self._common_name = self._primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
      The op that evaluates to True or False depending on whether all of the
      component variables are initialized.
"""
result = self._primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(
result, self._values[-1].is_initialized(), name=name)
return result
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
      # Return the grouped ops of all the variable initializations of the
      # component values of the distributed variable.
init_op = control_flow_ops.group(
tuple(v.initializer for v in self._values))
return init_op
def initialized_value(self):
return self._get_closest().initialized_value()
@property
def initial_value(self):
return self._get_closest().initial_value
@property
def constraint(self):
return self._primary.constraint
@property
def graph(self):
return self._primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self._primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self._primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self._primary.name
@property
def dtype(self):
return self._primary.dtype
@property
def shape(self):
return self._primary.shape
@property
def synchronization(self):
return self._primary.synchronization
@property
def aggregation(self):
return self._aggregation
@property
def handle(self):
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
raise ValueError("`handle` is not available outside the replica context"
" or a `tf.distribute.Strategy.update()` call.")
else:
return self._values[replica_id].handle
def eval(self, session=None):
return self._get_closest().eval(session)
@property
def _save_slice_info(self):
return self._primary._save_slice_info # pylint: disable=protected-access
def _get_save_slice_info(self):
return self._primary._get_save_slice_info() # pylint: disable=protected-access
def _set_save_slice_info(self, save_slice_info):
for v in self._values:
v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access
@property
def device(self):
return self._get_closest().device
@property
def trainable(self):
return self._primary.trainable
@property
def distribute_strategy(self):
return self._distribute_strategy
def get_shape(self):
return self._primary.get_shape()
def to_proto(self, export_scope=None):
return self._primary.to_proto(export_scope=export_scope)
@property
def op(self):
# We want cross-replica code that does some var.op.X calls
# to work (even if the current device isn't in self._devices), but
# other uses of var.op in a cross-replica context to fail.
if ds_context.in_cross_replica_context():
return DistributedVarOp(self._primary.op.name, self._primary.op.graph,
self._primary.op.traceback, self._primary.op.type)
return self._get().op
@property
def _in_graph_mode(self):
return self._primary._in_graph_mode # pylint: disable=protected-access
def read_value(self):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return array_ops.identity(self._get())
def value(self):
return self._get_closest().value()
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
ops.register_dense_tensor_like_type(DistributedVariable)
def _validate_colocate_extended(v, extended):
variable_strategy = v._distribute_strategy # pylint: disable=protected-access
if variable_strategy.extended is not extended:
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not %s created in scope: %s" %
(v, variable_strategy))
def validate_colocate_distributed_variable(v, extended):
if not isinstance(v, DistributedVariable):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def validate_colocate(v, extended):
if not hasattr(v, "_distribute_strategy"):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def _apply_aggregation(strategy, value, aggregation, destinations):
if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return strategy.extended.broadcast_to(
strategy.experimental_local_results(value)[0],
destinations=destinations)
reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
return strategy.extended.reduce_to(reduce_op, value, destinations)
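# Illustrative sketch, added for exposition and never called; it is not part
# of the original file and assumes the public TF 2.2+ APIs
# (`tf.distribute.MirroredStrategy`, `Strategy.run`). It shows the pattern
# that `_apply_aggregation` above supports and that `_aggregation_error_msg`
# below asks for: declaring an aggregation so replica-context assignments
# know how to combine the per-replica values.
def _aggregation_usage_sketch():
  """Illustrative only; a variable whose replica-context updates are summed."""
  import tensorflow as tf  # Local import, only needed for this sketch.
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    counter = tf.Variable(0., aggregation=tf.VariableAggregation.SUM)
  def step():
    # The per-replica increments are reduced with SUM (per the declared
    # aggregation) before the mirrored copies are updated.
    counter.assign_add(1.)
  strategy.run(step)
  return counter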
_aggregation_error_msg = (
    "You must specify an aggregation method to update a "
    "{variable_type} in Replica Context. You can do so by passing "
    "an explicit value for argument `aggregation` to tf.Variable(..), "
    "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`. "
    "`tf.VariableAggregation` lists the possible aggregation methods. "
    "This is required because {variable_type} should always be "
    "kept in sync. When updating them or assigning to them in a "
    "replica context, we automatically try to aggregate the values "
    "before updating the variable. For this aggregation, we need to "
    "know the aggregation method. "
    "Another alternative is to not try to update such "
    "{variable_type} in replica context, but in cross replica "
    "context. You can enter cross replica context by calling "
    "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`. "
    "Inside `merge_fn`, you can then update the {variable_type} "
"using `tf.distribute.StrategyExtended.update()`.")
class _MirroredSaveable(saveable_object_util.ResourceVariableSaveable):
"""Class for defining how to restore a MirroredVariable."""
def __init__(self, mirrored_variable, primary_variable, name):
self._mirrored_variable = mirrored_variable
super(_MirroredSaveable, self).__init__(primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return control_flow_ops.group(
tuple(
_assign_on_device(v.device, v, tensor)
for v in self._mirrored_variable.values))
def create_mirrored_variable( # pylint: disable=missing-docstring
strategy, real_mirrored_creator, mirrored_cls, sync_on_read_cls, **kwargs):
# Figure out what collections this variable should be added to.
# We'll add the MirroredVariable to those collections instead.
var_collections = kwargs.pop("collections", None)
if var_collections is None:
var_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
synchronization = kwargs.get("synchronization",
vs.VariableSynchronization.ON_WRITE)
if synchronization == vs.VariableSynchronization.NONE:
raise ValueError(
"`NONE` variable synchronization mode is not supported with `Mirrored` "
"distribution strategy. Please change the `synchronization` for "
"variable: " + str(kwargs["name"]))
elif synchronization == vs.VariableSynchronization.ON_READ:
is_sync_on_read = True
elif synchronization in (vs.VariableSynchronization.ON_WRITE,
vs.VariableSynchronization.AUTO):
# `AUTO` synchronization defaults to `ON_WRITE`.
is_sync_on_read = False
else:
raise ValueError(
"Invalid variable synchronization mode: %s for variable: %s" %
(synchronization, kwargs["name"]))
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA):
raise ValueError("Invalid variable aggregation mode: %s for variable: %s" %
(aggregation, kwargs["name"]))
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
value_list = real_mirrored_creator(**kwargs)
var_cls = sync_on_read_cls if is_sync_on_read else mirrored_cls
result = var_cls(strategy, value_list, aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
var_collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for value in value_list:
for i, trainable_variable in enumerate(l):
if value is trainable_variable:
del l[i]
break
g.add_to_collections(var_collections, result)
elif ops.GraphKeys.GLOBAL_STEP in var_collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)
return result
class MirroredVariable(DistributedVariable, Mirrored):
"""Holds a map from replica to variables whose values are kept in sync."""
def _mirrored_update(self, update_fn, value, **kwargs):
"""Apply identical updates using `update_fn` to variables on each replica."""
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
update_replica_id = distribute_lib.get_update_replica_id()
if update_replica_id is not None:
# We are calling an update function on the mirrored variable in an
# update context.
#
# The arguments to update() are automatically unwrapped so the
# update() function would normally see regular variables, not
# MirroredVariables. However, the update function can still operate on
          # wrapped MirroredVariables through object members, captured
          # arguments, etc. This is more likely in an update_non_slot()
          # function, which can update several non-slot variables in one call.
return update_fn(self._values[update_replica_id], value, **kwargs)
# We are calling update on the mirrored variable in cross replica
# context, use `strategy.extended.update()` to update the variable.
return self._distribute_strategy.extended.update(
self, update_fn, args=(value,), kwargs=kwargs)
else:
_assert_replica_context(self._distribute_strategy)
# We are calling an update function on the mirrored variable in replica
# context.
# We reduce the value we want to update. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function on each of the mirrored variables with the
# reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(
_aggregation_error_msg.format(variable_type="MirroredVariable"))
def merge_fn(strategy, value, **other_kwargs):
"""Aggregate across replicas and update MV with aggregated value."""
# Don't allow MEAN with non float dtype, since it may cause unexpected
# precision loss. Python3 and NumPy automatically upcast integers to
# float in division, but we should always preserve the type.
#
# Note that to be backward compatible we allow the case when the value
          # is *always* the same on each replica, i.e. the value is not a
# PerReplica. Refer to regroup() to see how values are grouped.
if self._aggregation == vs.VariableAggregation.MEAN and (
not self.dtype.is_floating) and isinstance(value, PerReplica):
raise ValueError(
"Cannot update non-float variables with "
"tf.VariableAggregation.MEAN aggregation in replica context. "
"Either change the variable dtype to float or update it in "
"cross-replica context.")
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, update_fn, args=(v,), kwargs=other_kwargs)
return ds_context.get_replica_context().merge_call(
merge_fn, args=(value,), kwargs=kwargs)
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._mirrored_update(
update_fn=assign_sub_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._mirrored_update(
update_fn=assign_add_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign(self, value, use_locking=False, name=None, read_value=True):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._mirrored_update(
update_fn=assign_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
scatter_sub_fn = lambda var, *a, **kw: var.scatter_sub(*a, **kw)
return self._mirrored_update(
update_fn=scatter_sub_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
scatter_add_fn = lambda var, *a, **kw: var.scatter_add(*a, **kw)
return self._mirrored_update(
update_fn=scatter_add_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
scatter_mul_fn = lambda var, *a, **kw: var.scatter_mul(*a, **kw)
return self._mirrored_update(
update_fn=scatter_mul_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
scatter_div_fn = lambda var, *a, **kw: var.scatter_div(*a, **kw)
return self._mirrored_update(
update_fn=scatter_div_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_min is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_min_fn = lambda var, *a, **kw: var.scatter_min(*a, **kw)
return self._mirrored_update(
update_fn=scatter_min_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_max is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_max_fn = lambda var, *a, **kw: var.scatter_max(*a, **kw)
return self._mirrored_update(
update_fn=scatter_max_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_update is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_update_fn = lambda var, *a, **kw: var.scatter_update(*a, **kw)
return self._mirrored_update(
update_fn=scatter_update_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def _get_cross_replica(self):
# Return identity, to avoid directly exposing the variable to the user and
# allowing it to be modified by mistake.
return array_ops.identity(Mirrored._get_cross_replica(self))
def _as_graph_element(self):
return self._get_closest()._as_graph_element() # pylint: disable=protected-access
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self._primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() call.
if as_ref:
# A TF 1.x case where the variable is a boolean variable and used like:
# tf.cond(v, true_fn, false_fn).
      raise ValueError(
          "You may be using a variable created under a distribute strategy "
          "in TF 1.x control flows. Try explicitly converting the variable "
          "to a Tensor using variable.read_value(), or switch to TF 2.x.")
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(MirroredVariable,
_tensor_conversion_mirrored)
def _tensor_conversion_mirrored_val(value, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(
value._get(), dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(Mirrored,
_tensor_conversion_mirrored_val)
def is_distributed_variable(v):
  """Determine if `v` is a distributed variable or a TPU mirrored variable."""
return isinstance(v, DistributedVariable)
class _SyncOnReadSaveable(saveable_object.SaveableObject):
"""Class for defining how to restore a SyncOnReadVariable."""
def __init__(self, sync_on_read_variable, name):
self._sync_on_read_variable = sync_on_read_variable
# We use a callable so that we don't have to evaluate this expression
# in the case where we are trying to restore instead of save.
def tensor():
strategy = sync_on_read_variable._distribute_strategy # pylint: disable=protected-access
return strategy.extended.read_var(sync_on_read_variable)
spec = saveable_object.SaveSpec(
tensor=tensor,
slice_spec="",
name=name,
dtype=sync_on_read_variable.dtype,
device=sync_on_read_variable._primary.device) # pylint: disable=protected-access
super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
tensor, = restored_tensors
if self._sync_on_read_variable.aggregation == vs.VariableAggregation.SUM:
tensor = math_ops.cast(tensor / len(self._sync_on_read_variable._devices), # pylint: disable=protected-access
self._sync_on_read_variable.dtype)
return control_flow_ops.group(
tuple(
_assign_on_device(v.device, v, tensor)
for v in self._sync_on_read_variable.values))
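# Worked example, added for exposition and never called; it is not part of
# the original file. It spells out why restore() above divides by the number
# of devices when the aggregation is SUM: the checkpoint stores the
# aggregated value, so each device must receive its share for the next
# aggregated read to reproduce the saved value.
def _sum_restore_round_trip_sketch(num_devices=3, per_device_value=2.0):
  """Illustrative only; numeric check of the SUM save/restore identity."""
  saved = num_devices * per_device_value     # What read_var() writes out.
  restored_per_device = saved / num_devices  # What restore() assigns per device.
  assert num_devices * restored_per_device == saved
  return restored_per_device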
def _assert_replica_context(strategy):
replica_context = ds_context.get_replica_context()
if not replica_context:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
if replica_context.strategy is not strategy:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
class SyncOnReadVariable(DistributedVariable):
"""Holds a map from replica to variables whose values are reduced on save."""
def assign_sub(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
if self._aggregation == vs.VariableAggregation.SUM:
raise ValueError(
"SyncOnReadVariable does not support `assign_sub` in "
"cross-replica context when aggregation is set to "
"`tf.VariableAggregation.SUM`.")
return control_flow_ops.group(
tuple(
_assign_sub_on_device(v.device, v, args[0])
for v in self._values))
else:
return self._get().assign_sub(*args, **kwargs)
def assign_add(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
if self._aggregation == vs.VariableAggregation.SUM:
raise ValueError(
"SyncOnReadVariable does not support `assign_add` in "
"cross-replica context when aggregation is set to "
"`tf.VariableAggregation.SUM`.")
return control_flow_ops.group(
tuple(
_assign_add_on_device(v.device, v, args[0])
for v in self._values))
else:
return self._get().assign_add(*args, **kwargs)
def assign(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
tensor = args[0]
if self._aggregation == vs.VariableAggregation.SUM:
tensor = math_ops.cast(tensor / len(self._values), self.dtype)
return control_flow_ops.group(
tuple(_assign_on_device(v.device, v, tensor) for v in self._values))
else:
return self._get().assign(*args, **kwargs)
def value(self):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
return self._get_cross_replica()
else:
# _get_closest() returns a Variable.
return self._get_closest().value()
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
else:
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
def _get_cross_replica(self):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return self._primary
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return self._distribute_strategy.reduce(
reduce_util.ReduceOp.from_variable_aggregation(self.aggregation),
self,
axis=None)
def _as_graph_element(self):
# pylint: disable=protected-access
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
return ops.convert_to_tensor(self._get_cross_replica())
return self._get()._as_graph_element()
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
`SyncOnReadVariable`s.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _SyncOnReadSaveable(self, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(SyncOnReadVariable,
_tensor_conversion_sync_on_read)
def regroup(values, wrap_class=PerReplica, always_wrap=False):
  """Converts a nest of per-replica values to a nest of PerReplica/Mirrored.
  Args:
    values: Values to regroup.
    wrap_class: Class that `values` will be wrapped in.
    always_wrap: Always wrap the `values` in `wrap_class` even if the values
      are the same, except for DistributedVariable.
Returns:
Wrapped `values`.
"""
v0 = values[0]
if isinstance(v0, list):
for v in values[1:]:
assert isinstance(v, list)
assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
(len(v), len(v0), v, v0))
return [
regroup(tuple(v[i] for v in values), wrap_class)
for i in range(len(v0))
]
if isinstance(v0, tuple):
for v in values[1:]:
assert isinstance(v, tuple)
assert len(v) == len(v0)
regrouped_tuple = tuple(
regroup(tuple(v[i] for v in values), wrap_class)
for i in range(len(v0)))
if hasattr(v0, "_fields"):
# This tuple is in fact a namedtuple! Create a new namedtuple instance
# and initialize it with the regrouped values:
assert hasattr(type(v0), "_make")
return type(v0)._make(regrouped_tuple)
else:
return regrouped_tuple
if isinstance(v0, dict):
v0keys = v0.keys()
for v in values[1:]:
assert isinstance(v, dict), ("v[0]: %r v[i]: %r" % (v0, v))
assert set(v.keys()) == set(v0keys), ("v[0].keys: %s v[i].keys: %s" %
(set(v0keys), set(v.keys())))
# Use the actual type in case it is a class inherited from a dict.
return type(v0)({
key: regroup(tuple(v[key] for v in values), wrap_class)
for key in v0keys
})
# If exactly the same object across all devices, return it unwrapped.
same_id = True
for v in values[1:]:
if v is not v0:
same_id = False
break
# Consider three cases where same_id is true:
# * If v0 is a DistributedVariable (a MirroredVariable or
# SyncOnReadVariable, and same_id means it is the same across all
# devices), we want to return it. We check DistributedVariable
# specifically since it can look like it has a
# _distributed_container member since its members do.
if same_id and isinstance(v0, DistributedVariable):
return v0
# * If v0 is a member of a distributed variable, in which case
# hasattr(v0, "_distributed_container") is true, we want to
# return the DistributedVariable that contains it using the
# _distributed_container logic below. This case can trigger
# same_id when there is only one device.
# * In any other situation, same_id means we return v0 unless `always_wrap` is
# true.
if same_id and not always_wrap and not hasattr(v0, "_distributed_container"):
return v0
# Detect the case where each device has a parallel component of the
# same MirroredVariable (or SyncOnReadVariable). In this case we
# want to return the containing MirroredVariable, after a bunch of
# sanity checking. In particular, each component should have the
# same container, and the devices of the variables should match the
# keys of the per-replica dictionary.
if hasattr(v0, "_distributed_container"):
# pylint: disable=protected-access
assert not isinstance(v0, MirroredVariable), (
"ids = %s, values = %s" % ([id(v) for v in values], values))
distributed_container = v0._distributed_container()
assert distributed_container is not None
for v in values[1:]:
assert distributed_container is v._distributed_container()
return distributed_container
# pylint: enable=protected-access
return wrap_class(values)
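# Illustrative sketch, added for exposition and never called; it is not part
# of the original file. It shows regroup() on a simple two-replica structure:
# the nest shape (here a dict) is preserved while matching leaves across
# replicas are bundled into PerReplica values.
def _regroup_usage_sketch():
  """Illustrative only; demonstrates the behavior described in regroup()."""
  per_replica_results = [
      {"loss": 0.5, "grad_norm": 1.2},  # Replica 0.
      {"loss": 0.7, "grad_norm": 0.9},  # Replica 1.
  ]
  regrouped = regroup(per_replica_results)
  # regrouped is a dict; regrouped["loss"] is PerReplica((0.5, 0.7)). A leaf
  # that is the identical object on every replica (e.g. a shared
  # DistributedVariable) would be returned unwrapped instead.
  return regrouped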
def select_replica(replica_id, structured):
"""Specialize a nest of regular & per-replica values for one replica."""
def _get(x):
# `DistributedValues` would be sliced according to replica unless it is a
# `DistributedVariable` because `DistributedVariable` can be handled
# directly in the replica context.
if (isinstance(x, DistributedVariable) or
not isinstance(x, DistributedValues)):
return x
else:
return x.values[replica_id]
return nest.map_structure(_get, structured)
def select_replica_mirrored(replica_id, structured):
"""Specialize a nest of regular & mirrored values for one replica."""
def _get_mirrored(x):
if isinstance(x, DistributedValues):
if not isinstance(x, Mirrored):
raise TypeError(
"Expected value to be mirrored across replicas: %s in %s." %
(x, structured))
return x.values[replica_id]
else:
return x
return nest.map_structure(_get_mirrored, structured)
def update_regroup(extended, updates, group):
"""Regroup for an update, with dependencies to ensure all updates execute."""
if not group:
regrouped = regroup(updates, Mirrored)
return nest.map_structure(extended._local_results, regrouped) # pylint: disable=protected-access
def _make_grouped_mirrored(values):
"""Convert per-replica list `values` into Mirrored type with grouping."""
if len(values) == 1:
return Mirrored(values)
# Make sure we run all updates. Without this, something like
# session.run(extended.update(...)) may only update one replica.
g = control_flow_ops.group(values)
# If values is just ops, the grouping is enough. Everything in values
# should have the same type, since we expect every replica to be performing
# the same computation.
if not all(tensor_util.is_tensor(v) for v in values):
return g
# Otherwise we need tensors with the same values as `values`, but
# that have a dependency on `g`.
with_dep = []
for v in values:
with ops.device(v.device), ops.control_dependencies([g]):
with_dep.append(array_ops.identity(v))
return Mirrored(with_dep)
return regroup(updates, _make_grouped_mirrored)
def value_container(val):
"""Returns the container that this per-replica `value` belongs to.
Args:
val: A value returned by `call_for_each_replica()` or a variable created in
`scope()`.
Returns:
    A container that `val` belongs to.
    If `val` does not belong to any container (including the case of the
    container having been destroyed), returns `val` itself.
"""
if (hasattr(val, "_distributed_container") and
# DistributedVariable has _distributed_container defined
# but we don't want to return it.
not isinstance(val, DistributedVariable)):
container = val._distributed_container() # pylint: disable=protected-access
if container is not None:
return container
return val
class AggregatingVariable(variables_lib.Variable):
"""A wrapper around a variable that aggregates updates across replicas."""
def __init__(self, strategy, v, aggregation):
self._distribute_strategy = strategy
self._v = v
# NOTE: We don't use "_distributed_container" here because we don't want
# to trigger that code path in regroup().
v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access
self._aggregation = aggregation
def get(self):
return self._v
@property
def distribute_strategy(self):
return self._distribute_strategy
def __getattr__(self, name):
return getattr(self._v, name)
def _assign_func(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if ds_context.in_cross_replica_context():
if distribute_lib.get_update_replica_id() is not None:
# We are calling an assign function in an update context.
return f(self._v, *args, **kwargs)
# We are calling an assign function in cross replica context, wrap it in
# an update call.
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
replica_context = ds_context.get_replica_context()
assert replica_context
# We are calling an assign function in replica context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function with the reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(
_aggregation_error_msg.format(
variable_type="AggregatingVariable"))
def merge_fn(strategy, value, *other_args, **other_kwargs):
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, f, args=(v,) + other_args, kwargs=other_kwargs)
return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)
def assign_sub(self, *args, **kwargs):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def initializer(self):
return self._v.initializer
def initialized_value(self):
return self._v.initialized_value()
@property
def initial_value(self):
return self._v.initial_value
@property
def op(self):
return self._v.op
def read_value(self):
return self._v.read_value()
def eval(self, session=None):
return self._v.eval(session)
@property
def graph(self):
return self._v.graph
@property
def device(self):
return self._v.device
@property
def shape(self):
return self._v.shape
@property
def aggregation(self):
return self._aggregation
@property
def synchronization(self):
return self._v.synchronization
@property
def name(self):
return self._v.name
@property
def trainable(self):
return self._v.trainable
@property
def dtype(self):
return self._v.dtype
# TODO(josh11b): Test saving & restoring.
def _gather_saveables_for_checkpoint(self):
return {trackable.VARIABLE_VALUE_KEY: self._v}
# pylint: disable=multiple-statements
def __add__(self, o):
return self._v + o
def __radd__(self, o):
return o + self._v
def __sub__(self, o):
return self._v - o
def __rsub__(self, o):
return o - self._v
def __mul__(self, o):
return self._v * o
def __rmul__(self, o):
return o * self._v
def __truediv__(self, o):
return self._v / o
def __rtruediv__(self, o):
return o / self._v
def __floordiv__(self, o):
return self._v // o
def __rfloordiv__(self, o):
return o // self._v
def __mod__(self, o):
return self._v % o
def __rmod__(self, o):
return o % self._v
def __lt__(self, o):
return self._v < o
def __le__(self, o):
return self._v <= o
def __gt__(self, o):
return self._v > o
def __ge__(self, o):
return self._v >= o
def __and__(self, o):
return self._v & o
def __rand__(self, o):
return o & self._v
def __or__(self, o):
return self._v | o
def __ror__(self, o):
return o | self._v
def __xor__(self, o):
return self._v ^ o
def __rxor__(self, o):
return o ^ self._v
def __getitem__(self, o):
return self._v[o]
def __pow__(self, o, modulo=None):
return pow(self._v, o, modulo)
def __rpow__(self, o):
return pow(o, self._v)
def __invert__(self):
return ~self._v
def __neg__(self):
return -self._v
def __abs__(self):
return abs(self._v)
def __div__(self, o):
try:
return self._v.__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._v.__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._v.__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._v.__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __str__(self):
return str(self._v)
def __repr__(self):
return repr(self._v)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(self.get(), dtype=dtype, name=name,
as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype, name, as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(AggregatingVariable,
_tensor_conversion_aggregate)
ops.register_dense_tensor_like_type(AggregatingVariable)
|
py
|
1a5b8852bc53a19fd3daec6c2e445a8d5c3c59ce
|
import ethereum.utils as utils
canary_addresses = map(utils.decode_hex, [
'539dd9aaf45c3feb03f9c004f4098bd3268fef6b', # Gav
'c8158da0b567a8cc898991c2c2a073af67dc03a9', # Vitalik
'959c33de5961820567930eccce51ea715c496f85', # Jeff
'7a19a893f91d5b6e2cdf941b6acbba2cbcf431ee' # Christoph
])
|
py
|
1a5b8879eb8668ceb74002344904409db37cf190
|
from __future__ import print_function, division
import numpy as np
import pytest
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
    Test with different numbers of projections.
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
    nb_angles = 80
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with pytest.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect')
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
        sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
                                                sinogram[:, i])
                                      for i in range(sinogram.shape[1])]).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
py
|
1a5b8b682e22a6705d47f3689d1140b887dcfd36
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pod_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_monitor_sdk.model.container import container_status_pb2 as resource__monitor__sdk_dot_model_dot_container_dot_container__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pod_status.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x10pod_status.proto\x12\tcontainer\x1a;resource_monitor_sdk/model/container/container_status.proto\"\xbc\x01\n\tPodStatus\x12\r\n\x05phase\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x39\n\x15initContainerStatuses\x18\x03 \x03(\x0b\x32\x1a.container.ContainerStatus\x12\x35\n\x11\x63ontainerStatuses\x18\x04 \x03(\x0b\x32\x1a.container.ContainerStatus\x12\x0e\n\x06hostIP\x18\x05 \x01(\t\x12\r\n\x05podIP\x18\x06 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[resource__monitor__sdk_dot_model_dot_container_dot_container__status__pb2.DESCRIPTOR,])
_PODSTATUS = _descriptor.Descriptor(
name='PodStatus',
full_name='container.PodStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='container.PodStatus.phase', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='container.PodStatus.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initContainerStatuses', full_name='container.PodStatus.initContainerStatuses', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containerStatuses', full_name='container.PodStatus.containerStatuses', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostIP', full_name='container.PodStatus.hostIP', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podIP', full_name='container.PodStatus.podIP', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=281,
)
_PODSTATUS.fields_by_name['initContainerStatuses'].message_type = resource__monitor__sdk_dot_model_dot_container_dot_container__status__pb2._CONTAINERSTATUS
_PODSTATUS.fields_by_name['containerStatuses'].message_type = resource__monitor__sdk_dot_model_dot_container_dot_container__status__pb2._CONTAINERSTATUS
DESCRIPTOR.message_types_by_name['PodStatus'] = _PODSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PodStatus = _reflection.GeneratedProtocolMessageType('PodStatus', (_message.Message,), {
'DESCRIPTOR' : _PODSTATUS,
'__module__' : 'pod_status_pb2'
# @@protoc_insertion_point(class_scope:container.PodStatus)
})
_sym_db.RegisterMessage(PodStatus)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py
|
1a5b8bf891beb62cfef0262a4359125e2a7ed596
|
#!/usr/bin/env python3
# plot_shapely.py
# %%
from dataclasses import dataclass
from typing import List, Tuple, Set
import matplotlib.pyplot as plt
from aray.problem import Problem
from shapely.geometry import Polygon, Point, LineString
'''
Datastructures we want to have
point: integer pair, not the shapely kind
delta: integer pair, difference between two points
edge: integer pair, indexes into a point or vertex list
segment: point pair
problem data:
hole: list of points (form a polygon)
vertices: list of points
edges: list of edges, indexes into vertices
computed data:
points: sorted list of valid points
edge_dists: list of edge distances, corresponds to edges
dist_deltas: map from dist to a list of deltas
delta_forbidden: map from delta to a list of forbidden segments
'''
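# Illustrative values for the conventions above (a hypothetical sketch, not taken
# from a real problem instance): points and deltas are plain integer pairs, an
# edge indexes into the vertex list, and a segment pairs up two concrete points.
_example_vertices = [(0, 0), (3, 0), (3, 4)]
_example_edge = (0, 2)                                   # refers to vertices 0 and 2
_example_segment = (_example_vertices[0], _example_vertices[2])
_example_delta = (3, 4)                                  # difference between the two endpoints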
# %%
def get_points(hole):
poly = Polygon(hole)
min_x, min_y, max_x, max_y = poly.bounds
points = []
for x in range(int(min_x), int(max_x) + 1):
for y in range(int(min_y), int(max_y) + 1):
if poly.intersects(Point(x, y)):
points.append((x, y))
return sorted(points)
def get_forbidden(hole, delta):
poly = Polygon(hole)
forbidden = []
for a in points:
b = (a[0] + delta[0], a[1] + delta[1])
if b not in points:
continue
ab = LineString((a, b))
if poly.contains(ab) or ab.within(poly):
continue
elif poly.exterior.crosses(ab):
forbidden.append((a, b))
elif poly.touches(ab) and not poly.exterior.contains(ab):
forbidden.append((a, b))
return forbidden
def get_deltas(d_old: int, epsilon: int) -> List[Tuple[int, int]]:
deltas = []
n = int(d_old ** 0.5 + 1) * 2
for x in range(-n, n + 1):
for y in range(-n, n + 1):
d_new = dsq((0, 0), (x, y))
if abs(d_new / d_old - 1) <= epsilon / 1e6:
deltas.append((x, y))
return deltas
fig, ax = plt.subplots()
problem = Problem.get(14)
problem.plot(fig, ax)
points = get_points(problem.hole)
xs = [p[0] for p in points]
ys = [p[1] for p in points]
ax.scatter(xs, ys)
forbidden = get_forbidden(problem.hole, (-1, -1))
for a, b in forbidden:
ax.plot((a[0], b[0]), (a[1], b[1]))
forbidden
# %%
problem = Problem.get(14)
vert = problem.vertices
def dsq(a, b): return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
edge_dsq = [dsq(vert[i], vert[j]) for i, j in problem.edges]
epsilon = problem.epsilon
for d_old in sorted(set(edge_dsq)):
print(d_old, edge_dsq.count(d_old))
deltas = get_deltas(d_old, epsilon)
print('d', deltas)
fig, ax = plt.subplots()
ax.grid(True)
    # set x and y ticks (recompute the same grid radius used inside get_deltas,
    # since n is local to that function)
    n = int(d_old ** 0.5 + 1) * 2
    ax.set_xticks(range(-n, n + 1))
    ax.set_yticks(range(-n, n + 1))
ax.scatter([p[0] for p in deltas], [p[1] for p in deltas])
|
py
|
1a5b8ce9e13f93b3c585ede083fc3fd387114e43
|
#!/usr/bin/env python2
# coding: utf-8
"""Test ONLY_ON_SYMBOLIZED."""
import unittest
from triton import ARCH, MODE, CPUSIZE, TritonContext, Instruction, MemoryAccess
def checkAstIntegrity(instruction):
"""
    This function checks if all ASTs under an Instruction class are still
available.
"""
try:
for se in instruction.getSymbolicExpressions():
str(se.getAst())
for x, y in instruction.getLoadAccess():
str(y)
for x, y in instruction.getStoreAccess():
str(y)
for x, y in instruction.getReadRegisters():
str(y)
for x, y in instruction.getWrittenRegisters():
str(y)
for x, y in instruction.getReadImmediates():
str(y)
return True
except:
return False
class TestOnlySymbolizedMode(unittest.TestCase):
"""Testing the ONLY_ON_SYMBOLIZED mode."""
def test_1(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, False)
inst = Instruction("\x48\x89\xc3") # mov rbx, rax
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 1)
self.assertEqual(len(inst.getWrittenRegisters()), 2)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 0)
self.assertEqual(len(inst.getWrittenRegisters()), 0)
self.assertEqual(len(inst.getLoadAccess()), 0)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_2(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.convertRegisterToSymbolicVariable(ctx.registers.rax)
inst = Instruction("\x48\x89\xc3") # mov rbx, rax
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 1)
self.assertEqual(len(inst.getWrittenRegisters()), 1)
self.assertEqual(len(inst.getLoadAccess()), 0)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_3(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 1)
self.assertEqual(len(inst.getWrittenRegisters()), 2)
self.assertEqual(len(inst.getLoadAccess()), 1)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_4(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.convertRegisterToSymbolicVariable(ctx.registers.rax)
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 1)
self.assertEqual(len(inst.getWrittenRegisters()), 0)
self.assertEqual(len(inst.getLoadAccess()), 0)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_5(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.convertMemoryToSymbolicVariable(MemoryAccess(0, CPUSIZE.QWORD))
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 0)
self.assertEqual(len(inst.getWrittenRegisters()), 1)
self.assertEqual(len(inst.getLoadAccess()), 1)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_6(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.convertRegisterToSymbolicVariable(ctx.registers.rax)
ctx.convertMemoryToSymbolicVariable(MemoryAccess(0, CPUSIZE.QWORD))
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(len(inst.getReadRegisters()), 1)
self.assertEqual(len(inst.getWrittenRegisters()), 1)
self.assertEqual(len(inst.getLoadAccess()), 1)
self.assertEqual(len(inst.getStoreAccess()), 0)
def test_7(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.setConcreteRegisterValue(ctx.registers.rax, 0x1337)
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(inst.getOperands()[1].getAddress(), 0x1337)
self.assertIsNone(inst.getOperands()[1].getLeaAst())
def test_8(self):
ctx = TritonContext()
ctx.setArchitecture(ARCH.X86_64)
ctx.enableMode(MODE.ONLY_ON_SYMBOLIZED, True)
ctx.setConcreteRegisterValue(ctx.registers.rax, 0x1337)
ctx.convertRegisterToSymbolicVariable(ctx.registers.rax)
ctx.convertMemoryToSymbolicVariable(MemoryAccess(0, CPUSIZE.QWORD))
inst = Instruction("\x48\x8b\x18") # mov rbx, qword ptr [rax]
self.assertTrue(ctx.processing(inst))
self.assertTrue(checkAstIntegrity(inst))
self.assertEqual(inst.getOperands()[1].getAddress(), 0x1337)
self.assertIsNotNone(inst.getOperands()[1].getLeaAst())
|
py
|
1a5b8dd089352a597e4e5b01b027e3bd9942e40e
|
import ctypes
from vivsys.common import *
class ServiceControlManager:
def __init__(self):
self.hScm = None
def __enter__(self):
self.hScm = advapi32.OpenSCManagerW(None, None, SC_MANAGER_CREATE_SERVICE)
if not self.hScm:
raise ctypes.WinError()
return self
def __exit__(self, exc, ex, tb):
advapi32.CloseServiceHandle(self.hScm)
self.hScm = None
def _req_with(self):
        if self.hScm is None:
raise Exception('ServiceControlManager not in with block!')
def openService(self, name, access=SERVICE_ALL_ACCESS):
'''
Retrieve a Service object for the given service name.
Example:
with ServiceControlManager() as scm:
with scm.openService('woot') as svc:
dostuff(svc)
'''
self._req_with()
hSvc = advapi32.OpenServiceW(self.hScm, name, access)
if not hSvc:
raise ctypes.WinError()
return Service(self.hScm, hSvc)
def createDriverService(self, path, name):
'''
Helper method for creation of driver services.
'''
self._req_with()
hSvc = advapi32.CreateServiceW(self.hScm,
name,
None,
SERVICE_START | DELETE | SERVICE_STOP,
SERVICE_KERNEL_DRIVER,
SERVICE_DEMAND_START,
SERVICE_ERROR_IGNORE,
path,
None, NULL, None, None, None)
if not hSvc:
raise ctypes.WinError()
return Service(self.hScm,hSvc)
def isServiceName(self, name):
'''
Return True/False if a service (by name) exists.
'''
self._req_with()
retval = False
hSvc = advapi32.OpenServiceW(self.hScm, name, SERVICE_ALL_ACCESS)
if hSvc:
retval = True
advapi32.CloseServiceHandle(hSvc)
return retval
class Service:
'''
An object to minimally wrap the Windows Service APIs
which are needed for loading/unloading drivers.
'''
def __init__(self, hScm, hSvc):
self.hScm = hScm
self.hSvc = hSvc
self.inwith = False
def __enter__(self):
self.inwith = True
return self
def __exit__(self, exc, ex, tb):
self.close()
def close(self):
advapi32.CloseServiceHandle(self.hSvc)
self.hSvc = None
def _req_with(self):
if not self.inwith:
raise Exception('Service not in with block!')
def getServiceStatus(self):
'''
Returns a SERVICE_STATUS structure for the service.
'''
self._req_with()
status = SERVICE_STATUS()
if not advapi32.QueryServiceStatus(self.hSvc, ctypes.byref(status)):
raise ctypes.WinError()
return status
def delService(self):
'''
Delete the service.
Example:
scm = ServiceControlManager()
with scm:
with scm.openService('woot') as svc:
svc.delService()
'''
self._req_with()
if not advapi32.DeleteService(self.hSvc):
err = ctypes.WinError()
if ERROR_SERVICE_MARKED_FOR_DELETE != err.winerror:
raise err
def stopService(self):
'''
Stop the service ( if running ).
'''
self._req_with()
status = self.getServiceStatus()
if status.dwCurrentState == SERVICE_RUNNING:
if not advapi32.ControlService(self.hSvc, SERVICE_CONTROL_STOP, ctypes.byref(status)):
raise ctypes.WinError()
return status
def startService(self):
'''
Start the service.
'''
self._req_with()
if not advapi32.StartServiceW(self.hSvc, 0, NULL):
err = ctypes.WinError()
if ERROR_SERVICE_ALREADY_RUNNING != err.winerror:
raise err
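# Illustrative end-to-end sketch (an assumption, not part of the module): load and
# then unload a driver service with the helpers above. The path and service name
# are made-up placeholders, so the block is left commented out.
# with ServiceControlManager() as scm:
#     if not scm.isServiceName('mydrv'):
#         with scm.createDriverService(r'C:\drivers\mydrv.sys', 'mydrv') as svc:
#             svc.startService()
#     with scm.openService('mydrv') as svc:
#         svc.stopService()
#         svc.delService()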
|
py
|
1a5b8fd843897f82d1a66d1235691c4e608b57d5
|
#CODE2---For calculating pathway details----
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
#path = '/home/16AT72P01/Excelra/SMPDB/output/metabolic_proteins.csv'
path = '/home/16AT72P01/Excelra/SMPDB/output/metabolics.csv'
files = glob.glob(path)
unique_pathway = set()
with open(path) as f1:
reader = csv.DictReader(f1, quotechar='"', delimiter='\t')
print(reader)
for row in reader:
#unique_pathway.add(row['PATHWAY_NAME'])
unique_pathway.add(row['PATHWAY_NAME'])
f1.close()
print(len(unique_pathway))
|
py
|
1a5b929ffd9f0ad22d5e12471e8b4812fe66a63b
|
# Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Amy Holland <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Zen Chatbot talks in gems of Zen wisdom.
This is a sample conversation with Zen Chatbot:
ZC: Welcome, my child.
me: Good afternoon.
ZC: Ask the question you have come to ask.
me: How can I achieve enlightenment?
ZC: How do you suppose?
me: Through meditation.
ZC: Form is emptiness, and emptiness form.
me: How can I empty my mind of worldly troubles?
ZC: Will an answer to that really help in your search for enlightenment?
me: Yes.
ZC: It is better to be right than to be certain.
me: I seek truth and wisdom.
ZC: The search for truth is a long journey.
me: Are you sure?
ZC: Maybe sure, maybe not sure.
The chatbot structure is based on that of chat.eliza. Thus, it uses
a translation table to convert from question to response
i.e. "I am" --> "you are"
Of course, since Zen Chatbot does not understand the meaning of any words,
responses are very limited. Zen Chatbot will usually answer very vaguely, or
respond to a question by asking a different question, in much the same way
as Eliza.
"""
from __future__ import print_function
from cnltk.chat.util import Chat, reflections
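# Minimal sketch (an assumption for illustration, not part of the original module):
# the "translation table" idea described above can be emulated with a plain
# word-level mapping applied to the user's input before a reply is built.
_reflection_example = {"i": "you", "am": "are", "my": "your"}


def _reflect_example(text):
    # naive word-by-word substitution, just to illustrate the idea
    return " ".join(_reflection_example.get(word, word) for word in text.lower().split())
# _reflect_example("i am tired")  ->  "you are tired"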
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
responses = (
# Zen Chatbot opens with the line "Welcome, my child." The usual
# response will be a greeting problem: 'good' matches "good morning",
# "good day" etc, but also "good grief!" and other sentences starting
# with the word 'good' that may not be a greeting
(r'(hello(.*))|(good [a-zA-Z]+)',
( "The path to enlightenment is often difficult to see.",
"Greetings. I sense your mind is troubled. Tell me of your troubles.",
"Ask the question you have come to ask.",
"Hello. Do you seek englightenment?")),
# "I need" and "I want" can be followed by a thing (eg 'help')
# or an action (eg 'to see you')
#
# This is a problem with this style of response -
# person: "I need you"
# chatbot: "me can be achieved by hard work and dedication of the mind"
# i.e. 'you' is not really a thing that can be mapped this way, so this
# interpretation only makes sense for some inputs
#
(r'i need (.*)',
( "%1 can be achieved by hard work and dedication of the mind.",
"%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
"Focus your mind on%1, and you will find what you need.")),
(r'i want (.*)',
( "Desires of the heart will distract you from the path to enlightenment.",
"Will%1 help you attain enlightenment?",
"Is%1 a desire of the mind, or of the heart?")),
# why questions are separated into three types:
# "why..I" e.g. "why am I here?" "Why do I like cake?"
# "why..you" e.g. "why are you here?" "Why won't you tell me?"
# "why..." e.g. "Why is the sky blue?"
# problems:
# person: "Why can't you tell me?"
# chatbot: "Are you sure I tell you?"
# - this style works for positives (e.g. "why do you like cake?")
# but does not work for negatives (e.g. "why don't you like cake?")
(r'why (.*) i (.*)\?',
( "You%1%2?",
"Perhaps you only think you%1%2")),
(r'why (.*) you(.*)\?',
( "Why%1 you%2?",
"%2 I%1",
"Are you sure I%2?")),
(r'why (.*)\?',
( "I cannot tell you why%1.",
"Why do you think %1?" )),
# e.g. "are you listening?", "are you a duck"
(r'are you (.*)\?',
( "Maybe%1, maybe not%1.",
"Whether I am%1 or not is God's business.")),
# e.g. "am I a duck?", "am I going to die?"
(r'am i (.*)\?',
( "Perhaps%1, perhaps not%1.",
"Whether you are%1 or not is not for me to say.")),
# what questions, e.g. "what time is it?"
# problems:
# person: "What do you want?"
# chatbot: "Seek truth, not what do me want."
(r'what (.*)\?',
( "Seek truth, not what%1.",
"What%1 should not concern you.")),
# how questions, e.g. "how do you do?"
(r'how (.*)\?',
( "How do you suppose?",
"Will an answer to that really help in your search for enlightenment?",
"Ask yourself not how, but why.")),
# can questions, e.g. "can you run?", "can you come over here please?"
(r'can you (.*)\?',
( "I probably can, but I may not.",
"Maybe I can%1, and maybe I cannot.",
"I can do all, and I can do nothing.")),
# can questions, e.g. "can I have some cake?", "can I know truth?"
(r'can i (.*)\?',
( "You can%1 if you believe you can%1, and have a pure spirit.",
"Seek truth and you will know if you can%1.")),
# e.g. "It is raining" - implies the speaker is certain of a fact
(r'it is (.*)',
( "How can you be certain that%1, when you do not even know yourself?",
"Whether it is%1 or not does not change the way the world is.")),
# e.g. "is there a doctor in the house?"
(r'is there (.*)\?',
( "There is%1 if you believe there is.",
"It is possible that there is%1.")),
# e.g. "is it possible?", "is this true?"
(r'is(.*)\?',
( "%1 is not relevant.",
"Does this matter?")),
# non-specific question
(r'(.*)\?',
( "Do you think %1?",
"You seek the truth. Does the truth seek you?",
"If you intentionally pursue the answers to your questions, the answers become hard to see.",
"The answer to your question cannot be told. It must be experienced.")),
# expression of hate of form "I hate you" or "Kelly hates cheese"
(r'(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)',
( "Perhaps it is not about hating %2, but about hate from within.",
"Weeds only grow when we dislike them",
"Hate is a very strong emotion.")),
# statement containing the word 'truth'
(r'(.*) truth(.*)',
( "Seek truth, and truth will seek you.",
"Remember, it is not the spoon which bends - only yourself.",
"The search for truth is a long journey.")),
# desire to do an action
# e.g. "I want to go shopping"
(r'i want to (.*)',
( "You may %1 if your heart truly desires to.",
"You may have to %1.")),
# desire for an object
# e.g. "I want a pony"
(r'i want (.*)',
( "Does your heart truly desire %1?",
"Is this a desire of the heart, or of the mind?")),
# e.g. "I can't wait" or "I can't do this"
(r'i can\'t (.*)',
( "What we can and can't do is a limitation of the mind.",
"There are limitations of the body, and limitations of the mind.",
"Have you tried to%1 with a clear mind?")),
# "I think.." indicates uncertainty. e.g. "I think so."
# problem: exceptions...
# e.g. "I think, therefore I am"
(r'i think (.*)',
( "Uncertainty in an uncertain world.",
"Indeed, how can we be certain of anything in such uncertain times.",
"Are you not, in fact, certain that%1?")),
# "I feel...emotions/sick/light-headed..."
(r'i feel (.*)',
( "Your body and your emotions are both symptoms of your mind."
"What do you believe is the root of such feelings?",
"Feeling%1 can be a sign of your state-of-mind.")),
# exclaimation mark indicating emotion
# e.g. "Wow!" or "No!"
(r'(.*)!',
( "I sense that you are feeling emotional today.",
"You need to calm your emotions.")),
# because [statement]
# e.g. "because I said so"
(r'because (.*)',
( "Does knowning the reasons behind things help you to understand"
" the things themselves?",
"If%1, what else must be true?")),
# yes or no - raise an issue of certainty/correctness
(r'(yes)|(no)',
( "Is there certainty in an uncertain world?",
"It is better to be right than to be certain.")),
# sentence containing word 'love'
(r'(.*)love(.*)',
( "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
"Free love!")),
    # sentence containing word 'understand'
(r'(.*)understand(.*)',
( "If you understand, things are just as they are;"
" if you do not understand, things are just as they are.",
"Imagination is more important than knowledge.")),
# 'I', 'me', 'my' - person is talking about themself.
# this breaks down when words contain these - eg 'Thyme', 'Irish'
(r'(.*)(me )|( me)|(my)|(mine)|(i)(.*)',
( "'I', 'me', 'my'... these are selfish expressions.",
"Have you ever considered that you might be a selfish person?",
"Try to consider others, not just yourself.",
"Think not just of yourself, but of others.")),
# 'you' starting a sentence
# e.g. "you stink!"
(r'you (.*)',
( "My path is not of conern to you.",
"I am but one, and you but one more.")),
# say goodbye with some extra Zen wisdom.
(r'exit',
( "Farewell. The obstacle is the path.",
"Farewell. Life is a journey, not a destination.",
"Good bye. We are cups, constantly and quietly being filled."
"\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.")),
# fall through case -
# when stumped, respond with generic zen wisdom
#
(r'(.*)',
( "When you're enlightened, every word is wisdom.",
"Random talk is useless.",
"The reverse side also has a reverse side.",
"Form is emptiness, and emptiness is form.",
"I pour out a cup of water. Is the cup empty?"))
)
zen_chatbot = Chat(responses, reflections)
def zen_chat():
print('*'*75)
print("Zen Chatbot!".center(75))
print('*'*75)
print('"Look beyond mere words and letters - look into your mind"'.center(75))
print("* Talk your way to truth with Zen Chatbot.")
print("* Type 'quit' when you have had enough.")
print('*'*75)
print("Welcome, my child.")
zen_chatbot.converse()
def demo():
zen_chat()
if __name__ == "__main__":
demo()
|
py
|
1a5b933c89e48e68bd725a6fb40545e2f68ce35a
|
# test reading a given number of characters
def do(mode):
if mode == "rb":
enc = None
else:
enc = "utf-8"
f = open("unicode/data/utf-8_2.txt", mode=mode, encoding=enc)
print(f.read(1))
print(f.read(1))
print(f.read(2))
print(f.read(4))
# skip to end of line
f.readline()
# check 3-byte utf-8 char
print(f.read(1 if mode == "rt" else 3))
# check 4-byte utf-8 char
print(f.read(1 if mode == "rt" else 4))
f.close()
do("rb")
do("rt")
|
py
|
1a5b936639c9d6bfe70a6854d43a7d0b643aa6ca
|
import pandas as pd
from dash import Dash
import dash_html_components as html
import dash_core_components as dcc
import dash.dependencies as ddp
app = Dash(__name__)
|
py
|
1a5b936aaa9477cdee446982d293d2233b2692e4
|
#!/usr/bin/env python
# coding: utf-8
# This file is a part of `qal`.
#
# Copyright (c) 2021, University of Nebraska Board of Regents.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import ABCMeta, abstractmethod
class Publication(metaclass=ABCMeta):
def __init__(self, identifier, title, authors, year):
self.search_terms = {}
self.identifier = identifier
self.title = title
self.authors = authors
self.year = year
def add_search_terms(self, source, search_terms):
if source in self.search_terms.keys():
self.search_terms[source].append(search_terms)
else:
self.search_terms[source] = [search_terms]
def asdict(self):
return {'identifier': self.identifier,
'title': self.title,
'authors': self.authors,
'year': self.year,
'search_terms': self.search_terms}
@abstractmethod
def venue(self):
raise NotImplementedError("Must define venue getter.")
class Article(Publication):
def __init__(self, identifier, title, authors, year, journal, volume, issue, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.journal = journal
self.volume = volume
self.issue = issue
self.abstract = abstract
self.pages = pages
def venue(self):
return self.journal
def asdict(self):
d = super().asdict()
d['journal'] = self.journal
d['volume'] = self.volume
d['issue'] = self.issue
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
class Conference(Publication):
def __init__(self, identifier, title, authors, year, book_title, conference, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.book_title = book_title
self.conference = conference
self.abstract = abstract
self.pages = pages
def venue(self):
if self.conference:
return self.conference
else:
return self.book_title
def asdict(self):
d = super().asdict()
d['book_title'] = self.book_title
d['conference'] = self.conference
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
class Book(Publication):
def __init__(self, identifier, title, authors, year, abstract=None):
super().__init__(identifier, title, authors, year)
self.abstract = abstract
def venue(self):
return self.title
def asdict(self):
d = super().asdict()
        d['abstract'] = self.abstract
        return d
class BookChapter(Publication):
def __init__(self, identifier, title, authors, year, book_title, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.book_title = book_title
self.abstract = abstract
self.pages = pages
def venue(self):
return self.book_title
def asdict(self):
d = super().asdict()
d['book_title'] = self.book_title
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
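# Illustrative usage sketch (an assumption, not part of the original module):
if __name__ == "__main__":
    article = Article("doi:10.1000/example", "An Example Title", ["A. Author"], 2021,
                      journal="Journal of Examples", volume=1, issue=2)
    article.add_search_terms("scopus", ["example", "query"])
    print(article.venue())    # -> "Journal of Examples"
    print(article.asdict())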
|
py
|
1a5b95b4051b36d8ec834b9d7440acc1165b7690
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
def is_valid_python_version():
version_valid = False
ver = sys.version_info
if (2 == ver.major) and (7 <= ver.minor):
version_valid = True
if (3 == ver.major) and (4 <= ver.minor):
version_valid = True
return version_valid
def python_short_ver_str():
ver = sys.version_info
return "%s.%s" % (ver.major, ver.minor)
def are_deps_installed():
installed = False
try:
import peewee
import bitcoinrpc.authproxy
import simplejson
installed = True
except ImportError as e:
print("[error]: Missing dependencies")
return installed
def is_database_correctly_configured():
import peewee
import config
configured = False
cannot_connect_message = "Cannot connect to database. Please ensure database service is running and user access is properly configured in 'sentinel.conf'."
try:
db = config.db
db.connect()
configured = True
except (peewee.ImproperlyConfigured, peewee.OperationalError, ImportError) as e:
print("[error]: %s" % e)
print(cannot_connect_message)
sys.exit(1)
return configured
def has_brixcoin_conf():
import config
import io
valid_brixcoin_conf = False
# ensure brixcoin_conf exists & readable
#
# if not, print a message stating that Brixcoin Core must be installed and
# configured, including JSONRPC access in brixcoin.conf
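    # A typical config looks something like the following (illustrative only --
    # option names follow the usual Bitcoin-style conf layout and may differ):
    #   rpcuser=someuser
    #   rpcpassword=somepassword
    #   server=1
    #   rpcallowip=127.0.0.1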
try:
f = io.open(config.brixcoin_conf)
valid_brixcoin_conf = True
except IOError as e:
print(e)
return valid_brixcoin_conf
# === begin main
def main():
install_instructions = "\tpip install -r requirements.txt"
if not is_valid_python_version():
print("Python %s is not supported" % python_short_ver_str())
sys.exit(1)
if not are_deps_installed():
print("Please ensure all dependencies are installed:")
print(install_instructions)
sys.exit(1)
if not is_database_correctly_configured():
print("Please ensure correct database configuration.")
sys.exit(1)
if not has_brixcoin_conf():
print("BrixcoinCore must be installed and configured, including JSONRPC access in brixcoin.conf")
sys.exit(1)
main()
|
py
|
1a5b95bc2779fcee0b0b5bb2b0a4fa62d30a507f
|
from PEPit import PEP
from PEPit.functions import SmoothStronglyConvexFunction
def wc_polyak_steps_in_function_value(L, mu, gamma, verbose=1):
"""
Consider the minimization problem
.. math:: f_\\star \\triangleq \\min_x f(x),
where :math:`f` is :math:`L`-smooth and :math:`\\mu`-strongly convex, and :math:`x_\\star=\\arg\\min_x f(x)`.
This code computes a worst-case guarantee for a variant of a **gradient method** relying on **Polyak step-sizes**.
That is, it computes the smallest possible :math:`\\tau(L, \\mu, \\gamma)` such that the guarantee
.. math:: f(x_{t+1}) - f_\\star \\leqslant \\tau(L, \\mu, \\gamma) (f(x_t) - f_\\star)
is valid, where :math:`x_t` is the output of the gradient method with PS and :math:`\\gamma` is the effective value
of the step-size of the gradient method.
In short, for given values of :math:`L`, :math:`\\mu`, and :math:`\\gamma`, :math:`\\tau(L, \\mu, \\gamma)` is computed as the worst-case
value of :math:`f(x_{t+1})-f_\\star` when :math:`f(x_t)-f_\\star \\leqslant 1`.
**Algorithm**:
Gradient descent is described by
.. math:: x_{t+1} = x_t - \\gamma \\nabla f(x_t),
where :math:`\\gamma` is a step-size. The Polyak step-size rule under consideration here corresponds to choosing
of :math:`\\gamma` satisfying:
.. math:: \\|\\nabla f(x_t)\\|^2 = 2 L (2 - L \\gamma) (f(x_t) - f_\\star).
**Theoretical guarantee**:
The gradient method with the variant of Polyak step-sizes under consideration enjoys the
**tight** theoretical guarantee [1, Proposition 2]:
.. math:: f(x_{t+1})-f_\\star \\leqslant \\tau(L,\\mu,\\gamma) (f(x_{t})-f_\\star),
where :math:`\\gamma` is the effective step-size used at iteration :math:`t` and
.. math::
:nowrap:
\\begin{eqnarray}
            \\tau(L,\\mu,\\gamma) & = & \\left\\{\\begin{array}{ll} (\\gamma L - 1) (L \\gamma (3 - \\gamma (L + \\mu)) - 1) & \\text{if } \\gamma\\in[\\tfrac{1}{L},\\tfrac{2L-\\mu}{L^2}],\\\\
0 & \\text{otherwise.} \\end{array}\\right.
\\end{eqnarray}
**References**:
`[1] M. Barré, A. Taylor, A. d’Aspremont (2020). Complexity guarantees for Polyak steps with momentum.
In Conference on Learning Theory (COLT).
<https://arxiv.org/pdf/2002.00915.pdf>`_
Args:
L (float): the smoothness parameter.
mu (float): the strong convexity parameter.
gamma (float): the step-size.
verbose (int): Level of information details to print.
-1: No verbose at all.
0: This example's output.
1: This example's output + PEPit information.
2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value
theoretical_tau (float): theoretical value
Example:
>>> L = 1
>>> mu = 0.1
>>> gamma = 2 / (L + mu)
>>> pepit_tau, theoretical_tau = wc_polyak_steps_in_function_value(L=L, mu=mu, gamma=gamma, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 4x4
(PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
(PEPit) Setting up the problem: initial conditions (2 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 1 function(s)
function 1 : 6 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.6694215432773613
*** Example file: worst-case performance of Polyak steps ***
PEPit guarantee: f(x_1) - f_* <= 0.669422 (f(x_0) - f_*)
Theoretical guarantee: f(x_1) - f_* <= 0.669421 (f(x_0) - f_*)
"""
# Instantiate PEP
problem = PEP()
# Declare a smooth convex function
func = problem.declare_function(SmoothStronglyConvexFunction, L=L, mu=mu)
# Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
xs = func.stationary_point()
fs = func.value(xs)
    # Then define the starting point x0 of the algorithm as well as the corresponding gradient and function value g0 and f0
x0 = problem.set_initial_point()
g0, f0 = func.oracle(x0)
    # Set the initial condition on the function value gap between x0 and xs
problem.set_initial_condition(f0 - fs <= 1)
# Set the initial condition to the Polyak step-size
problem.add_constraint(g0 ** 2 == 2 * L * (2 - L * gamma) * (f0 - fs))
    # Run the Polyak step at iteration 1
x1 = x0 - gamma * g0
g1, f1 = func.oracle(x1)
# Set the performance metric to the distance in function values between x_1 and x_* = xs
problem.set_performance_metric(f1 - fs)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
if 1/L <= gamma <= (2 * L - mu)/L**2:
theoretical_tau = (gamma * L - 1) * (L * gamma * (3 - gamma * (L + mu)) - 1)
else:
theoretical_tau = 0.
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of Polyak steps ***')
print('\tPEPit guarantee:\t f(x_1) - f_* <= {:.6} (f(x_0) - f_*) '.format(pepit_tau))
print('\tTheoretical guarantee:\t f(x_1) - f_* <= {:.6} (f(x_0) - f_*)'.format(theoretical_tau))
# Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
return pepit_tau, theoretical_tau
if __name__ == "__main__":
L = 1
mu = 0.1
gamma = 2 / (L + mu)
pepit_tau, theoretical_tau = wc_polyak_steps_in_function_value(L=L, mu=mu, gamma=gamma, verbose=1)
|
py
|
1a5b9820fa2e2c320ab7b617e82cb2689c7ec044
|
import csv
import itertools
import json
import os
import threading
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import wait
from pathlib import Path
from subprocess import CalledProcessError
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from cleo.io.null_io import NullIO
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.packages.package import Package
from poetry.core.packages.utils.link import Link
from poetry.core.packages.utils.utils import url_to_path
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.utils._compat import decode
from poetry.utils.env import EnvCommandError
from poetry.utils.helpers import safe_rmtree
from poetry.utils.pip import pip_editable_install
from ..utils.authenticator import Authenticator
from ..utils.pip import pip_install
from .chef import Chef
from .chooser import Chooser
from .operations.install import Install
from .operations.operation import Operation
from .operations.uninstall import Uninstall
from .operations.update import Update
if TYPE_CHECKING:
from cleo.io.io import IO # noqa
from poetry.config.config import Config
from poetry.repositories import Pool
from poetry.utils.env import Env
from .operations import OperationTypes
class Executor:
def __init__(
self,
env: "Env",
pool: "Pool",
config: "Config",
io: "IO",
parallel: bool = None,
) -> None:
self._env = env
self._io = io
self._dry_run = False
self._enabled = True
self._verbose = False
self._authenticator = Authenticator(config, self._io)
self._chef = Chef(config, self._env)
self._chooser = Chooser(pool, self._env)
if parallel is None:
parallel = config.get("installer.parallel", True)
if parallel:
# This should be directly handled by ThreadPoolExecutor
# however, on some systems the number of CPUs cannot be determined
# (it raises a NotImplementedError), so, in this case, we assume
# that the system only has one CPU.
try:
self._max_workers = os.cpu_count() + 4
except NotImplementedError:
self._max_workers = 5
else:
self._max_workers = 1
self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
self._total_operations = 0
self._executed_operations = 0
self._executed = {"install": 0, "update": 0, "uninstall": 0}
self._skipped = {"install": 0, "update": 0, "uninstall": 0}
self._sections = dict()
self._lock = threading.Lock()
self._shutdown = False
self._hashes: Dict[str, str] = {}
@property
def installations_count(self) -> int:
return self._executed["install"]
@property
def updates_count(self) -> int:
return self._executed["update"]
@property
def removals_count(self) -> int:
return self._executed["uninstall"]
def supports_fancy_output(self) -> bool:
return self._io.output.is_decorated() and not self._dry_run
def disable(self) -> "Executor":
self._enabled = False
return self
def dry_run(self, dry_run: bool = True) -> "Executor":
self._dry_run = dry_run
return self
def verbose(self, verbose: bool = True) -> "Executor":
self._verbose = verbose
return self
def pip_install(
self, req: Union[Path, Link], upgrade: bool = False, editable: bool = False
) -> int:
func = pip_install
if editable:
func = pip_editable_install
try:
func(req, self._env, upgrade=upgrade)
except EnvCommandError as e:
output = decode(e.e.output)
if (
"KeyboardInterrupt" in output
or "ERROR: Operation cancelled by user" in output
):
return -2
raise
return 0
def execute(self, operations: List["OperationTypes"]) -> int:
self._total_operations = len(operations)
for job_type in self._executed:
self._executed[job_type] = 0
self._skipped[job_type] = 0
if operations and (self._enabled or self._dry_run):
self._display_summary(operations)
# We group operations by priority
groups = itertools.groupby(operations, key=lambda o: -o.priority)
self._sections = dict()
for _, group in groups:
tasks = []
serial_operations = []
for operation in group:
if self._shutdown:
break
# Some operations are unsafe, we must execute them serially in a group
# https://github.com/python-poetry/poetry/issues/3086
# https://github.com/python-poetry/poetry/issues/2658
#
# We need to explicitly check source type here, see:
# https://github.com/python-poetry/poetry-core/pull/98
is_parallel_unsafe = operation.job_type == "uninstall" or (
operation.package.develop
and operation.package.source_type in {"directory", "git"}
)
if not operation.skipped and is_parallel_unsafe:
serial_operations.append(operation)
continue
tasks.append(self._executor.submit(self._execute_operation, operation))
try:
wait(tasks)
for operation in serial_operations:
wait([self._executor.submit(self._execute_operation, operation)])
except KeyboardInterrupt:
self._shutdown = True
if self._shutdown:
# Cancelling further tasks from being executed
[task.cancel() for task in tasks]
self._executor.shutdown(wait=True)
break
return 1 if self._shutdown else 0
def _write(self, operation: "OperationTypes", line: str) -> None:
if not self.supports_fancy_output() or not self._should_write_operation(
operation
):
return
if self._io.is_debug():
with self._lock:
section = self._sections[id(operation)]
section.write_line(line)
return
with self._lock:
section = self._sections[id(operation)]
section.clear()
section.write(line)
def _execute_operation(self, operation: "OperationTypes") -> None:
try:
if self.supports_fancy_output():
if id(operation) not in self._sections:
if self._should_write_operation(operation):
with self._lock:
self._sections[id(operation)] = self._io.section()
self._sections[id(operation)].write_line(
" <fg=blue;options=bold>•</> {message}: <fg=blue>Pending...</>".format(
message=self.get_operation_message(operation),
),
)
else:
if self._should_write_operation(operation):
if not operation.skipped:
self._io.write_line(
" <fg=blue;options=bold>•</> {message}".format(
message=self.get_operation_message(operation),
),
)
else:
self._io.write_line(
" <fg=default;options=bold,dark>•</> {message}: "
"<fg=default;options=bold,dark>Skipped</> "
"<fg=default;options=dark>for the following reason:</> "
"<fg=default;options=bold,dark>{reason}</>".format(
message=self.get_operation_message(operation),
reason=operation.skip_reason,
)
)
try:
result = self._do_execute_operation(operation)
except EnvCommandError as e:
if e.e.returncode == -2:
result = -2
else:
raise
# If we have a result of -2 it means a KeyboardInterrupt
            # in any python subprocess, so we raise a KeyboardInterrupt
# error to be picked up by the error handler.
if result == -2:
raise KeyboardInterrupt
except Exception as e:
try:
from cleo.ui.exception_trace import ExceptionTrace
if not self.supports_fancy_output():
io = self._io
else:
message = (
" <error>•</error> {message}: <error>Failed</error>".format(
message=self.get_operation_message(operation, error=True),
)
)
self._write(operation, message)
io = self._sections.get(id(operation), self._io)
with self._lock:
trace = ExceptionTrace(e)
trace.render(io)
io.write_line("")
finally:
with self._lock:
self._shutdown = True
except KeyboardInterrupt:
try:
message = " <warning>•</warning> {message}: <warning>Cancelled</warning>".format(
message=self.get_operation_message(operation, warning=True),
)
if not self.supports_fancy_output():
self._io.write_line(message)
else:
self._write(operation, message)
finally:
with self._lock:
self._shutdown = True
def _do_execute_operation(self, operation: "OperationTypes") -> int:
method = operation.job_type
operation_message = self.get_operation_message(operation)
if operation.skipped:
if self.supports_fancy_output():
self._write(
operation,
" <fg=default;options=bold,dark>•</> {message}: "
"<fg=default;options=bold,dark>Skipped</> "
"<fg=default;options=dark>for the following reason:</> "
"<fg=default;options=bold,dark>{reason}</>".format(
message=operation_message,
reason=operation.skip_reason,
),
)
self._skipped[operation.job_type] += 1
return 0
if not self._enabled or self._dry_run:
self._io.write_line(
" <fg=blue;options=bold>•</> {message}".format(
message=operation_message,
)
)
return 0
result = getattr(self, f"_execute_{method}")(operation)
if result != 0:
return result
message = " <fg=green;options=bold>•</> {message}".format(
message=self.get_operation_message(operation, done=True),
)
self._write(operation, message)
self._increment_operations_count(operation, True)
return result
def _increment_operations_count(
self, operation: "OperationTypes", executed: bool
) -> None:
with self._lock:
if executed:
self._executed_operations += 1
self._executed[operation.job_type] += 1
else:
self._skipped[operation.job_type] += 1
def run_pip(self, *args: Any, **kwargs: Any) -> int:
try:
self._env.run_pip(*args, **kwargs)
except EnvCommandError as e:
output = decode(e.e.output)
if (
"KeyboardInterrupt" in output
or "ERROR: Operation cancelled by user" in output
):
return -2
raise
return 0
def get_operation_message(
self,
operation: "OperationTypes",
done: bool = False,
error: bool = False,
warning: bool = False,
) -> str:
base_tag = "fg=default"
operation_color = "c2"
source_operation_color = "c2"
package_color = "c1"
if error:
operation_color = "error"
elif warning:
operation_color = "warning"
elif done:
operation_color = "success"
if operation.skipped:
base_tag = "fg=default;options=dark"
operation_color += "_dark"
source_operation_color += "_dark"
package_color += "_dark"
if operation.job_type == "install":
return "<{}>Installing <{}>{}</{}> (<{}>{}</>)</>".format(
base_tag,
package_color,
operation.package.name,
package_color,
operation_color,
operation.package.full_pretty_version,
)
if operation.job_type == "uninstall":
return "<{}>Removing <{}>{}</{}> (<{}>{}</>)</>".format(
base_tag,
package_color,
operation.package.name,
package_color,
operation_color,
operation.package.full_pretty_version,
)
if operation.job_type == "update":
return "<{}>Updating <{}>{}</{}> (<{}>{}</{}> -> <{}>{}</>)</>".format(
base_tag,
package_color,
operation.initial_package.name,
package_color,
source_operation_color,
operation.initial_package.full_pretty_version,
source_operation_color,
operation_color,
operation.target_package.full_pretty_version,
)
return ""
def _display_summary(self, operations: List["OperationTypes"]) -> None:
installs = 0
updates = 0
uninstalls = 0
skipped = 0
for op in operations:
if op.skipped:
skipped += 1
continue
if op.job_type == "install":
installs += 1
elif op.job_type == "update":
updates += 1
elif op.job_type == "uninstall":
uninstalls += 1
if not installs and not updates and not uninstalls and not self._verbose:
self._io.write_line("")
self._io.write_line("No dependencies to install or update")
return
self._io.write_line("")
self._io.write_line(
"<b>Package operations</b>: "
"<info>{}</> install{}, "
"<info>{}</> update{}, "
"<info>{}</> removal{}"
"{}".format(
installs,
"" if installs == 1 else "s",
updates,
"" if updates == 1 else "s",
uninstalls,
"" if uninstalls == 1 else "s",
f", <info>{skipped}</> skipped" if skipped and self._verbose else "",
)
)
self._io.write_line("")
def _execute_install(self, operation: Union[Install, Update]) -> int:
status_code = self._install(operation)
self._save_url_reference(operation)
return status_code
def _execute_update(self, operation: Union[Install, Update]) -> int:
status_code = self._update(operation)
self._save_url_reference(operation)
return status_code
def _execute_uninstall(self, operation: Uninstall) -> int:
message = (
" <fg=blue;options=bold>•</> {message}: <info>Removing...</info>".format(
message=self.get_operation_message(operation),
)
)
self._write(operation, message)
return self._remove(operation)
def _install(self, operation: Union[Install, Update]) -> int:
package = operation.package
if package.source_type == "directory":
return self._install_directory(operation)
if package.source_type == "git":
return self._install_git(operation)
if package.source_type == "file":
archive = self._prepare_file(operation)
elif package.source_type == "url":
archive = self._download_link(operation, Link(package.source_url))
else:
archive = self._download(operation)
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Installing...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
return self.pip_install(archive, upgrade=operation.job_type == "update")
def _update(self, operation: Union[Install, Update]) -> int:
return self._install(operation)
def _remove(self, operation: Uninstall) -> int:
package = operation.package
# If we have a VCS package, remove its source directory
if package.source_type == "git":
src_dir = self._env.path / "src" / package.name
if src_dir.exists():
safe_rmtree(str(src_dir))
try:
return self.run_pip("uninstall", package.name, "-y")
except CalledProcessError as e:
if "not installed" in str(e):
return 0
raise
def _prepare_file(self, operation: Union[Install, Update]) -> Path:
package = operation.package
message = (
" <fg=blue;options=bold>•</> {message}: <info>Preparing...</info>".format(
message=self.get_operation_message(operation),
)
)
self._write(operation, message)
archive = Path(package.source_url)
if not Path(package.source_url).is_absolute() and package.root_dir:
archive = package.root_dir / archive
archive = self._chef.prepare(archive)
return archive
def _install_directory(self, operation: Union[Install, Update]) -> int:
from poetry.factory import Factory
package = operation.package
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Building...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
if package.root_dir:
req = package.root_dir / package.source_url
else:
req = Path(package.source_url).resolve(strict=False)
pyproject = PyProjectTOML(os.path.join(req, "pyproject.toml"))
if pyproject.is_poetry_project():
# Even if there is a build system specified
# some versions of pip (< 19.0.0) don't understand it
# so we need to check the version of pip to know
# if we can rely on the build system
legacy_pip = (
self._env.pip_version
< self._env.pip_version.__class__.from_parts(19, 0, 0)
)
package_poetry = Factory().create_poetry(pyproject.file.path.parent)
if package.develop and not package_poetry.package.build_script:
from poetry.masonry.builders.editable import EditableBuilder
# This is a Poetry package in editable mode
# we can use the EditableBuilder without going through pip
# to install it, unless it has a build script.
builder = EditableBuilder(package_poetry, self._env, NullIO())
builder.build()
return 0
elif legacy_pip or package_poetry.package.build_script:
from poetry.core.masonry.builders.sdist import SdistBuilder
# We need to rely on creating a temporary setup.py
# file since the version of pip does not support
# build-systems
# We also need it for non-PEP-517 packages
builder = SdistBuilder(package_poetry)
with builder.setup_py():
if package.develop:
return self.pip_install(req, editable=True)
return self.pip_install(req, upgrade=True)
if package.develop:
return self.pip_install(req, editable=True)
return self.pip_install(req, upgrade=True)
def _install_git(self, operation: Union[Install, Update]) -> int:
from poetry.core.vcs import Git
package = operation.package
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Cloning...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
src_dir = self._env.path / "src" / package.name
if src_dir.exists():
safe_rmtree(str(src_dir))
src_dir.parent.mkdir(exist_ok=True)
git = Git()
git.clone(package.source_url, src_dir)
reference = package.source_resolved_reference
if not reference:
reference = package.source_reference
git.checkout(reference, src_dir)
# Now we just need to install from the source directory
original_url = package.source_url
package._source_url = str(src_dir)
status_code = self._install_directory(operation)
package._source_url = original_url
return status_code
def _download(self, operation: Union[Install, Update]) -> Link:
link = self._chooser.choose_for(operation.package)
return self._download_link(operation, link)
def _download_link(self, operation: Union[Install, Update], link: Link) -> Link:
package = operation.package
archive = self._chef.get_cached_archive_for_link(link)
if archive is link:
# No cached distributions was found, so we download and prepare it
try:
archive = self._download_archive(operation, link)
except BaseException:
cache_directory = self._chef.get_cache_directory_for_link(link)
cached_file = cache_directory.joinpath(link.filename)
# We can't use unlink(missing_ok=True) because it's not available
# in pathlib2 for Python 2.7
if cached_file.exists():
cached_file.unlink()
raise
# TODO: Check readability of the created archive
if not link.is_wheel:
archive = self._chef.prepare(archive)
if package.files:
archive_hash = self._validate_archive_hash(archive, package)
self._hashes[package.name] = archive_hash
return archive
@staticmethod
def _validate_archive_hash(archive: Union[Path, Link], package: Package) -> str:
archive_path = (
url_to_path(archive.url) if isinstance(archive, Link) else archive
)
file_dep = FileDependency(
package.name,
archive_path,
)
archive_hash = "sha256:" + file_dep.hash()
known_hashes = {f["hash"] for f in package.files}
if archive_hash not in known_hashes:
raise RuntimeError(
f"Hash for {package} from archive {archive_path.name} not found in known hashes (was: {archive_hash})"
)
return archive_hash
def _download_archive(self, operation: Union[Install, Update], link: Link) -> Path:
response = self._authenticator.request(
"get", link.url, stream=True, io=self._sections.get(id(operation), self._io)
)
wheel_size = response.headers.get("content-length")
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Downloading...</>".format(
message=operation_message,
)
)
progress = None
if self.supports_fancy_output():
if wheel_size is None:
self._write(operation, message)
else:
from cleo.ui.progress_bar import ProgressBar
progress = ProgressBar(
self._sections[id(operation)], max=int(wheel_size)
)
progress.set_format(message + " <b>%percent%%</b>")
if progress:
with self._lock:
progress.start()
done = 0
archive = self._chef.get_cache_directory_for_link(link) / link.filename
archive.parent.mkdir(parents=True, exist_ok=True)
with archive.open("wb") as f:
for chunk in response.iter_content(chunk_size=4096):
if not chunk:
break
done += len(chunk)
if progress:
with self._lock:
progress.set_progress(done)
f.write(chunk)
if progress:
with self._lock:
progress.finish()
return archive
def _should_write_operation(self, operation: Operation) -> bool:
return not operation.skipped or self._dry_run or self._verbose
def _save_url_reference(self, operation: "OperationTypes") -> None:
"""
Create and store a PEP-610 `direct_url.json` file, if needed.
"""
if operation.job_type not in {"install", "update"}:
return
package = operation.package
if not package.source_url:
# Since we are installing from our own distribution cache
# pip will write a `direct_url.json` file pointing to the cache
# distribution.
# That's not what we want so we remove the direct_url.json file,
# if it exists.
for (
direct_url_json
) in self._env.site_packages.find_distribution_direct_url_json_files(
distribution_name=package.name, writable_only=True
):
# We can't use unlink(missing_ok=True) because it's not always available
if direct_url_json.exists():
direct_url_json.unlink()
return
url_reference = None
if package.source_type == "git":
url_reference = self._create_git_url_reference(package)
elif package.source_type == "url":
url_reference = self._create_url_url_reference(package)
elif package.source_type == "directory":
url_reference = self._create_directory_url_reference(package)
elif package.source_type == "file":
url_reference = self._create_file_url_reference(package)
if url_reference:
for dist in self._env.site_packages.distributions(
name=package.name, writable_only=True
):
dist._path.joinpath("direct_url.json").write_text(
json.dumps(url_reference),
encoding="utf-8",
)
record = dist._path.joinpath("RECORD")
if record.exists():
with record.open(mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(
[
str(
dist._path.joinpath("direct_url.json").relative_to(
record.parent.parent
)
),
"",
"",
]
)
def _create_git_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
reference = {
"url": package.source_url,
"vcs_info": {
"vcs": "git",
"requested_revision": package.source_reference,
"commit_id": package.source_resolved_reference,
},
}
return reference
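    # For illustration (placeholder values): for a git-sourced package the method
    # above yields a PEP 610 style mapping such as
    #   {"url": "https://github.com/org/repo.git",
    #    "vcs_info": {"vcs": "git",
    #                 "requested_revision": "main",
    #                 "commit_id": "<full commit sha>"}}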
def _create_url_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
archive_info = {}
if package.name in self._hashes:
archive_info["hash"] = self._hashes[package.name]
reference = {"url": package.source_url, "archive_info": archive_info}
return reference
def _create_file_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
archive_info = {}
if package.name in self._hashes:
archive_info["hash"] = self._hashes[package.name]
reference = {
"url": Path(package.source_url).as_uri(),
"archive_info": archive_info,
}
return reference
def _create_directory_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
dir_info = {}
if package.develop:
dir_info["editable"] = True
reference = {
"url": Path(package.source_url).as_uri(),
"dir_info": dir_info,
}
return reference
|
py
|
1a5b98d878314f00121821f069977718114416d7
|
# coding: utf-8
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkkms.v1.model.action_resources import ActionResources
from huaweicloudsdkkms.v1.model.api_link import ApiLink
from huaweicloudsdkkms.v1.model.api_version_detail import ApiVersionDetail
from huaweicloudsdkkms.v1.model.batch_create_kms_tags_request import BatchCreateKmsTagsRequest
from huaweicloudsdkkms.v1.model.batch_create_kms_tags_request_body import BatchCreateKmsTagsRequestBody
from huaweicloudsdkkms.v1.model.batch_create_kms_tags_response import BatchCreateKmsTagsResponse
from huaweicloudsdkkms.v1.model.cancel_grant_request import CancelGrantRequest
from huaweicloudsdkkms.v1.model.cancel_grant_response import CancelGrantResponse
from huaweicloudsdkkms.v1.model.cancel_key_deletion_request import CancelKeyDeletionRequest
from huaweicloudsdkkms.v1.model.cancel_key_deletion_response import CancelKeyDeletionResponse
from huaweicloudsdkkms.v1.model.cancel_self_grant_request import CancelSelfGrantRequest
from huaweicloudsdkkms.v1.model.cancel_self_grant_response import CancelSelfGrantResponse
from huaweicloudsdkkms.v1.model.create_datakey_request import CreateDatakeyRequest
from huaweicloudsdkkms.v1.model.create_datakey_request_body import CreateDatakeyRequestBody
from huaweicloudsdkkms.v1.model.create_datakey_response import CreateDatakeyResponse
from huaweicloudsdkkms.v1.model.create_datakey_without_plaintext_request import CreateDatakeyWithoutPlaintextRequest
from huaweicloudsdkkms.v1.model.create_datakey_without_plaintext_response import CreateDatakeyWithoutPlaintextResponse
from huaweicloudsdkkms.v1.model.create_grant_request import CreateGrantRequest
from huaweicloudsdkkms.v1.model.create_grant_request_body import CreateGrantRequestBody
from huaweicloudsdkkms.v1.model.create_grant_response import CreateGrantResponse
from huaweicloudsdkkms.v1.model.create_key_request import CreateKeyRequest
from huaweicloudsdkkms.v1.model.create_key_request_body import CreateKeyRequestBody
from huaweicloudsdkkms.v1.model.create_key_response import CreateKeyResponse
from huaweicloudsdkkms.v1.model.create_kms_tag_request import CreateKmsTagRequest
from huaweicloudsdkkms.v1.model.create_kms_tag_request_body import CreateKmsTagRequestBody
from huaweicloudsdkkms.v1.model.create_kms_tag_response import CreateKmsTagResponse
from huaweicloudsdkkms.v1.model.create_parameters_for_import_request import CreateParametersForImportRequest
from huaweicloudsdkkms.v1.model.create_parameters_for_import_response import CreateParametersForImportResponse
from huaweicloudsdkkms.v1.model.create_random_request import CreateRandomRequest
from huaweicloudsdkkms.v1.model.create_random_response import CreateRandomResponse
from huaweicloudsdkkms.v1.model.create_secret_request import CreateSecretRequest
from huaweicloudsdkkms.v1.model.create_secret_request_body import CreateSecretRequestBody
from huaweicloudsdkkms.v1.model.create_secret_response import CreateSecretResponse
from huaweicloudsdkkms.v1.model.create_secret_version_request import CreateSecretVersionRequest
from huaweicloudsdkkms.v1.model.create_secret_version_request_body import CreateSecretVersionRequestBody
from huaweicloudsdkkms.v1.model.create_secret_version_response import CreateSecretVersionResponse
from huaweicloudsdkkms.v1.model.decrypt_data_request import DecryptDataRequest
from huaweicloudsdkkms.v1.model.decrypt_data_request_body import DecryptDataRequestBody
from huaweicloudsdkkms.v1.model.decrypt_data_response import DecryptDataResponse
from huaweicloudsdkkms.v1.model.decrypt_datakey_request import DecryptDatakeyRequest
from huaweicloudsdkkms.v1.model.decrypt_datakey_request_body import DecryptDatakeyRequestBody
from huaweicloudsdkkms.v1.model.decrypt_datakey_response import DecryptDatakeyResponse
from huaweicloudsdkkms.v1.model.delete_imported_key_material_request import DeleteImportedKeyMaterialRequest
from huaweicloudsdkkms.v1.model.delete_imported_key_material_response import DeleteImportedKeyMaterialResponse
from huaweicloudsdkkms.v1.model.delete_key_request import DeleteKeyRequest
from huaweicloudsdkkms.v1.model.delete_key_response import DeleteKeyResponse
from huaweicloudsdkkms.v1.model.delete_secret_for_schedule_request import DeleteSecretForScheduleRequest
from huaweicloudsdkkms.v1.model.delete_secret_for_schedule_request_body import DeleteSecretForScheduleRequestBody
from huaweicloudsdkkms.v1.model.delete_secret_for_schedule_response import DeleteSecretForScheduleResponse
from huaweicloudsdkkms.v1.model.delete_secret_request import DeleteSecretRequest
from huaweicloudsdkkms.v1.model.delete_secret_response import DeleteSecretResponse
from huaweicloudsdkkms.v1.model.delete_secret_stage_request import DeleteSecretStageRequest
from huaweicloudsdkkms.v1.model.delete_secret_stage_response import DeleteSecretStageResponse
from huaweicloudsdkkms.v1.model.delete_tag_request import DeleteTagRequest
from huaweicloudsdkkms.v1.model.delete_tag_response import DeleteTagResponse
from huaweicloudsdkkms.v1.model.disable_key_request import DisableKeyRequest
from huaweicloudsdkkms.v1.model.disable_key_response import DisableKeyResponse
from huaweicloudsdkkms.v1.model.disable_key_rotation_request import DisableKeyRotationRequest
from huaweicloudsdkkms.v1.model.disable_key_rotation_response import DisableKeyRotationResponse
from huaweicloudsdkkms.v1.model.enable_key_request import EnableKeyRequest
from huaweicloudsdkkms.v1.model.enable_key_response import EnableKeyResponse
from huaweicloudsdkkms.v1.model.enable_key_rotation_request import EnableKeyRotationRequest
from huaweicloudsdkkms.v1.model.enable_key_rotation_response import EnableKeyRotationResponse
from huaweicloudsdkkms.v1.model.encrypt_data_request import EncryptDataRequest
from huaweicloudsdkkms.v1.model.encrypt_data_request_body import EncryptDataRequestBody
from huaweicloudsdkkms.v1.model.encrypt_data_response import EncryptDataResponse
from huaweicloudsdkkms.v1.model.encrypt_datakey_request import EncryptDatakeyRequest
from huaweicloudsdkkms.v1.model.encrypt_datakey_request_body import EncryptDatakeyRequestBody
from huaweicloudsdkkms.v1.model.encrypt_datakey_response import EncryptDatakeyResponse
from huaweicloudsdkkms.v1.model.gen_random_request_body import GenRandomRequestBody
from huaweicloudsdkkms.v1.model.get_parameters_for_import_request_body import GetParametersForImportRequestBody
from huaweicloudsdkkms.v1.model.grants import Grants
from huaweicloudsdkkms.v1.model.import_key_material_request import ImportKeyMaterialRequest
from huaweicloudsdkkms.v1.model.import_key_material_request_body import ImportKeyMaterialRequestBody
from huaweicloudsdkkms.v1.model.import_key_material_response import ImportKeyMaterialResponse
from huaweicloudsdkkms.v1.model.ke_k_info import KeKInfo
from huaweicloudsdkkms.v1.model.key_alias_info import KeyAliasInfo
from huaweicloudsdkkms.v1.model.key_description_info import KeyDescriptionInfo
from huaweicloudsdkkms.v1.model.key_details import KeyDetails
from huaweicloudsdkkms.v1.model.key_status_info import KeyStatusInfo
from huaweicloudsdkkms.v1.model.list_grants_request import ListGrantsRequest
from huaweicloudsdkkms.v1.model.list_grants_request_body import ListGrantsRequestBody
from huaweicloudsdkkms.v1.model.list_grants_response import ListGrantsResponse
from huaweicloudsdkkms.v1.model.list_key_detail_request import ListKeyDetailRequest
from huaweicloudsdkkms.v1.model.list_key_detail_response import ListKeyDetailResponse
from huaweicloudsdkkms.v1.model.list_keys_request import ListKeysRequest
from huaweicloudsdkkms.v1.model.list_keys_request_body import ListKeysRequestBody
from huaweicloudsdkkms.v1.model.list_keys_response import ListKeysResponse
from huaweicloudsdkkms.v1.model.list_kms_by_tags_request import ListKmsByTagsRequest
from huaweicloudsdkkms.v1.model.list_kms_by_tags_request_body import ListKmsByTagsRequestBody
from huaweicloudsdkkms.v1.model.list_kms_by_tags_response import ListKmsByTagsResponse
from huaweicloudsdkkms.v1.model.list_kms_tags_request import ListKmsTagsRequest
from huaweicloudsdkkms.v1.model.list_kms_tags_response import ListKmsTagsResponse
from huaweicloudsdkkms.v1.model.list_retirable_grants_request import ListRetirableGrantsRequest
from huaweicloudsdkkms.v1.model.list_retirable_grants_request_body import ListRetirableGrantsRequestBody
from huaweicloudsdkkms.v1.model.list_retirable_grants_response import ListRetirableGrantsResponse
from huaweicloudsdkkms.v1.model.list_secret_stage_request import ListSecretStageRequest
from huaweicloudsdkkms.v1.model.list_secret_stage_response import ListSecretStageResponse
from huaweicloudsdkkms.v1.model.list_secret_versions_request import ListSecretVersionsRequest
from huaweicloudsdkkms.v1.model.list_secret_versions_response import ListSecretVersionsResponse
from huaweicloudsdkkms.v1.model.list_secrets_request import ListSecretsRequest
from huaweicloudsdkkms.v1.model.list_secrets_response import ListSecretsResponse
from huaweicloudsdkkms.v1.model.operate_key_request_body import OperateKeyRequestBody
from huaweicloudsdkkms.v1.model.page_info import PageInfo
from huaweicloudsdkkms.v1.model.quotas import Quotas
from huaweicloudsdkkms.v1.model.resources import Resources
from huaweicloudsdkkms.v1.model.restore_secret_request import RestoreSecretRequest
from huaweicloudsdkkms.v1.model.restore_secret_response import RestoreSecretResponse
from huaweicloudsdkkms.v1.model.revoke_grant_request_body import RevokeGrantRequestBody
from huaweicloudsdkkms.v1.model.schedule_key_deletion_request_body import ScheduleKeyDeletionRequestBody
from huaweicloudsdkkms.v1.model.secret import Secret
from huaweicloudsdkkms.v1.model.show_key_rotation_status_request import ShowKeyRotationStatusRequest
from huaweicloudsdkkms.v1.model.show_key_rotation_status_response import ShowKeyRotationStatusResponse
from huaweicloudsdkkms.v1.model.show_kms_tags_request import ShowKmsTagsRequest
from huaweicloudsdkkms.v1.model.show_kms_tags_response import ShowKmsTagsResponse
from huaweicloudsdkkms.v1.model.show_secret_request import ShowSecretRequest
from huaweicloudsdkkms.v1.model.show_secret_response import ShowSecretResponse
from huaweicloudsdkkms.v1.model.show_secret_version_request import ShowSecretVersionRequest
from huaweicloudsdkkms.v1.model.show_secret_version_response import ShowSecretVersionResponse
from huaweicloudsdkkms.v1.model.show_user_instances_request import ShowUserInstancesRequest
from huaweicloudsdkkms.v1.model.show_user_instances_response import ShowUserInstancesResponse
from huaweicloudsdkkms.v1.model.show_user_quotas_request import ShowUserQuotasRequest
from huaweicloudsdkkms.v1.model.show_user_quotas_response import ShowUserQuotasResponse
from huaweicloudsdkkms.v1.model.show_version_request import ShowVersionRequest
from huaweicloudsdkkms.v1.model.show_version_response import ShowVersionResponse
from huaweicloudsdkkms.v1.model.show_versions_request import ShowVersionsRequest
from huaweicloudsdkkms.v1.model.show_versions_response import ShowVersionsResponse
from huaweicloudsdkkms.v1.model.stage import Stage
from huaweicloudsdkkms.v1.model.tag import Tag
from huaweicloudsdkkms.v1.model.tag_item import TagItem
from huaweicloudsdkkms.v1.model.update_key_alias_request import UpdateKeyAliasRequest
from huaweicloudsdkkms.v1.model.update_key_alias_request_body import UpdateKeyAliasRequestBody
from huaweicloudsdkkms.v1.model.update_key_alias_response import UpdateKeyAliasResponse
from huaweicloudsdkkms.v1.model.update_key_description_request import UpdateKeyDescriptionRequest
from huaweicloudsdkkms.v1.model.update_key_description_request_body import UpdateKeyDescriptionRequestBody
from huaweicloudsdkkms.v1.model.update_key_description_response import UpdateKeyDescriptionResponse
from huaweicloudsdkkms.v1.model.update_key_rotation_interval_request import UpdateKeyRotationIntervalRequest
from huaweicloudsdkkms.v1.model.update_key_rotation_interval_request_body import UpdateKeyRotationIntervalRequestBody
from huaweicloudsdkkms.v1.model.update_key_rotation_interval_response import UpdateKeyRotationIntervalResponse
from huaweicloudsdkkms.v1.model.update_secret_request import UpdateSecretRequest
from huaweicloudsdkkms.v1.model.update_secret_request_body import UpdateSecretRequestBody
from huaweicloudsdkkms.v1.model.update_secret_response import UpdateSecretResponse
from huaweicloudsdkkms.v1.model.update_secret_stage_request import UpdateSecretStageRequest
from huaweicloudsdkkms.v1.model.update_secret_stage_request_body import UpdateSecretStageRequestBody
from huaweicloudsdkkms.v1.model.update_secret_stage_response import UpdateSecretStageResponse
from huaweicloudsdkkms.v1.model.version import Version
from huaweicloudsdkkms.v1.model.version_metadata import VersionMetadata
|
py
|
1a5b9a9691cc7cf744bf68ca67929ae82181a00f
|
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from rest_framework import status, viewsets
from rest_framework.response import Response
from .serializers import ReadOnlyUserSerializer, WriteOnlyUserSerializer
User = get_user_model()
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
def get_serializer_class(self):
        if self.action in ('list', 'retrieve'):
return ReadOnlyUserSerializer
else:
return WriteOnlyUserSerializer
def destroy(self, request, pk=None):
self.user = get_object_or_404(User, id=pk)
        # Soft delete: deactivate the account instead of removing the row.
        self.user.is_active = False
self.user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
|
py
|
1a5b9b97b0c8f4672e94504da2d5c296552002b1
|
from django.contrib import admin
from core.models import Item, Listing, UserProfile, PromoCode, Address
from image_cropping import ImageCroppingMixin
class ItemAdmin(ImageCroppingMixin, admin.ModelAdmin):
pass
admin.site.register(Item, ItemAdmin)
admin.site.register(Listing)
admin.site.register(UserProfile)
admin.site.register(PromoCode)
admin.site.register(Address)
|
py
|
1a5b9c72bf06483922887e3593c04f6636500650
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import namedtuple
import torch
from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d,
Module, PReLU, ReLU, Sequential, Sigmoid)
# yapf: disable
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) # isort:skip # noqa
"""
# yapf: enable
class Flatten(Module):
"""Flatten Module."""
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
"""l2 normalization.
Args:
input (torch.Tensor): The input tensor.
axis (int, optional): Specifies which axis of input to calculate the
norm across. Defaults to 1.
Returns:
Tensor: Tensor after L2 normalization per-instance.
"""
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
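# Illustrative usage (values are made up, not from the original file): each row
# of a batch of embeddings is rescaled to unit L2 norm.
#
# >>> feats = torch.tensor([[3.0, 4.0, 0.0], [0.0, 0.0, 2.0]])
# >>> l2_norm(feats)            # rows become [0.6, 0.8, 0.0] and [0.0, 0.0, 1.0]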
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
"""A named tuple describing a ResNet block."""
def get_block(in_channel, depth, num_units, stride=2):
"""Get a single block config.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
num_units (int): Number of unit modules.
stride (int, optional): Conv2d stride. Defaults to 2.
Returns:
list: A list of unit modules' config.
"""
return [Bottleneck(in_channel, depth, stride)
] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
"""Get block configs of backbone.
Args:
num_layers (int): Number of ConvBlock layers in backbone.
Raises:
ValueError: `num_layers` must be one of [50, 100, 152].
Returns:
list: A list of block configs.
"""
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError(
'Invalid number of layers: {}. Must be one of [50, 100, 152]'.
format(num_layers))
return blocks
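# Illustrative check (not in the original file): the 50-layer configuration has
# four stages of 3, 4, 14 and 3 bottleneck units, each stage opening with a
# stride-2 unit followed by stride-1 units.
#
# >>> [len(stage) for stage in get_blocks(50)]
# [3, 4, 14, 3]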
class SEModule(Module):
"""Squeeze-and-Excitation Modules.
Args:
channels (int): Input channels.
reduction (int): Intermediate channels reduction ratio.
"""
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels,
channels // reduction,
kernel_size=1,
padding=0,
bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction,
channels,
kernel_size=1,
padding=0,
bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
"""Forward Function."""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
"""Intermediate Resblock of bottleneck with SEModule.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth), SEModule(depth, 16))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
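# Assembly sketch (illustrative; the backbone that consumes these blocks lives
# elsewhere in the code base): the block configs can be instantiated into a
# residual body like this.
#
# blocks = get_blocks(50)
# body = Sequential(*[
#     bottleneck_IR_SE(unit.in_channel, unit.depth, unit.stride)
#     for stage in blocks
#     for unit in stage
# ])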
|
py
|
1a5b9deff9564833eb66dd7cc50478c48fce4ffc
|
#!/usr/bin/env python3
import json
import sys
import os
import subprocess
import time
description = """
process all data_set within a bundle
"""
def main():
try:
args = parse_args()
bundle = read_bundle_json(args["bundle_json"])
for data_set in bundle["DATA_SETS"]:
process_one_set(data_set)
except:
print_help()
raise
def print_help():
print(description)
print("Usage:")
print("./process_bundle.py <bundle_json>")
print()
def parse_args():
if len(sys.argv) != 2:
raise Exception("Wrong number of args, need 1")
args = {}
args["bundle_json"] = sys.argv[1]
return args
# Read the json file contains info for 1 bundle
def read_bundle_json(json_filename):
with open(json_filename, "r") as outfile:
bundle_json = json.load(outfile)
return bundle_json
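# Illustrative bundle file (assumed shape, not taken from this repository): the
# script only relies on a top-level "DATA_SETS" list of dicts whose string
# keys/values are exported as environment variables for process_one_set.sh.
#
# {
#     "DATA_SETS": [
#         {"INPUT_DIR": "/data/run1", "OUTPUT_DIR": "/results/run1"},
#         {"INPUT_DIR": "/data/run2", "OUTPUT_DIR": "/results/run2"}
#     ]
# }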
# Run process_one_set.sh
# data_set argument is added into the environment
def process_one_set(data_set):
assert(isinstance(data_set, dict))
my_env = os.environ.copy()
my_env.update(data_set) # add data_set into the environment
    try:
        subprocess.check_output(["/bin/bash", "process_one_set.sh"], env=my_env)
    except subprocess.CalledProcessError as err:
        # check_output raises CalledProcessError on a non-zero exit; its
        # returncode field carries the shell script's exit status.
        print("Error when running for data_set: ", data_set)
        sys.exit(err.returncode)
main()
|
py
|
1a5b9e64465ceda6aa10eb55cb00f91d204d94b0
|
# -*- coding: utf-8 -*-
import numpy as np
"""
This script is for outputting PC1/PC2/PC3 data from preprocd_dataset.npz
of MD samples
"""
def makePC123(dtsetfile, outfile, grpname):
dtset= np.load(dtsetfile, allow_pickle=True)
#allow_pickle op is for adapting spec change of numpy 1.16.3 and later
dts= dtset['dataset']
dataset0=[]
for dt in dts:
dt0=dt['inputs/0']
dataset0.append(dt0)
dim0=len(dataset0)
dim1=len(dataset0[0])
dim2=len(dataset0[0][0])
with open(outfile, 'w') as f1:
for dt64 in dataset0:
for dt in dt64:
wdt=str(dt[0])+" "+str(dt[1])+" "+str(dt[2])+"\n"
f1.write(wdt)
print(f'Saved PC1/PC2/PC3 data of {grpname}: Shape= {dim0} x {dim1} x {dim2}')
if __name__ == '__main__':
mdfolder="/home/okugawa/HDNNP/Si-190808-md"
outfolder=mdfolder+"/result/PC123/"
grps=['1000K','1200K']
for grp in grps:
for j in range(1,11):
grpname=grp+"-"+str(j)
dtsetdir=mdfolder+"/"+grp+"/"+str(j)
dtsetfile=dtsetdir+"/data/CrystalSi64/preprocd_dataset.npz"
outfile=outfolder+grpname+"-PC123.txt"
makePC123(dtsetfile, outfile, grpname)
|
py
|
1a5b9e6631d17f2e5e9b23505fa9872e2a797b64
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.constants.globals import UNKNOWN
class EventTypes:
ADDED = "ADDED"
MODIFIED = "MODIFIED"
DELETED = "DELETED"
ERROR = "ERROR"
class PodConditions:
READY = "Ready"
INITIALIZED = "Initialized"
SCHEDULED = "PodScheduled"
UNSCHEDULABLE = "Unschedulable"
VALUES = [READY, INITIALIZED, SCHEDULED]
class PodLifeCycle:
CONTAINER_CREATING = "ContainerCreating"
PENDING = "Pending"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
UNKNOWN = UNKNOWN
DONE_STATUS = [FAILED, SUCCEEDED]
|
py
|
1a5b9f37a398c4086b04df33ce0ae567a56c1954
|
from pytorch_lightning.callbacks import ModelCheckpoint
def compute_receptive_field(kernel_pattern, dilation_pattern):
""" Compute the receptive field in samples."""
rf = 1
for kernel_size, dilation in zip(kernel_pattern, dilation_pattern):
rf += (kernel_size-1) * dilation
return rf
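# Worked example (illustrative values, not from the original file): a stack with
# kernel_pattern=[2, 2, 2] and dilation_pattern=[1, 2, 4] gives a receptive
# field of 1 + 1*1 + 1*2 + 1*4 = 8 samples.
#
# >>> compute_receptive_field([2, 2, 2], [1, 2, 4])
# 8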
def to_np(x):
return x.detach().cpu().squeeze().numpy()
class CheckpointSaverCallback(ModelCheckpoint):
def on_keyboard_interrupt(self, trainer, pl_module):
print('CheckpointSaverCallback - Keyboard Interrupt. Best model path, best model score', self.best_model_path, self.best_model_score)
pl_module.logger.experiment.log_model(f'best_model', self.best_model_path)
pl_module.logger.experiment.log_parameter("best_model_path", self.best_model_path)
pl_module.logger.experiment.end()
def on_train_start(self, trainer, pl_module):
super(CheckpointSaverCallback, self).on_train_start(trainer, pl_module)
trainable_parameters = sum(p.numel() for p in pl_module.parameters() if p.requires_grad)
pl_module.logger.experiment.log_parameter("trainable_params", trainable_parameters)
# save before training
local_model_path = pl_module.logger.save_dir+f"/checkpoints/epoch0.ckpt"
trainer.save_checkpoint(local_model_path)
pl_module.logger.experiment.log_model(f'epoch0', local_model_path)
def on_train_end(self, trainer, pl_module):
print('CheckpointSaverCallback - Train End. Best model path, best model score', self.best_model_path, self.best_model_score)
super(CheckpointSaverCallback, self).on_train_end(trainer, pl_module)
pl_module.logger.experiment.log_model(f'best_model', self.best_model_path)
pl_module.logger.experiment.log_parameter("best_model_path", self.best_model_path)
pl_module.logger.experiment.end()
def on_validation_end(self, trainer, pl_module):
super(CheckpointSaverCallback, self).on_validation_end(trainer, pl_module)
epoch = pl_module.current_epoch
if epoch in [1,2,3,5,10,25,50,75,100,150,200,500,750,1000,1500,2000]:
print(f'Epoch {epoch}: Saving checkpoint, logging histogram.')
local_model_path = pl_module.logger.save_dir+f"/checkpoints/epoch{epoch}.ckpt"
trainer.save_checkpoint(local_model_path)
pl_module.logger.experiment.log_model(f'epoch{epoch}', local_model_path)
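# Usage sketch (assumptions: a pytorch_lightning Trainer/LightningModule and a
# Comet-style logger exposing `experiment.log_model`; all names below are
# placeholders):
#
# import pytorch_lightning as pl
# checkpoint_cb = CheckpointSaverCallback(monitor="val_loss")
# trainer = pl.Trainer(callbacks=[checkpoint_cb], logger=comet_logger)
# trainer.fit(model)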
|
py
|
1a5b9ffacb6ba35cfd893944fd6eed7ba2b5c55b
|
#!/usr/bin/env python
'''
Python WebSocket library with support for "wss://" encryption.
Copyright 2011 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
Supports following protocol versions:
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, time, errno, signal, socket, traceback, select
import array, struct
from base64 import b64encode, b64decode
# Imports that vary by python version
# python 3.0 differences
if sys.hexversion > 0x3000000:
b2s = lambda buf: buf.decode('latin_1')
s2b = lambda s: s.encode('latin_1')
s2a = lambda s: s
else:
b2s = lambda buf: buf # No-op
s2b = lambda s: s # No-op
s2a = lambda s: [ord(c) for c in s]
try: from io import StringIO
except: from cStringIO import StringIO
try: from http.server import SimpleHTTPRequestHandler
except: from SimpleHTTPServer import SimpleHTTPRequestHandler
# python 2.6 differences
try: from hashlib import md5, sha1
except: from md5 import md5; from sha import sha as sha1
# python 2.5 differences
try:
from struct import pack, unpack_from
except:
from struct import pack
def unpack_from(fmt, buf, offset=0):
slice = buffer(buf, offset, struct.calcsize(fmt))
return struct.unpack(fmt, slice)
# Degraded functionality if these imports are missing
for mod, sup in [('numpy', 'HyBi protocol'), ('ssl', 'TLS/SSL/wss'),
('multiprocessing', 'Multi-Processing'),
('resource', 'daemonizing')]:
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
print("WARNING: no '%s' module, %s is slower or disabled" % (
mod, sup))
if multiprocessing and sys.platform == 'win32':
# make sockets pickle-able/inheritable
import multiprocessing.reduction
class WebSocketServer(object):
"""
WebSockets server class.
Must be sub-classed with new_client method definition.
"""
buffer_size = 65536
server_handshake_hixie = """HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
%sWebSocket-Origin: %s\r
%sWebSocket-Location: %s://%s%s\r
"""
server_handshake_hybi = """HTTP/1.1 101 Switching Protocols\r
Upgrade: websocket\r
Connection: Upgrade\r
Sec-WebSocket-Accept: %s\r
"""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
policy_response = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\n"""
# An exception before the WebSocket connection was established
class EClose(Exception):
pass
# An exception while the WebSocket client was connected
class CClose(Exception):
pass
def __init__(self, listen_host='', listen_port=None, source_is_ipv6=False,
verbose=False, cert='', key='', ssl_only=None,
daemon=False, record='', web='',
run_once=False, timeout=0, idle_timeout=0):
# settings
self.verbose = verbose
self.listen_host = listen_host
self.listen_port = listen_port
self.prefer_ipv6 = source_is_ipv6
self.ssl_only = ssl_only
self.daemon = daemon
self.run_once = run_once
self.timeout = timeout
self.idle_timeout = idle_timeout
self.launch_time = time.time()
self.ws_connection = False
self.handler_id = 1
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.key = self.web = self.record = ''
if key:
self.key = os.path.abspath(key)
if web:
self.web = os.path.abspath(web)
if record:
self.record = os.path.abspath(record)
if self.web:
os.chdir(self.web)
# Sanity checks
if not ssl and self.ssl_only:
raise Exception("No 'ssl' module and SSL-only specified")
if self.daemon and not resource:
raise Exception("Module 'resource' required to daemonize")
# Show configuration
print("WebSocket server settings:")
print(" - Listen on %s:%s" % (
self.listen_host, self.listen_port))
print(" - Flash security policy server")
if self.web:
print(" - Web server. Web root: %s" % self.web)
if ssl:
if os.path.exists(self.cert):
print(" - SSL/TLS support")
if self.ssl_only:
print(" - Deny non-SSL/TLS connections")
else:
print(" - No SSL/TLS support (no cert file)")
else:
print(" - No SSL/TLS support (no 'ssl' module)")
if self.daemon:
print(" - Backgrounding (daemon)")
if self.record:
print(" - Recording to '%s.*'" % self.record)
#
# WebSocketServer static methods
#
@staticmethod
def socket(host, port=None, connect=False, prefer_ipv6=False, unix_socket=None, use_ssl=False):
""" Resolve a host (and optional port) to an IPv4 or IPv6
address. Create a socket. Bind to it if listen is set,
otherwise connect to it. Return the socket.
"""
flags = 0
if host == '':
host = None
if connect and not (port or unix_socket):
raise Exception("Connect mode requires a port")
if use_ssl and not ssl:
raise Exception("SSL socket requested but Python SSL module not loaded.");
if not connect and use_ssl:
raise Exception("SSL only supported in connect mode (for now)")
if not connect:
flags = flags | socket.AI_PASSIVE
if not unix_socket:
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
if not addrs:
raise Exception("Could not resolve host '%s'" % host)
addrs.sort(key=lambda x: x[0])
if prefer_ipv6:
addrs.reverse()
sock = socket.socket(addrs[0][0], addrs[0][1])
if connect:
sock.connect(addrs[0][4])
if use_ssl:
sock = ssl.wrap_socket(sock)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addrs[0][4])
sock.listen(100)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(unix_socket)
return sock
@staticmethod
def daemonize(keepfd=None, chdir='/'):
os.umask(0)
if chdir:
os.chdir(chdir)
else:
os.chdir('/')
os.setgid(os.getgid()) # relinquish elevations
os.setuid(os.getuid()) # relinquish elevations
# Double fork to daemonize
if os.fork() > 0: os._exit(0) # Parent exits
os.setsid() # Obtain new process group
if os.fork() > 0: os._exit(0) # Parent exits
# Signal handling
def terminate(a,b): os._exit(0)
signal.signal(signal.SIGTERM, terminate)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Close open files
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd != keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
if exc.errno != errno.EBADF: raise
# Redirect I/O to /dev/null
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
@staticmethod
def unmask(buf, hlen, plen):
pstart = hlen + 4
pend = pstart + plen
if numpy:
b = c = s2b('')
if plen >= 4:
mask = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=hlen, count=1)
data = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=pstart, count=int(plen / 4))
#b = numpy.bitwise_xor(data, mask).data
b = numpy.bitwise_xor(data, mask).tostring()
if plen % 4:
#print("Partial unmask")
mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=hlen, count=(plen % 4))
data = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=pend - (plen % 4),
count=(plen % 4))
c = numpy.bitwise_xor(data, mask).tostring()
return b + c
else:
# Slower fallback
mask = buf[hlen:hlen+4]
data = array.array('B')
mask = s2a(mask)
data.fromstring(buf[pstart:pend])
for i in range(len(data)):
data[i] ^= mask[i % 4]
return data.tostring()
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = pack('>BBQ', b1, 127, payload_len)
#print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
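    # Worked example (illustrative): WebSocketServer.encode_hybi(b'hello', 0x2)
    # builds the 2-byte header b'\x82\x05' (FIN=1, opcode=0x2, unmasked,
    # length=5) and returns the 7-byte frame plus (hlen=2, tail=0).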
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'masked' : boolean,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'masked' : False,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : 1000,
'close_reason' : ''}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
f['masked'] = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxQ', buf)
full_len = f['hlen'] + f['masked'] * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if f['masked']:
# unmask payload
f['payload'] = WebSocketServer.unmask(buf, f['hlen'],
f['length'])
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = unpack_from(">H", f['payload'])[0]
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
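    # Worked example (illustrative): decoding the unmasked 7-byte frame
    # b'\x82\x05hello' from the encode_hybi sketch above yields fin=1, opcode=2,
    # masked=False, hlen=2, length=5, payload=b'hello' and left=0 (a warning is
    # printed because client frames are normally masked).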
@staticmethod
def encode_hixie(buf):
return s2b("\x00" + b2s(b64encode(buf)) + "\xff"), 1, 1
@staticmethod
def decode_hixie(buf):
end = buf.find(s2b('\xff'))
return {'payload': b64decode(buf[1:end]),
'hlen': 1,
'masked': False,
'length': end - 1,
'left': len(buf) - (end + 1)}
@staticmethod
def gen_md5(keys):
""" Generate hash value for WebSockets hixie-76. """
key1 = keys['Sec-WebSocket-Key1']
key2 = keys['Sec-WebSocket-Key2']
key3 = keys['key3']
spaces1 = key1.count(" ")
spaces2 = key2.count(" ")
num1 = int("".join([c for c in key1 if c.isdigit()])) / spaces1
num2 = int("".join([c for c in key2 if c.isdigit()])) / spaces2
return b2s(md5(pack('>II8s',
int(num1), int(num2), key3)).digest())
#
# WebSocketServer logging/output functions
#
def traffic(self, token="."):
""" Show traffic flow in verbose mode. """
if self.verbose and not self.daemon:
sys.stdout.write(token)
sys.stdout.flush()
def msg(self, msg):
""" Output message with handler_id prefix. """
if not self.daemon:
print("% 3d: %s" % (self.handler_id, msg))
def vmsg(self, msg):
""" Same as msg() but only if verbose. """
if self.verbose:
self.msg(msg)
#
# Main WebSocketServer methods
#
def send_frames(self, bufs=None):
""" Encode and send WebSocket frames. Any frames already
queued will be sent first. If buf is not set then only queued
frames will be sent. Returns the number of pending frames that
could not be fully sent. If returned pending frames is greater
than 0, then the caller should call again when the socket is
ready. """
tdelta = int(time.time()*1000) - self.start_time
if bufs:
for buf in bufs:
if self.version.startswith("hybi"):
if self.base64:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=1, base64=True)
else:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=2, base64=False)
else:
encbuf, lenhead, lentail = self.encode_hixie(buf)
if self.rec:
self.rec.write("%s,\n" %
repr("{%s{" % tdelta
+ encbuf[lenhead:len(encbuf)-lentail]))
self.send_parts.append(encbuf)
while self.send_parts:
# Send pending frames
buf = self.send_parts.pop(0)
sent = self.client.send(buf)
if sent == len(buf):
self.traffic("<")
else:
self.traffic("<.")
self.send_parts.insert(0, buf[sent:])
break
return len(self.send_parts)
def recv_frames(self):
""" Receive and decode WebSocket frames.
Returns:
(bufs_list, closed_string)
"""
closed = False
bufs = []
tdelta = int(time.time()*1000) - self.start_time
buf = self.client.recv(self.buffer_size)
if len(buf) == 0:
closed = {'code': 1000, 'reason': "Client closed abruptly"}
return bufs, closed
if self.recv_part:
# Add partially received frames to current read buffer
buf = self.recv_part + buf
self.recv_part = None
while buf:
if self.version.startswith("hybi"):
frame = self.decode_hybi(buf, base64=self.base64)
#print("Received buf: %s, frame: %s" % (repr(buf), frame))
                if frame['payload'] is None:
# Incomplete/partial frame
self.traffic("}.")
if frame['left'] > 0:
self.recv_part = buf[-frame['left']:]
break
else:
if frame['opcode'] == 0x8: # connection close
closed = {'code': frame['close_code'],
'reason': frame['close_reason']}
break
else:
if buf[0:2] == s2b('\xff\x00'):
closed = {'code': 1000,
'reason': "Client sent orderly close frame"}
break
elif buf[0:2] == s2b('\x00\xff'):
buf = buf[2:]
continue # No-op
elif buf.count(s2b('\xff')) == 0:
# Partial frame
self.traffic("}.")
self.recv_part = buf
break
frame = self.decode_hixie(buf)
self.traffic("}")
if self.rec:
start = frame['hlen']
end = frame['hlen'] + frame['length']
if frame['masked']:
recbuf = WebSocketServer.unmask(buf, frame['hlen'],
frame['length'])
else:
recbuf = buf[frame['hlen']:frame['hlen'] +
frame['length']]
self.rec.write("%s,\n" %
repr("}%s}" % tdelta + recbuf))
bufs.append(frame['payload'])
if frame['left']:
buf = buf[-frame['left']:]
else:
buf = ''
return bufs, closed
def send_close(self, code=1000, reason=''):
""" Send a WebSocket orderly close frame. """
if self.version.startswith("hybi"):
msg = pack(">H%ds" % len(reason), code, reason)
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.client.send(buf)
elif self.version == "hixie-76":
buf = s2b('\xff\x00')
self.client.send(buf)
# No orderly close for 75
def do_websocket_handshake(self, headers, path):
h = self.headers = headers
self.path = path
prot = 'WebSocket-Protocol'
protocols = h.get('Sec-'+prot, h.get(prot, '')).split(',')
ver = h.get('Sec-WebSocket-Version')
if ver:
# HyBi/IETF version of the protocol
# HyBi-07 report version 7
# HyBi-08 - HyBi-12 report version 8
# HyBi-13 reports version 13
if ver in ['7', '8', '13']:
self.version = "hybi-%02d" % int(ver)
else:
raise self.EClose('Unsupported protocol version %s' % ver)
key = h['Sec-WebSocket-Key']
# Choose binary if client supports it
if 'binary' in protocols:
self.base64 = False
elif 'base64' in protocols:
self.base64 = True
else:
raise self.EClose("Client must support 'binary' or 'base64' protocol")
# Generate the hash value for the accept header
accept = b64encode(sha1(s2b(key + self.GUID)).digest())
response = self.server_handshake_hybi % b2s(accept)
if self.base64:
response += "Sec-WebSocket-Protocol: base64\r\n"
else:
response += "Sec-WebSocket-Protocol: binary\r\n"
response += "\r\n"
else:
# Hixie version of the protocol (75 or 76)
if h.get('key3'):
trailer = self.gen_md5(h)
pre = "Sec-"
self.version = "hixie-76"
else:
trailer = ""
pre = ""
self.version = "hixie-75"
# We only support base64 in Hixie era
self.base64 = True
response = self.server_handshake_hixie % (pre,
h['Origin'], pre, self.scheme, h['Host'], path)
if 'base64' in protocols:
response += "%sWebSocket-Protocol: base64\r\n" % pre
else:
self.msg("Warning: client does not report 'base64' protocol support")
response += "\r\n" + trailer
return response
def do_handshake(self, sock, address):
"""
do_handshake does the following:
- Peek at the first few bytes from the socket.
- If the connection is Flash policy request then answer it,
close the socket and return.
- If the connection is an HTTPS/SSL/TLS connection then SSL
wrap the socket.
- Read from the (possibly wrapped) socket.
- If we have received a HTTP GET request and the webserver
functionality is enabled, answer it, close the socket and
return.
- Assume we have a WebSockets connection, parse the client
handshake data.
- Send a WebSockets handshake server response.
- Return the socket for this WebSocket client.
"""
stype = ""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("ignoring socket not ready")
# Peek, but do not read the data so that we have a opportunity
# to SSL wrap the socket first
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
        if not handshake:
raise self.EClose("ignoring empty handshake")
elif handshake.startswith(s2b("<policy-file-request/>")):
# Answer Flash policy request
handshake = sock.recv(1024)
sock.send(s2b(self.policy_response))
raise self.EClose("Sending flash policy response")
elif handshake[0] in ("\x16", "\x80", 22, 128):
# SSL wrap the connection
if not ssl:
raise self.EClose("SSL connection but no 'ssl' module")
if not os.path.exists(self.cert):
raise self.EClose("SSL connection but '%s' not found"
% self.cert)
retsock = None
try:
retsock = ssl.wrap_socket(
sock,
server_side=True,
certfile=self.cert,
keyfile=self.key)
except ssl.SSLError:
_, x, _ = sys.exc_info()
if x.args[0] == ssl.SSL_ERROR_EOF:
if len(x.args) > 1:
raise self.EClose(x.args[1])
else:
raise self.EClose("Got SSL_ERROR_EOF")
else:
raise
self.scheme = "wss"
stype = "SSL/TLS (wss://)"
elif self.ssl_only:
raise self.EClose("non-SSL connection received but disallowed")
else:
retsock = sock
self.scheme = "ws"
stype = "Plain non-SSL (ws://)"
wsh = WSRequestHandler(retsock, address, not self.web)
if wsh.last_code == 101:
# Continue on to handle WebSocket upgrade
pass
elif wsh.last_code == 405:
raise self.EClose("Normal web request received but disallowed")
elif wsh.last_code < 200 or wsh.last_code >= 300:
raise self.EClose(wsh.last_message)
elif self.verbose:
raise self.EClose(wsh.last_message)
else:
raise self.EClose("")
response = self.do_websocket_handshake(wsh.headers, wsh.path)
self.msg("%s: %s WebSocket connection" % (address[0], stype))
self.msg("%s: Version %s, base64: '%s'" % (address[0],
self.version, self.base64))
if self.path != '/':
self.msg("%s: Path: '%s'" % (address[0], self.path))
# Send server WebSockets handshake response
#self.msg("sending response [%s]" % response)
retsock.send(s2b(response))
# Return the WebSockets socket which may be SSL wrapped
return retsock
#
# Events that can/should be overridden in sub-classes
#
def started(self):
""" Called after WebSockets startup """
self.vmsg("WebSockets server started")
def poll(self):
""" Run periodically while waiting for connections. """
#self.vmsg("Running poll()")
pass
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
self.vmsg("Got SIGCHLD, reaping zombies")
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
self.vmsg("Reaped child process %s" % result[0])
result = os.waitpid(-1, os.WNOHANG)
except (OSError):
pass
def do_SIGINT(self, sig, stack):
self.msg("Got SIGINT, exiting")
sys.exit(0)
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# Initialize per client settings
self.send_parts = []
self.recv_part = None
self.base64 = False
self.rec = None
self.start_time = int(time.time()*1000)
# handler process
try:
try:
self.client = self.do_handshake(startsock, address)
if self.record:
# Record raw frame data as JavaScript array
fname = "%s.%s" % (self.record,
self.handler_id)
self.msg("opening record file: %s" % fname)
self.rec = open(fname, 'w+')
encoding = "binary"
if self.base64: encoding = "base64"
self.rec.write("var VNC_frame_encoding = '%s';\n"
% encoding)
self.rec.write("var VNC_frame_data = [\n")
self.ws_connection = True
self.new_client()
except self.CClose:
# Close the client
_, exc, _ = sys.exc_info()
if self.client:
self.send_close(exc.args[0], exc.args[1])
except self.EClose:
_, exc, _ = sys.exc_info()
# Connection was not a WebSockets connection
if exc.args[0]:
self.msg("%s: %s" % (address[0], exc.args[0]))
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if self.rec:
self.rec.write("'EOF'];\n")
self.rec.close()
if self.client and self.client != startsock:
# Close the SSL wrapped socket
# Original socket closed by caller
self.client.close()
def new_client(self):
""" Do something with a WebSockets client connection. """
raise("WebSocketServer.new_client() must be overloaded")
def start_server(self):
"""
Daemonize if requested. Listen for for connections. Run
do_handshake() method for each connection. If the connection
is a WebSockets client then call new_client() method (which must
be overridden) for each new client connection.
"""
lsock = self.socket(self.listen_host, self.listen_port, False, self.prefer_ipv6)
if self.daemon:
self.daemonize(keepfd=lsock.fileno(), chdir=self.web)
self.started() # Some things need to happen after daemonizing
# Allow override of SIGINT
signal.signal(signal.SIGINT, self.do_SIGINT)
if not multiprocessing:
# os.fork() (python 2.4) child reaper
signal.signal(signal.SIGCHLD, self.fallback_SIGCHLD)
last_active_time = self.launch_time
while True:
try:
try:
self.client = None
startsock = None
pid = err = 0
child_count = 0
if multiprocessing and self.idle_timeout:
child_count = len(multiprocessing.active_children())
time_elapsed = time.time() - self.launch_time
if self.timeout and time_elapsed > self.timeout:
self.msg('listener exit due to --timeout %s'
% self.timeout)
break
if self.idle_timeout:
idle_time = 0
if child_count == 0:
idle_time = time.time() - last_active_time
else:
idle_time = 0
last_active_time = time.time()
if idle_time > self.idle_timeout and child_count == 0:
self.msg('listener exit due to --idle-timeout %s'
% self.idle_timeout)
break
try:
self.poll()
ready = select.select([lsock], [], [], 1)[0]
if lsock in ready:
startsock, address = lsock.accept()
else:
continue
except Exception:
_, exc, _ = sys.exc_info()
if hasattr(exc, 'errno'):
err = exc.errno
elif hasattr(exc, 'args'):
err = exc.args[0]
else:
err = exc[0]
if err == errno.EINTR:
self.vmsg("Ignoring interrupted syscall")
continue
else:
raise
if self.run_once:
# Run in same process if run_once
self.top_new_client(startsock, address)
if self.ws_connection :
self.msg('%s: exiting due to --run-once'
% address[0])
break
elif multiprocessing:
self.vmsg('%s: new handler Process' % address[0])
p = multiprocessing.Process(
target=self.top_new_client,
args=(startsock, address))
p.start()
# child will not return
else:
# python 2.4
self.vmsg('%s: forking handler' % address[0])
pid = os.fork()
if pid == 0:
# child handler process
self.top_new_client(startsock, address)
break # child process exits
# parent process
self.handler_id += 1
except KeyboardInterrupt:
_, exc, _ = sys.exc_info()
print("In KeyboardInterrupt")
pass
except SystemExit:
_, exc, _ = sys.exc_info()
print("In SystemExit")
break
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if startsock:
startsock.close()
# HTTP handler with WebSocket upgrade support
class WSRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, req, addr, only_upgrade=False):
self.only_upgrade = only_upgrade # only allow upgrades
SimpleHTTPRequestHandler.__init__(self, req, addr, object())
def do_GET(self):
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
if (self.headers.get('sec-websocket-key1') or
self.headers.get('websocket-key1')):
# For Hixie-76 read out the key hash
self.headers.__setitem__('key3', self.rfile.read(8))
# Just indicate that an WebSocket upgrade is needed
self.last_code = 101
self.last_message = "101 Switching Protocols"
elif self.only_upgrade:
# Normal web request responses are disabled
self.last_code = 405
self.last_message = "405 Method Not Allowed"
else:
SimpleHTTPRequestHandler.do_GET(self)
def send_response(self, code, message=None):
# Save the status code
self.last_code = code
SimpleHTTPRequestHandler.send_response(self, code, message)
def log_message(self, f, *args):
# Save instead of printing
self.last_message = f % args
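# Subclassing sketch (illustrative only, not part of the original module): the
# server must be subclassed and new_client() overridden, e.g. a trivial echo
# server that sends every received frame back to the client.
#
# class EchoServer(WebSocketServer):
#     def new_client(self):
#         while True:
#             bufs, closed = self.recv_frames()
#             if bufs:
#                 self.send_frames(bufs)
#             if closed:
#                 break
#
# EchoServer(listen_host='0.0.0.0', listen_port=6080).start_server()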
|
py
|
1a5ba09427cd8ad7aeebc13d7db6cadac0d0d964
|
__author__ = 'Omry_Nachman'
from time import sleep
class Tap(object):
def __init__(self, pi_face, output_pin, open_value=True):
self.pi_face = pi_face
self.output_pin = output_pin
self.open_value = open_value
self.state = None
self.close()
def switch(self, open_tap=True):
self.state = open_tap
if open_tap:
self.pi_face.output_pins[self.output_pin].value = self.open_value
else:
self.pi_face.output_pins[self.output_pin].value = not self.open_value
def open(self):
self.switch(True)
def close(self):
self.switch(False)
def toggle(self):
self.switch(not self.state)
def flick(self, duration, return_lambda=False, off_on_off=True):
def execute():
self.switch(not off_on_off)
sleep(duration)
self.switch(off_on_off)
if return_lambda:
return execute
else:
execute()
class DCTap(Tap):
def __init__(self, pi_face, charge_discharge_pin=6, discharge_value=False, open_close_pin=7, open_value=False):
self.pi_face = pi_face
self.charge_discharge_pin = charge_discharge_pin
self.discharge_value = discharge_value
self.open_close_pin = open_close_pin
self.open_value = open_value
self.state = None
self.close()
def switch(self, open_tap=True):
self.state = open_tap
if open_tap:
direction = self.open_value
else:
direction = not self.open_value
self.pi_face.output_pins[self.charge_discharge_pin].value = not self.discharge_value
        # Drive the requested direction on the dedicated open/close pin.
        self.pi_face.output_pins[self.open_close_pin].value = direction
sleep(0.1)
self.pi_face.output_pins[self.charge_discharge_pin].value = self.discharge_value
sleep(0.1)
self.pi_face.output_pins[self.charge_discharge_pin].value = not self.discharge_value
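# Usage sketch (assumes the pifacedigitalio package and suitable wiring; the pin
# number is illustrative):
#
# import pifacedigitalio
# pf = pifacedigitalio.PiFaceDigital()
# tap = Tap(pf, output_pin=2)
# tap.flick(5, off_on_off=False)   # open the tap, wait 5 seconds, close it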
|
py
|
1a5ba128bab8cd848f5e7cd77432a18eb54e3489
|
import urllib, urllib2, sys, httplib
url = "/DaaS-0.1"
HOST_IP="128.130.172.216"
input_description="""Expected command line arguments: KeyspaceName TableName PrimaryKeyCondition
Example: TestKeyspace TestTable "key IN (9640,4830)"
TestKeyspace TestTable key = 9640 \n
Condition specified on primary key supporting = and IN . Example: key = 3, or key IN (1,100)), where key is PRIMARY KEY
""";
if __name__=='__main__':
connection = httplib.HTTPConnection(HOST_IP+':8080')
if (len(sys.argv) > 3):
headers={
'Content-Type':'application/xml; charset=utf-8',
'Accept':'application/xml, multipart/related'
}
keySpaceName=sys.argv[1]
tableName=sys.argv[2]
body_content = '<Query><Table name="'+tableName+'"><Keyspace name="'+keySpaceName+'"/></Table>'
condition = "";
for index in range(3,len(sys.argv),1):
condition = condition+sys.argv[index];
if (len(condition) > 0):
body_content = body_content + '<Condition>'+condition+'</Condition>';
body_content = body_content + '</Query>';
#print body_content
connection.request('DELETE', url+'/api/m2m/table/row', body=body_content,headers=headers,)
result = connection.getresponse()
resultMessage = result.read()
#print resultMessage;
if ("Exception" in resultMessage) or ("Bad Request" in resultMessage):
print resultMessage;
sys.exit(-1)
else:
print input_description;
|
py
|
1a5ba146e60c198fc5f5dd6a8d6f2eb4cab2e862
|
import turtle as t
t.screensize(400,400,"black")
t.pensize(1)
t.speed(30)
for index in range(400):
if index % 4 in [1]:
t.pencolor("red")
elif index % 4 in [2]:
t.pencolor("orange")
elif index % 4 in [3]:
t.pencolor("purple")
else:
t.pencolor("blue")
t.fd(5+index*2)
t.left(91)
|
py
|
1a5ba305183fc30c53b8b065a9df1ca74a92b029
|
from django.urls import path, include # noqa
from rest_framework.routers import DefaultRouter # noqa
from recipe import views # noqa
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
|
py
|
1a5ba3af4ebf70626dddd74fa17b931f291f2117
|
import datetime
import typing
import uuid
from dataclasses_avroschema import AvroModel, fields
now = datetime.datetime.now()
PRIMITIVE_TYPES = (
(str, fields.STRING),
(int, fields.INT),
(bool, fields.BOOLEAN),
(float, fields.FLOAT),
# (bytes, "bytes"),
)
PRIMITIVE_TYPES_AND_DEFAULTS = (
(str, "test"),
(int, 1),
(bool, True),
(float, 10.4),
# (bytes, "test".encode()),
)
PRIMITIVE_TYPES_AND_INVALID_DEFAULTS = (
(str, 1),
(int, "test"),
(bool, 10),
(float, False),
# (bytes, "test".encode()),
)
LIST_TYPE_AND_ITEMS_TYPE = (
(str, "string"),
(int, "int"),
(bool, "boolean"),
(float, "float"),
(bytes, "bytes"),
)
LOGICAL_TYPES = (
(datetime.date, fields.LOGICAL_DATE, now.date()),
(datetime.time, fields.LOGICAL_TIME, now.time()),
(datetime.datetime, fields.LOGICAL_DATETIME, now),
(uuid.uuid4, fields.LOGICAL_UUID, uuid.uuid4()),
)
UNION_PRIMITIVE_ELEMENTS = (
((str, int), (fields.STRING, fields.INT)),
((str, None), (fields.STRING, fields.NULL)),
(
(datetime.date, datetime.datetime),
(fields.PYTHON_TYPE_TO_AVRO[datetime.date], fields.PYTHON_TYPE_TO_AVRO[datetime.datetime],),
),
((float, str, int), (fields.FLOAT, fields.STRING, fields.INT)),
((str, float, int, bool), (fields.STRING, fields.FLOAT, fields.INT, fields.BOOLEAN),),
)
SEQUENCE_TYPES = (typing.List, typing.Tuple, typing.Sequence, typing.MutableSequence)
MAPPING_TYPES = (typing.Dict, typing.Mapping, typing.MutableMapping)
SEQUENCES_AND_TYPES = (
(sequence, python_type, items_type) for sequence in SEQUENCE_TYPES for python_type, items_type in PRIMITIVE_TYPES
)
SEQUENCES_LOGICAL_TYPES = (
(sequence, python_type, items_type, value)
for sequence in SEQUENCE_TYPES
for python_type, items_type, value in LOGICAL_TYPES
)
MAPPING_AND_TYPES = (
(mapping, python_type, items_type) for mapping in MAPPING_TYPES for python_type, items_type in PRIMITIVE_TYPES
)
MAPPING_LOGICAL_TYPES = (
(mapping, python_type, items_type, value)
for mapping in MAPPING_TYPES
for python_type, items_type, value in LOGICAL_TYPES
)
# Represent the logical types
# (python_type, avro_internal_type, logical_type)
LOGICAL_TYPES_AND_DEFAULTS = (
(datetime.date, fields.INT, fields.DATE),
(datetime.time, fields.INT, fields.TIME_MILLIS),
(datetime.datetime, fields.LONG, fields.TIMESTAMP_MILLIS),
(uuid.uuid4, fields.STRING, fields.UUID),
)
LOGICAL_TYPES_AND_INVALID_DEFAULTS = (
(datetime.date, 1, None),
(datetime.time, "test", None),
(datetime.datetime, 10, None),
(uuid.uuid4, 10, f"Invalid default type. Default should be {str} or {uuid.UUID}"),
)
class User(AvroModel):
"User"
first_name: str
avro_user = {
"name": "User",
"type": "record",
"doc": "User",
"fields": [{"name": "first_name", "type": "string"}],
}
ARRAY_WITH_UNION_TYPES = (
(typing.Union[int, str], [fields.INT, fields.STRING], [10, 20, "test"]),
(typing.Union[int, str, User], [fields.INT, fields.STRING, avro_user], [10, 20, "test"],),
)
|
py
|
1a5ba4154cb1ece504b25eb6c7914938714879b2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import extension_info
from nova import exception
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
class fake_extension(object):
def __init__(self, name, alias, description, namespace, version):
self.name = name
self.alias = alias
self.__doc__ = description
self.namespace = namespace
self.version = version
fake_extensions = {
'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description',
'ext1 namespace', 1),
'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description',
'ext2 namespace', 2),
'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description',
'ext3 namespace', 1)
}
def fake_policy_enforce(context, action, target, do_raise=True):
return True
def fake_policy_enforce_selective(context, action, target, do_raise=True):
if action == 'compute_extension:v3:ext1-alias:discoverable':
raise exception.NotAuthorized
else:
return True
class ExtensionInfoTest(test.TestCase):
def setUp(self):
super(ExtensionInfoTest, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
ext_info.extensions = fake_extensions
self.controller = extension_info.ExtensionInfoController(ext_info)
def test_extension_info_list(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce)
req = fakes.HTTPRequestV3.blank('/extensions')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['extensions']))
for e in res_dict['extensions']:
self.assertIn(e['alias'], fake_extensions)
self.assertEqual(e['name'], fake_extensions[e['alias']].name)
self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
self.assertEqual(e['description'],
fake_extensions[e['alias']].__doc__)
self.assertEqual(e['namespace'],
fake_extensions[e['alias']].namespace)
self.assertEqual(e['version'],
fake_extensions[e['alias']].version)
def test_extension_info_show(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce)
req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
res_dict = self.controller.show(req, 'ext1-alias')
self.assertEqual(1, len(res_dict))
self.assertEqual(res_dict['extension']['name'],
fake_extensions['ext1-alias'].name)
self.assertEqual(res_dict['extension']['alias'],
fake_extensions['ext1-alias'].alias)
self.assertEqual(res_dict['extension']['description'],
fake_extensions['ext1-alias'].__doc__)
self.assertEqual(res_dict['extension']['namespace'],
fake_extensions['ext1-alias'].namespace)
self.assertEqual(res_dict['extension']['version'],
fake_extensions['ext1-alias'].version)
def test_extension_info_list_not_all_discoverable(self):
self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
req = fakes.HTTPRequestV3.blank('/extensions')
res_dict = self.controller.index(req)
self.assertEqual(2, len(res_dict['extensions']))
for e in res_dict['extensions']:
self.assertNotEqual('ext1-alias', e['alias'])
self.assertIn(e['alias'], fake_extensions)
self.assertEqual(e['name'], fake_extensions[e['alias']].name)
self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
self.assertEqual(e['description'],
fake_extensions[e['alias']].__doc__)
self.assertEqual(e['namespace'],
fake_extensions[e['alias']].namespace)
self.assertEqual(e['version'],
fake_extensions[e['alias']].version)
|
py
|
1a5ba48bb54d5d7d82f5f5c653b5d509ac0a58d8
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiOperationPolicyResult',
'AwaitableGetApiOperationPolicyResult',
'get_api_operation_policy',
]
@pulumi.output_type
class GetApiOperationPolicyResult:
"""
Policy Contract details.
"""
def __init__(__self__, content_format=None, name=None, policy_content=None, type=None):
if content_format and not isinstance(content_format, str):
raise TypeError("Expected argument 'content_format' to be a str")
pulumi.set(__self__, "content_format", content_format)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if policy_content and not isinstance(policy_content, str):
raise TypeError("Expected argument 'policy_content' to be a str")
pulumi.set(__self__, "policy_content", policy_content)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="contentFormat")
def content_format(self) -> Optional[str]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "content_format")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> str:
"""
Json escaped Xml Encoded contents of the Policy.
"""
return pulumi.get(self, "policy_content")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
class AwaitableGetApiOperationPolicyResult(GetApiOperationPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiOperationPolicyResult(
content_format=self.content_format,
name=self.name,
policy_content=self.policy_content,
type=self.type)
def get_api_operation_policy(api_id: Optional[str] = None,
operation_id: Optional[str] = None,
policy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiOperationPolicyResult:
"""
Use this data source to access information about an existing resource.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['operationId'] = operation_id
__args__['policyId'] = policy_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20180601preview:getApiOperationPolicy', __args__, opts=opts, typ=GetApiOperationPolicyResult).value
return AwaitableGetApiOperationPolicyResult(
content_format=__ret__.content_format,
name=__ret__.name,
policy_content=__ret__.policy_content,
type=__ret__.type)
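# Hedged usage sketch (not part of the generated SDK); every identifier below is a
# hypothetical placeholder showing how the documented parameters map onto a call:
#
#     result = get_api_operation_policy(
#         resource_group_name="rg1",
#         service_name="apimService1",
#         api_id="echo-api",
#         operation_id="create-resource",
#         policy_id="policy")
#     pulumi.export("policy_content", result.policy_content)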
|
py
|
1a5ba5bc5cfe13c61ffd0abdc17275e0fa792579
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from copy import copy
from typing import List
from jinja2 import Environment
from .import_serializer import FileImportSerializer
from ..models import LROOperation, PagingOperation, CodeModel, OperationGroup
from .builder_serializer import get_operation_serializer, get_request_builder_serializer
class OperationGroupSerializer:
def __init__(
self,
code_model: CodeModel,
env: Environment,
operation_groups: List[OperationGroup],
async_mode: bool,
is_python_3_file: bool,
) -> None:
self.code_model = code_model
self.env = env
self.operation_groups = operation_groups
self.async_mode = async_mode
self.is_python_3_file = is_python_3_file
def serialize(self) -> str:
def _is_lro(operation):
return isinstance(operation, LROOperation)
def _is_paging(operation):
return isinstance(operation, PagingOperation)
operation_group_template = self.env.get_template("operations_container.py.jinja2")
if not self.code_model.options["combine_operation_files"] and self.operation_groups[0].is_empty_operation_group:
operation_group_template = self.env.get_template("operations_container_mixin.py.jinja2")
has_schemas = self.code_model.schemas or self.code_model.enums
# extract all operations from operation_groups
        operations_all = [operation for groups in self.operation_groups for operation in groups.operations]
        operation_group_temp = copy(self.operation_groups[0])
        operation_group_temp.operations = operations_all
return operation_group_template.render(
code_model=self.code_model,
operation_groups=self.operation_groups,
imports=FileImportSerializer(
operation_group_temp.imports(
async_mode=self.async_mode,
has_schemas=bool(has_schemas)
), is_python_3_file=self.is_python_3_file
),
async_mode=self.async_mode,
is_python_3_file=self.is_python_3_file,
is_lro=_is_lro,
is_paging=_is_paging,
get_operation_serializer=functools.partial(
get_operation_serializer,
code_model=self.code_model,
async_mode=self.async_mode,
is_python_3_file=self.is_python_3_file,
),
request_builder_serializer=get_request_builder_serializer(
self.code_model, self.is_python_3_file,
),
)
|
py
|
1a5ba7262734c529ec21ab29f610eb057a07fbe1
|
import bz2
import gzip
import re
import tarfile
import zipfile
from io import (
BytesIO,
StringIO
)
from galaxy import util
from galaxy.util.image_util import image_type
HTML_CHECK_LINES = 100
CHUNK_SIZE = 2 ** 15 # 32Kb
HTML_REGEXPS = (
re.compile(r"<A\s+[^>]*HREF[^>]+>", re.I),
re.compile(r"<IFRAME[^>]*>", re.I),
re.compile(r"<FRAMESET[^>]*>", re.I),
re.compile(r"<META[\W][^>]*>", re.I),
re.compile(r"<SCRIPT[^>]*>", re.I),
)
def check_html(name, file_path=True):
"""
Returns True if the file/string contains HTML code.
"""
# Handles files if file_path is True or text if file_path is False
if file_path:
temp = open(name, "r", encoding='utf-8')
else:
temp = StringIO(util.unicodify(name))
try:
for _ in range(HTML_CHECK_LINES):
line = temp.readline(CHUNK_SIZE)
if not line:
break
if any(regexp.search(line) for regexp in HTML_REGEXPS):
return True
except UnicodeDecodeError:
return False
finally:
temp.close()
return False
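# Minimal sketch of the two calling modes described in the docstring above
# (illustrative only; the sample strings are made up):
#
#     check_html("/tmp/page.html")                          # scan a file on disk
#     check_html('<a href="x">link</a>', file_path=False)   # in-memory string -> True
#     check_html("plain text", file_path=False)             # -> False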
def check_binary(name, file_path=True):
# Handles files if file_path is True or text if file_path is False
if file_path:
temp = open(name, "rb")
else:
temp = BytesIO(name)
try:
return util.is_binary(temp.read(1024))
finally:
temp.close()
def check_gzip(file_path, check_content=True):
# This method returns a tuple of booleans representing ( is_gzipped, is_valid )
# Make sure we have a gzipped file
try:
with open(file_path, "rb") as temp:
magic_check = temp.read(2)
if magic_check != util.gzip_magic:
return (False, False)
except Exception:
return (False, False)
# We support some binary data types, so check if the compressed binary file is valid
# If the file is Bam, it should already have been detected as such, so we'll just check
# for sff format.
try:
with gzip.open(file_path, 'rb') as fh:
header = fh.read(4)
if header == b'.sff':
return (True, True)
except Exception:
        return (False, False)
if not check_content:
return (True, True)
with gzip.open(file_path, mode='rb') as gzipped_file:
chunk = gzipped_file.read(CHUNK_SIZE)
# See if we have a compressed HTML file
if check_html(chunk, file_path=False):
return (True, False)
return (True, True)
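# Hedged usage sketch for the (is_gzipped, is_valid) contract described above; the
# path is a hypothetical placeholder:
#
#     is_gzipped, is_valid = check_gzip("/tmp/upload.dat")
#     if is_gzipped and not is_valid:
#         print("gzipped, but the decompressed content looks like HTML")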
def check_bz2(file_path, check_content=True):
try:
with open(file_path, "rb") as temp:
magic_check = temp.read(3)
if magic_check != util.bz2_magic:
return (False, False)
except Exception:
        return (False, False)
if not check_content:
return (True, True)
with bz2.BZ2File(file_path, mode='rb') as bzipped_file:
chunk = bzipped_file.read(CHUNK_SIZE)
# See if we have a compressed HTML file
if check_html(chunk, file_path=False):
return (True, False)
return (True, True)
def check_zip(file_path, check_content=True, files=1):
if not zipfile.is_zipfile(file_path):
return (False, False)
if not check_content:
return (True, True)
chunk = None
for filect, member in enumerate(iter_zip(file_path)):
handle, name = member
chunk = handle.read(CHUNK_SIZE)
if chunk and check_html(chunk, file_path=False):
return (True, False)
if filect >= files:
break
return (True, True)
def is_bz2(file_path):
is_bz2, is_valid = check_bz2(file_path, check_content=False)
return is_bz2
def is_gzip(file_path):
is_gzipped, is_valid = check_gzip(file_path, check_content=False)
return is_gzipped
def is_zip(file_path):
is_zipped, is_valid = check_zip(file_path, check_content=False)
return is_zipped
def is_single_file_zip(file_path):
for i, _ in enumerate(iter_zip(file_path)):
if i > 1:
return False
return True
def is_tar(file_path):
return tarfile.is_tarfile(file_path)
def iter_zip(file_path):
with zipfile.ZipFile(file_path) as z:
for f in filter(lambda x: not x.endswith('/'), z.namelist()):
yield (z.open(f), f)
def check_image(file_path):
""" Simple wrapper around image_type to yield a True/False verdict """
if image_type(file_path):
return True
return False
__all__ = (
'check_binary',
'check_bz2',
'check_gzip',
'check_html',
'check_image',
'check_zip',
'is_gzip',
'is_bz2',
'is_zip',
)
|
py
|
1a5ba795ee5931ad288b75c91b7e45f6cdb1d241
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from paddlers.models.ppseg.datasets import Dataset
from paddlers.models.ppseg.cvlibs import manager
from paddlers.models.ppseg.transforms import Compose
@manager.DATASETS.add_component
class Cityscapes(Dataset):
"""
Cityscapes dataset `https://www.cityscapes-dataset.com/`.
    The folder structure is as follows:
cityscapes
|
|--leftImg8bit
| |--train
| |--val
| |--test
|
|--gtFine
| |--train
| |--val
| |--test
    Make sure there are **labelTrainIds.png files in the gtFine directory. If not, please run convert_cityscapes.py in tools.
Args:
transforms (list): Transforms for image.
dataset_root (str): Cityscapes dataset directory.
        mode (str, optional): Which part of the dataset to use. It is one of ('train', 'val', 'test'). Default: 'train'.
edge (bool, optional): Whether to compute edge while training. Default: False
"""
NUM_CLASSES = 19
def __init__(self, transforms, dataset_root, mode='train', edge=False):
self.dataset_root = dataset_root
self.transforms = Compose(transforms)
self.file_list = list()
mode = mode.lower()
self.mode = mode
self.num_classes = self.NUM_CLASSES
self.ignore_index = 255
self.edge = edge
if mode not in ['train', 'val', 'test']:
raise ValueError(
"mode should be 'train', 'val' or 'test', but got {}.".format(
mode))
if self.transforms is None:
raise ValueError("`transforms` is necessary, but it is None.")
img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
label_dir = os.path.join(self.dataset_root, 'gtFine')
if self.dataset_root is None or not os.path.isdir(
self.dataset_root) or not os.path.isdir(
img_dir) or not os.path.isdir(label_dir):
raise ValueError(
"The dataset is not Found or the folder structure is nonconfoumance."
)
label_files = sorted(
glob.glob(
os.path.join(label_dir, mode, '*',
'*_gtFine_labelTrainIds.png')))
img_files = sorted(
glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.png')))
self.file_list = [[
img_path, label_path
] for img_path, label_path in zip(img_files, label_files)]
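# Hedged usage sketch (not part of the original module); the dataset path and the
# transform list are hypothetical placeholders:
#
#     import paddlers.models.ppseg.transforms as T
#     train_dataset = Cityscapes(
#         transforms=[T.Normalize()],
#         dataset_root="data/cityscapes",
#         mode="train",
#         edge=False)
#     print(len(train_dataset.file_list))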
|
py
|
1a5ba9b75bb31bf8f2b2e222beb192ffa326a7c4
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import PIL.Image  # import the submodule explicitly so PIL.Image.open below works
import numpy as np
import scipy.sparse
from lib.model.utils.config import cfg
# assumed path for the project's cython bbox_overlaps helper used in create_roidb_from_box_list
from lib.model.utils.cython_bbox import bbox_overlaps
import pdb
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
class imdb(object):
"""Image database."""
def __init__(self, name, classes=None):
self._name = name
self._num_classes = 0
if not classes:
self._classes = []
else:
self._classes = classes
self._image_index = []
self._obj_proposer = 'gt'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
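    # Illustrative shape of a single roidb entry, inferred from the key list above and
    # from create_roidb_from_box_list below (exact dtypes may vary per subclass):
    #
    #     {'boxes': np.array([[x1, y1, x2, y2], ...]),
    #      'gt_classes': np.array([cls_idx, ...], dtype=np.int32),
    #      'gt_overlaps': scipy.sparse.csr_matrix of shape (num_boxes, num_classes),
    #      'flipped': False}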
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def image_id_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in range(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
for i in range(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in range(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
                gt_overlaps = bbox_overlaps(boxes.astype(np.float64),
                                            gt_boxes.astype(np.float64))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in range(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
|
py
|
1a5baa54b653e0c0d5cfc4a6a75136f05d0c19f1
|
from __future__ import absolute_import, division, print_function
import os
import time
import pandas as pd
import numpy as np
import seaborn as sns
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.decomposition import PCA
from src.meteoro_skills import CategoricalScores
from src.meteoro_skills import ContinuousScores
import tensorflow as tf
from tensorflow import keras
from keras import backend as K
from tensorflow.keras import layers
from keras.layers import GaussianNoise
from keras.layers import GaussianDropout
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
#from keras.models import model_from_yaml
from keras.models import load_model
print('TF version '+tf.__version__)
# ------------------------------------------------------------------------------
def tic():
global _start_time
_start_time = time.time()
def tac():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec, 60)
(t_hour, t_min) = divmod(t_min, 60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))
def mean_squared_error(y_test, y_pred):
return K.mean(K.square(y_pred - y_test), axis=-1)
# ------------------------------------------------------------------------------
class Training:
"""
This module is intended to automate the TensorFlow Neural Network training.
"""
PCA = PCA()
seed = 0
run_prefix = ''
version = ''
vernick = ''
file = ''
path = ''
fig_title = ''
path_fig = ''
mod_out_pth = ''
mod_out_name = ''
def __init__(self, random_seed=0,
run_prefix='',
version='',
version_nickname='',
csv_entry='',
csv_path='',
figure_path='',
model_out_path='',
model_out_name=''):
self.run_prefix = run_prefix
self.seed = random_seed
self.version = version
self.vernick = version_nickname
self.file = csv_entry
self.path = csv_path
self.path_fig = figure_path
self.fig_title = run_prefix + version + version_nickname
self.mod_out_pth = model_out_path
self.mod_out_name = model_out_name
# -------------------------------------------------------------------------
# DROP DATA OUTSIDE INTERVAL
# -------------------------------------------------------------------------
@staticmethod
    def keep_interval(keepfrom: float, keepto: float, dataframe, target_col: str):
keepinterval = np.where((dataframe[target_col] >= keepfrom) &
(dataframe[target_col] <= keepto))
result = dataframe.iloc[keepinterval]
return result
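    # Hedged example of the interval filter above (the column name and bounds mirror the
    # call made later in autoExecReg; the dataframe itself is hypothetical):
    #
    #     subset = Training.keep_interval(0.2, 75, dataset, 'sfcprcp')
    #     # keeps only the rows where 0.2 <= dataset['sfcprcp'] <= 75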
# -------------------------------------------------------------------------
# BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION
# -------------------------------------------------------------------------
@staticmethod
def build_class_model():
'''
        Function to create the instance and configuration of the Keras
        model (Sequential and Dense).
'''
# Create the Keras model:
model = Sequential()
model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))
model.add(Dense(2, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'],)
return model
@staticmethod
def build_reg_model(input_size):
'''
        Function to create the instance and configuration of the Keras
        model (Sequential and Dense).
'''
model = Sequential()
model.add(GaussianNoise(0.01, input_shape=(input_size,)))
model.add(Dense(24, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['mean_absolute_error', 'mean_squared_error'])
return model
# -------------------------------------------------------------------------
# EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!
# -------------------------------------------------------------------------
def autoExecClass(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# Load dataset:
df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]
x_arr = np.asanyarray(x)
y_arr = np.asanyarray(y)
y_arr = np.ravel(y_arr)
        # Scaling the input parameters:
        # scaler_min_max = MinMaxScaler()
        norm_sc = Normalizer()
        x_normalized = norm_sc.fit_transform(x_arr)
# Split the dataset in test and train samples:
x_train, x_test, y_train, y_test = train_test_split(x_normalized,
y_arr, test_size=0.10,
random_state=101)
        # Create the classification model instance:
model=self.build_class_model()
tic()
#------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(x_train, y_train,
epochs=EPOCHS, validation_split=0.2, batch_size=10,
verbose=0, callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
# ------------------------------------------------------------------------------
# Saving model to YAML:
# model_yaml = model.to_yaml()
# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
# yaml_file.write(model_yaml)
#
# # serialize weights to HDF5
# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
# print("Saved model to disk")
# tac()
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '.h5')
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def autoExecReg(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# ------------------------------------------------------------------------------
df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']
scaler = StandardScaler()
normed_input = scaler.fit_transform(df_input)
df_normed_input = pd.DataFrame(normed_input[:],
columns=colunas)
ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
# regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]
# ------------------------------------------------------------------------------
# Choosing the number of components:
TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]
TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]
# ------------------------------------------------------------------------------
# Verifying the number of components that most contribute:
pca = self.PCA
pca1 = pca.fit(TB1)
plt.plot(np.cumsum(pca1.explained_variance_ratio_))
plt.xlabel('Number of components for TB1')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')
# ---
pca_trans1 = PCA(n_components=2)
pca1 = pca_trans1.fit(TB1)
TB1_transformed = pca_trans1.transform(TB1)
print("original shape: ", TB1.shape)
print("transformed shape:", TB1_transformed.shape)
# ------------------------------------------------------------------------------
pca = PCA()
pca2 = pca.fit(TB2)
plt.plot(np.cumsum(pca2.explained_variance_ratio_))
plt.xlabel('Number of components for TB2')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')
# ---
pca_trans2 = PCA(n_components=2)
pca2 = pca_trans2.fit(TB2)
TB2_transformed = pca_trans2.transform(TB2)
print("original shape: ", TB2.shape)
print("transformed shape:", TB2_transformed.shape)
# ------------------------------------------------------------------------------
# JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:
PCA1 = pd.DataFrame(TB1_transformed[:],
                            columns=['pca1_1', 'pca1_2'])
PCA2 = pd.DataFrame(TB2_transformed[:],
columns=['pca2_1', 'pca2_2'])
dataset = PCA1.join(PCA2, how='right')
dataset = dataset.join(ancillary, how='right')
dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')
# ------------------------------------------------------------------------------
dataset = self.keep_interval(0.2, 75, dataset, 'sfcprcp')
# ----------------------------------------
# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)
# n = 0.98
# to_remove = np.random.choice(
# dataset.index,
# size=int(dataset.shape[0] * n),
# replace=False)
# dataset = dataset.drop(to_remove)
# ------------------------------------------------------------------------------
# Split the data into train and test
# Now split the dataset into a training set and a test set.
# We will use the test set in the final evaluation of our model.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# ------------------------------------------------------------------------------
# Inspect the data:
# Have a quick look at the joint distribution of a few pairs of columns from the training set.
colunas = list(dataset.columns.values)
# ------------------------------------------------------------------------------
# Also look at the overall statistics:
train_stats = train_dataset.describe()
train_stats.pop("sfcprcp")
train_stats = train_stats.transpose()
# ------------------------------------------------------------------------------
# Split features from labels:
# Separate the target value, or "label", from the features.
# This label is the value that you will train the model to predict.
y_train = train_dataset.pop('sfcprcp')
y_test = test_dataset.pop('sfcprcp')
# ------------------------------------------------------------------------------
# Normalize the data:
scaler = StandardScaler()
normed_train_data = scaler.fit_transform(train_dataset)
normed_test_data = scaler.fit_transform(test_dataset)
# ------------------------------------------------------------------------------
# Build the model:
model = self.build_reg_model(len(train_dataset.keys()))
# ------------------------------------------------------------------------------
# Inspect the model:
# Use the .summary method to print a simple description of the model
model.summary()
# ------------------------------------------------------------------------------
# It seems to be working, and it produces a result
# of the expected shape and type.
# Train the model:
# Train the model for 1000 epochs, and record the training
# and validation accuracy in the history object.
# ------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, y_train,
epochs=EPOCHS, validation_split=0.2, verbose=0,
callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
self.plot_history(history)
# ------------------------------------------------------------------------------
model = self.build_reg_model(len(train_dataset.keys()))
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, y_train, epochs=EPOCHS,
validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])
# ------------------------------------------------------------------------------
        # Plotting again, but with EarlyStopping applied:
self.plot_history_EarlyStopping(history)
        # The graph shows that on the validation set, the average error
        # is usually around +/- 2 sfcprcp (mm/h). Is this good?
        # We'll leave that decision up to you.
# ------------------------------------------------------------------------------
# Let's see how well the model generalizes by using
# the test set, which we did not use when training the model.
# This tells us how well we can expect the model to predict
# when we use it in the real world.
loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} sfcprcp".format(mae))
#------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Make predictions
# Finally, predict SFCPRCP values using data in the testing set:
test_predictions = model.predict(normed_test_data).flatten()
        # Applying meteorological skills to verify the performance of the TRAIN/TEST model, in this case, continuous scores:
skills = ContinuousScores()
val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)
#converting to text file
print("converting arrays to text files")
my_scores = {'val_y_pred_mean': val_y_pred_mean,
'val_y_test_mean': val_y_test_mean,
'val_mae': val_mae,
'val_rmse': val_rmse,
'val_std': val_std,
'val_fseperc': val_fseperc,
'val_fse': val_fse,
'val_corr': val_corr,
'val_num_pixels': val_num_pixels}
with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:
myfile.write(str(my_scores))
print("Text file saved!")
plt.figure()
plt.scatter(y_test, test_predictions)
plt.xlabel('True Values [sfcprcp]')
plt.ylabel('Predictions [sfcprcp]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0, plt.xlim()[1]])
plt.ylim([0, plt.ylim()[1]])
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_y_test_vs_y_pred.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
#------------------------------------------------------------------------------
ax = plt.gca()
ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('True Values [sfcprcp]')
ax.set_ylabel('Predictions [sfcprcp]')
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_LOG_y_test_vs_y_pred.png"
plt.savefig(self.path_fig+fig_name)
plt.clf()
#------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# It looks like our model predicts reasonably well.
# Let's take a look at the error distribution.
error = test_predictions - y_test
plt.hist(error, bins=25)
plt.xlabel("Prediction Error [sfcprcp]")
plt.ylabel("Count")
fig_name = self.fig_title + "_prediction_error.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
        # HISTOGRAM 2D
plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 110), (0.2, 110)]))
plt.axis('equal')
plt.axis('square')
plt.plot([0, 100], [0, 100], ls="--", c=".3")
plt.xlim([0, max(y_test)])
plt.ylim([0, max(y_test)])
plt.colorbar()
plt.xlabel("Observed rain rate (mm/h) - Training")
plt.ylabel("Predicted rain rate (mm/h) - Training")
fig_name = self.fig_title + "_hist2D.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
# Saving model to YAML:
model_yaml = model.to_yaml()
with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
print("Saved model to disk")
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')
# -------------------------------------------------------------------------
# FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:
# -------------------------------------------------------------------------
def plot_history(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
        plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
# plt.show()
fig_name = self.fig_title + "_error_per_epochs_history.png"
plt.savefig(self.path_fig + fig_name)
def plot_history_EarlyStopping(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
fig_name = self.fig_title + "_error_per_epochs_EarlyStopping.png"
plt.savefig(self.path_fig + fig_name)
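# Hedged usage sketch (not part of the original module); every path and name below is a
# hypothetical placeholder for how the Training class above is driven:
#
#     trainer = Training(random_seed=123,
#                        run_prefix="exp1_",
#                        version="v1",
#                        version_nickname="baseline",
#                        csv_entry="training_data.csv",
#                        csv_path="/data/",
#                        figure_path="/figures/",
#                        model_out_path="/models/",
#                        model_out_name="screening_model")
#     trainer.autoExecClass()  # classification (rain / no-rain screening)
#     trainer.autoExecReg()    # regression (surface precipitation rate)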
|
py
|
1a5baa86e221fb6b2a278d2d857191e0533161ac
|
"""The tests the History component."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
import json
import unittest
from homeassistant.components import history, recorder
from homeassistant.components.recorder.models import process_timestamp
import homeassistant.core as ha
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch, sentinel
from tests.common import (
get_test_home_assistant,
init_recorder_component,
mock_state_change_event,
)
from tests.components.recorder.common import wait_recording_done
class TestComponentHistory(unittest.TestCase):
"""Test History component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
wait_recording_done(self.hass)
def test_setup(self):
"""Test setup method of history."""
config = history.CONFIG_SCHEMA(
{
# ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["media_player"],
history.CONF_ENTITIES: ["thermostat.test"],
},
history.CONF_EXCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
},
}
}
)
self.init_recorder()
assert setup_component(self.hass, history.DOMAIN, config)
def test_get_states(self):
"""Test getting states at a specific point in time."""
self.init_recorder()
states = []
now = dt_util.utcnow()
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=now
):
for i in range(5):
state = ha.State(
"test.point_in_time_{}".format(i % 5),
f"State {i}",
{"attribute_test": i},
)
mock_state_change_event(self.hass, state)
states.append(state)
wait_recording_done(self.hass)
future = now + timedelta(seconds=1)
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=future
):
for i in range(5):
state = ha.State(
"test.point_in_time_{}".format(i % 5),
f"State {i}",
{"attribute_test": i},
)
mock_state_change_event(self.hass, state)
wait_recording_done(self.hass)
# Get states returns everything before POINT
for state1, state2 in zip(
states,
sorted(
history.get_states(self.hass, future), key=lambda state: state.entity_id
),
):
assert state1 == state2
# Test get_state here because we have a DB setup
assert states[0] == history.get_state(self.hass, future, states[0].entity_id)
time_before_recorder_ran = now - timedelta(days=1000)
assert history.get_states(self.hass, time_before_recorder_ran) == []
assert history.get_state(self.hass, time_before_recorder_ran, "demo.id") is None
def test_state_changes_during_period(self):
"""Test state change during period."""
self.init_recorder()
entity_id = "media_player.test"
def set_state(state):
"""Set the state."""
self.hass.states.set(entity_id, state)
wait_recording_done(self.hass)
return self.hass.states.get(entity_id)
start = dt_util.utcnow()
point = start + timedelta(seconds=1)
end = point + timedelta(seconds=1)
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=start
):
set_state("idle")
set_state("YouTube")
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=point
):
states = [
set_state("idle"),
set_state("Netflix"),
set_state("Plex"),
set_state("YouTube"),
]
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=end
):
set_state("Netflix")
set_state("Plex")
hist = history.state_changes_during_period(self.hass, start, end, entity_id)
assert states == hist[entity_id]
def test_get_last_state_changes(self):
"""Test number of state changes."""
self.init_recorder()
entity_id = "sensor.test"
def set_state(state):
"""Set the state."""
self.hass.states.set(entity_id, state)
wait_recording_done(self.hass)
return self.hass.states.get(entity_id)
start = dt_util.utcnow() - timedelta(minutes=2)
point = start + timedelta(minutes=1)
point2 = point + timedelta(minutes=1)
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=start
):
set_state("1")
states = []
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=point
):
states.append(set_state("2"))
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=point2
):
states.append(set_state("3"))
hist = history.get_last_state_changes(self.hass, 2, entity_id)
assert states == hist[entity_id]
def test_get_significant_states(self):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
zero, four, states = self.record_states()
hist = history.get_significant_states(
self.hass, zero, four, filters=history.Filters()
)
assert states == hist
def test_get_significant_states_minimal_response(self):
"""Test that only significant states are returned.
When minimal responses is set only the first and
last states return a complete state.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
zero, four, states = self.record_states()
hist = history.get_significant_states(
self.hass, zero, four, filters=history.Filters(), minimal_response=True
)
# The second media_player.test state is reduced
# down to last_changed and state when minimal_response
        # is set. We use JSONEncoder to make sure that our
        # pre-encoded last_changed is always the same as what
# will happen with encoding a native state
input_state = states["media_player.test"][1]
orig_last_changed = json.dumps(
process_timestamp(input_state.last_changed), cls=JSONEncoder,
).replace('"', "")
orig_state = input_state.state
states["media_player.test"][1] = {
"last_changed": orig_last_changed,
"state": orig_state,
}
assert states == hist
def test_get_significant_states_with_initial(self):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
zero, four, states = self.record_states()
one = zero + timedelta(seconds=1)
one_and_half = zero + timedelta(seconds=1.5)
for entity_id in states:
if entity_id == "media_player.test":
states[entity_id] = states[entity_id][1:]
for state in states[entity_id]:
if state.last_changed == one:
state.last_changed = one_and_half
hist = history.get_significant_states(
self.hass,
one_and_half,
four,
filters=history.Filters(),
include_start_time_state=True,
)
assert states == hist
def test_get_significant_states_without_initial(self):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
zero, four, states = self.record_states()
one = zero + timedelta(seconds=1)
one_and_half = zero + timedelta(seconds=1.5)
for entity_id in states:
states[entity_id] = list(
filter(lambda s: s.last_changed != one, states[entity_id])
)
del states["media_player.test2"]
hist = history.get_significant_states(
self.hass,
one_and_half,
four,
filters=history.Filters(),
include_start_time_state=False,
)
assert states == hist
def test_get_significant_states_entity_id(self):
"""Test that only significant states are returned for one entity."""
zero, four, states = self.record_states()
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
hist = history.get_significant_states(
self.hass, zero, four, ["media_player.test"], filters=history.Filters()
)
assert states == hist
def test_get_significant_states_multiple_entity_ids(self):
"""Test that only significant states are returned for one entity."""
zero, four, states = self.record_states()
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
hist = history.get_significant_states(
self.hass,
zero,
four,
["media_player.test", "thermostat.test"],
filters=history.Filters(),
)
assert states == hist
def test_get_significant_states_exclude_domain(self):
"""Test if significant states are returned when excluding domains.
We should get back every thermostat change that includes an attribute
change, but no media player changes.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["media_player"]}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_exclude_entity(self):
"""Test if significant states are returned when excluding entities.
We should get back every thermostat and script changes, but no media
player changes.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {history.CONF_ENTITIES: ["media_player.test"]}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_exclude(self):
"""Test significant states when excluding entities and domains.
We should not get back every thermostat and media player test changes.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["thermostat.test"]
del states["thermostat.test2"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_exclude_include_entity(self):
"""Test significant states when excluding domains and include entities.
We should not get back every thermostat and media player test changes.
"""
zero, four, states = self.record_states()
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_ENTITIES: ["media_player.test", "thermostat.test"]
},
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["thermostat"]},
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include_domain(self):
"""Test if significant states are returned when including domains.
We should get back every thermostat and script changes, but no media
player changes.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["thermostat", "script"]
}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include_entity(self):
"""Test if significant states are returned when including entities.
We should only get back changes of the media_player.test entity.
"""
zero, four, states = self.record_states()
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_ENTITIES: ["media_player.test"]}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include(self):
"""Test significant states when including domains and entities.
We should only get back changes of the media_player.test entity and the
thermostat domain.
"""
zero, four, states = self.record_states()
del states["media_player.test2"]
del states["media_player.test3"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
}
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include_exclude_domain(self):
"""Test if significant states when excluding and including domains.
We should not get back any changes since we include only the
media_player domain but also exclude it.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_DOMAINS: ["media_player"]},
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["media_player"]},
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include_exclude_entity(self):
"""Test if significant states when excluding and including domains.
We should not get back any changes since we include only
media_player.test but also exclude it.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_ENTITIES: ["media_player.test"]
},
history.CONF_EXCLUDE: {
history.CONF_ENTITIES: ["media_player.test"]
},
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_include_exclude(self):
"""Test if significant states when in/excluding domains and entities.
We should only get back changes of the media_player.test2 entity.
"""
zero, four, states = self.record_states()
del states["media_player.test"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["media_player"],
history.CONF_ENTITIES: ["thermostat.test"],
},
history.CONF_EXCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
},
},
}
)
self.check_significant_states(zero, four, states, config)
def test_get_significant_states_are_ordered(self):
"""Test order of results from get_significant_states.
When entity ids are given, the results should be returned with the data
in the same order.
"""
zero, four, states = self.record_states()
entity_ids = ["media_player.test", "media_player.test2"]
hist = history.get_significant_states(
self.hass, zero, four, entity_ids, filters=history.Filters()
)
assert list(hist.keys()) == entity_ids
entity_ids = ["media_player.test2", "media_player.test"]
hist = history.get_significant_states(
self.hass, zero, four, entity_ids, filters=history.Filters()
)
assert list(hist.keys()) == entity_ids
def test_get_significant_states_only(self):
"""Test significant states when significant_states_only is set."""
self.init_recorder()
entity_id = "sensor.test"
def set_state(state, **kwargs):
"""Set the state."""
self.hass.states.set(entity_id, state, **kwargs)
wait_recording_done(self.hass)
return self.hass.states.get(entity_id)
start = dt_util.utcnow() - timedelta(minutes=4)
points = []
for i in range(1, 4):
points.append(start + timedelta(minutes=i))
states = []
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=start
):
set_state("123", attributes={"attribute": 10.64})
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[0]
):
# Attributes are different, state not
states.append(set_state("123", attributes={"attribute": 21.42}))
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[1]
):
# state is different, attributes not
states.append(set_state("32", attributes={"attribute": 21.42}))
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[2]
):
# everything is different
states.append(set_state("412", attributes={"attribute": 54.23}))
hist = history.get_significant_states(
self.hass, start, significant_changes_only=True
)
assert len(hist[entity_id]) == 2
assert states[0] not in hist[entity_id]
assert states[1] in hist[entity_id]
assert states[2] in hist[entity_id]
hist = history.get_significant_states(
self.hass, start, significant_changes_only=False
)
assert len(hist[entity_id]) == 3
assert states == hist[entity_id]
def check_significant_states(self, zero, four, states, config):
"""Check if significant states are retrieved."""
filters = history.Filters()
exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude.get(history.CONF_ENTITIES, [])
filters.excluded_domains = exclude.get(history.CONF_DOMAINS, [])
include = config[history.DOMAIN].get(history.CONF_INCLUDE)
if include:
filters.included_entities = include.get(history.CONF_ENTITIES, [])
filters.included_domains = include.get(history.CONF_DOMAINS, [])
hist = history.get_significant_states(self.hass, zero, four, filters=filters)
assert states == hist
def record_states(self):
"""Record some test states.
We inject a bunch of state updates from media player, zone and
thermostat.
"""
self.init_recorder()
mp = "media_player.test"
mp2 = "media_player.test2"
mp3 = "media_player.test3"
therm = "thermostat.test"
therm2 = "thermostat.test2"
zone = "zone.home"
script_nc = "script.cannot_cancel_this_one"
script_c = "script.can_cancel_this_one"
def set_state(entity_id, state, **kwargs):
"""Set the state."""
self.hass.states.set(entity_id, state, **kwargs)
wait_recording_done(self.hass)
return self.hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(seconds=1)
two = one + timedelta(seconds=1)
three = two + timedelta(seconds=1)
four = three + timedelta(seconds=1)
states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []}
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=one
):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp2].append(
set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp3].append(
set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[therm].append(
set_state(therm, 20, attributes={"current_temperature": 19.5})
)
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=two
):
            # This state will be skipped; it only differs in time
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)})
            # This state will be skipped as it is hidden
set_state(
mp3,
"Apple TV",
attributes={"media_title": str(sentinel.mt2), "hidden": True},
)
            # This state will be skipped because the domain is blacklisted
set_state(zone, "zoning")
set_state(script_nc, "off")
states[script_c].append(
set_state(script_c, "off", attributes={"can_cancel": True})
)
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 19.8})
)
states[therm2].append(
set_state(therm2, 20, attributes={"current_temperature": 19})
)
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=three
):
states[mp].append(
set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)})
)
states[mp3].append(
set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)})
)
# Attributes changed even though state is the same
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 20})
)
# state will be skipped since entity is hidden
set_state(therm, 22, attributes={"current_temperature": 21, "hidden": True})
return zero, four, states
async def test_fetch_period_api(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(f"/api/history/period/{dt_util.utcnow().isoformat()}")
assert response.status == 200
async def test_fetch_period_api_with_use_include_order(hass, hass_client):
"""Test the fetch period view for history with include order."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass, "history", {history.DOMAIN: {history.CONF_ORDER: True}}
)
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(f"/api/history/period/{dt_util.utcnow().isoformat()}")
assert response.status == 200
async def test_fetch_period_api_with_minimal_response(hass, hass_client):
"""Test the fetch period view for history with minimal_response."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}?minimal_response"
)
assert response.status == 200
async def test_fetch_period_api_with_no_timestamp(hass, hass_client):
"""Test the fetch period view for history with no timestamp."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get("/api/history/period")
assert response.status == 200
async def test_fetch_period_api_with_include_order(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{
"history": {
"use_include_order": True,
"include": {"entities": ["light.kitchen"]},
}
},
)
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}",
params={"filter_entity_id": "non.existing,something.else"},
)
assert response.status == 200
|
py
|
1a5bab4bdf4ad9ca91c78bd868c8a07176d6d169
|
from profiles_api.views import UserLoginApiView
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
# ViewSet
router = DefaultRouter()
# 'basename' is required when registering a ViewSet that has no queryset attribute,
# or when you want to override the name derived from the queryset (see the commented sketch below urlpatterns)
router.register("hello-viewset", views.HelloViewSet, basename="hello-viewset")
# No need to specify a 'basename' here because the ViewSet defines a queryset
router.register("profile", views.UserProfileViewSet)
router.register("feed", views.UserProfileFeedViewSet)
urlpatterns = [
path("hello-view/", views.HelloApiView.as_view()),
path("login/", views.UserLoginApiView.as_view()),
path("", include(router.urls)), # Pass router.register => router.urls
]
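# A minimal sketch (hypothetical viewset, not part of this app) of the 'basename' rule noted
# above: a ViewSet with no 'queryset' attribute gives DefaultRouter nothing to derive a name
# from, so 'basename' must be passed explicitly. Kept as a comment so it does not register
# an extra route in this project.
#
#   from rest_framework import viewsets
#   from rest_framework.response import Response
#
#   class PingViewSet(viewsets.ViewSet):
#       def list(self, request):
#           return Response({"ping": "pong"})
#
#   router.register("ping", PingViewSet, basename="ping")   # required: no queryset to infer from
#   router.register("profile", views.UserProfileViewSet)    # fine: basename inferred from the queryset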
|
py
|
1a5babf8fbd06b07d58d8244c3bbf67f9877c549
|
import logging
import random
import pytest
from ocs_ci.framework.pytest_customization.marks import aws_platform_required
from ocs_ci.framework.testlib import ManageTest, tier4, tier4b
from ocs_ci.ocs.exceptions import CommandFailed
from tests import sanity_helpers
logger = logging.getLogger(__name__)
@tier4
@tier4b
@pytest.mark.polarion_id("OCS-1287")
@aws_platform_required
@pytest.mark.skip(reason="az blocking method need to be fixed")
class TestAvailabilityZones(ManageTest):
"""
test availability zone failure:
test stages:
    1. Select an availability zone
    2. In this availability zone, back up the instances' original security groups
    3. Block the availability zone by attaching a security group with no permissions
    4. Validate cluster functionality and health
        4a. health check - warning or error
        4b. create cephfs, create rbd, create pvc (validate_cluster)
    5. Restore availability zone access
    6. Validate cluster functionality and health
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
init Sanity() object
"""
self.sanity_helpers = sanity_helpers.Sanity()
@pytest.fixture()
def teardown(self, request, ec2_instances, aws_obj):
def finalizer():
current_sg = aws_obj.store_security_groups_for_instances(self.instances_in_az)
if self.original_sgs != current_sg:
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been restored")
if self.security_group_id in aws_obj.get_all_security_groups():
logger.info(f"Deleting: {self.security_group_id}")
aws_obj.delete_security_group(self.security_group_id)
request.addfinalizer(finalizer)
def test_availability_zone_failure(
self, aws_obj, ec2_instances, pvc_factory, pod_factory, teardown
):
"""
Simulate AWS availability zone failure
"""
# Select instances in randomly chosen availability zone:
self.instances_in_az = self.random_availability_zone_selector(aws_obj, ec2_instances)
logger.info(f"AZ selected, Instances: {self.instances_in_az} to be blocked")
# Storing current security groups for selected instances:
self.original_sgs = aws_obj.store_security_groups_for_instances(self.instances_in_az)
logger.info(f"Original security groups of selected instances: {self.original_sgs}")
# Blocking instances:
self.security_group_id = self.block_aws_availability_zone(aws_obj, self.instances_in_az)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been blocked")
# Check cluster's health, need to be unhealthy at that point
        assert not self.check_cluster_health(), (
            "Cluster is wrongly reported as healthy. "
            f"EC2 instances {self.instances_in_az} are blocked"
        )
# Create resources
logger.info("Trying to create resources on un-healthy cluster")
self.sanity_helpers.create_resources(pvc_factory, pod_factory)
logger.info("Resources Created")
# Delete resources
logger.info("Trying to delete resources on un-healthy cluster")
self.sanity_helpers.delete_resources()
logger.info("Resources Deleted")
# Restore access for blocked instances
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
logger.info(f"Access restores")
# Check cluster's health, need to be healthy at that point
assert self.check_cluster_health(), "Cluster is unhealthy"
def random_availability_zone_selector(self, aws_obj, ec2_instances):
"""
Get all instances within random availability zone
Args:
aws_obj (obj): aws.AWS() object
ec2_instances (dict): cluster ec2 instances objects
Returns:
list: instances_in_az
"""
random_az_selector = random.choice(list(ec2_instances.keys()))
random_az_selected = aws_obj.get_availability_zone_id_by_instance_id(random_az_selector)
instances_in_az = list()
for instance in ec2_instances.keys():
az = aws_obj.get_availability_zone_id_by_instance_id(instance)
if random_az_selected == az:
instances_in_az.append(instance)
return instances_in_az
def block_aws_availability_zone(self, aws_obj, instances_in_az):
"""
1. get vpc_id
2. create security group in this vpc
3. block availability zone by using "append_security_group"
Args:
aws_obj (obj): aws.AWS() object
instances_in_az (list): ec2_instances within selected availability zone
Returns:
            security_group_id (str): ID of the newly created security group with no access permissions
"""
group_name = "TEST_SEC_GROUP"
dict_permissions = {'IpProtocol': 'tcp',
'FromPort': 80,
'ToPort': 80,
'IpRanges': [{'CidrIp': '1.1.1.1/32'}]}
vpc_id = aws_obj.get_vpc_id_by_instance_id(instances_in_az[0])
security_group_id = aws_obj.create_security_group(group_name, dict_permissions, vpc_id)
aws_obj.block_instances_access(security_group_id, instances_in_az)
return security_group_id
def check_cluster_health(self):
try:
self.sanity_helpers.health_check()
return True
except CommandFailed as e:
if "Unable to connect to the server" in str(e):
logger.warning(f"{e}, Cluster is not healthy")
return False
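# A standalone sketch of the same blocking idea using plain boto3 (an assumption about the
# underlying AWS calls; the test itself goes through ocs_ci's aws_obj wrapper above). The
# group name and the single harmless ingress rule mirror block_aws_availability_zone().
def _block_instances_with_boto3(instance_ids, vpc_id, region_name="us-east-1"):
    import boto3  # imported locally so this illustrative helper adds no import-time cost
    ec2 = boto3.client("ec2", region_name=region_name)
    sg_id = ec2.create_security_group(
        GroupName="TEST_SEC_GROUP",
        Description="AZ failure simulation - no useful ingress",
        VpcId=vpc_id,
    )["GroupId"]
    ec2.authorize_security_group_ingress(
        GroupId=sg_id,
        IpPermissions=[{
            "IpProtocol": "tcp", "FromPort": 80, "ToPort": 80,
            "IpRanges": [{"CidrIp": "1.1.1.1/32"}],
        }],
    )
    for instance_id in instance_ids:
        # Swapping the instance's security groups for the restrictive one cuts its traffic.
        ec2.modify_instance_attribute(InstanceId=instance_id, Groups=[sg_id])
    return sg_id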
|
py
|
1a5bac43e3ac91141984be66e976aaade8e54d63
|
# qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.x(input_qubit[1]) # number=8
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.x(input_qubit[1]) # number=11
prog.cx(input_qubit[0],input_qubit[1]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_noisy354.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py
|
1a5bacd709b7232bdce77b9601427ae6e2b20574
|
from dataclasses import dataclass, field
from typing import List
from .diagram_element import DiagramElement
from .point import Point
__NAMESPACE__ = "http://www.omg.org/spec/DD/20100524/DI"
@dataclass
class Edge(DiagramElement):
class Meta:
namespace = "http://www.omg.org/spec/DD/20100524/DI"
waypoint: List[Point] = field(
default_factory=list,
metadata={
"type": "Element",
"min_occurs": 2,
}
)
|
py
|
1a5bae24c2a01cb13f42e5a59fd6df3b2ee0e9fd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
# pylint: disable=line-too-long
""" Python Module that exposes the ExtPackage class """
import logging
import os
from setuptools import sandbox as use_setuptools
from resilient_circuits.util.ext.ExtCreate import ExtCreate
# Get the same logger object that is used in resilient_circuits_cmd.py
LOG = logging.getLogger("resilient_circuits_cmd_logger")
# Constants
BASE_NAME_SETUP_PY = "setup.py"
BASE_NAME_DIST_DIR = "dist"
PATH_CUSTOMIZE_PY = os.path.join("util", "customize.py")
PATH_CONFIG_PY = os.path.join("util", "config.py")
PATH_ICON_EXTENSION_LOGO = os.path.join("icons", "extension_logo.png")
PATH_ICON_COMPANY_LOGO = os.path.join("icons", "company_logo.png")
class ExtPackage(ExtCreate):
""" ExtPackage is a subclass of ExtCreate. It exposes one
method: package_extension() """
@classmethod
def package_extension(cls, path_to_src, custom_display_name=None, keep_build_dir=False):
""" Function that creates The Extension.zip file from the give source path and returns
the path to the new Extension.zip
- path_to_src [String]: must include a setup.py, customize.py and config.py file.
- custom_display_name [String]: will give the Extension that display name. Default: name from setup.py file
- keep_build_dir [Boolean]: if True, dist/build/ will not be remove. Default: False
- The code will be packaged into a Built Distribution (.tar.gz) in the /dist directory
- The Extension.zip will also be produced in the /dist directory"""
# Ensure the src directory exists and we have WRITE access
cls.__validate_directory__(os.W_OK, path_to_src)
# Generate paths to files required to create extension
path_setup_py_file = os.path.join(path_to_src, BASE_NAME_SETUP_PY)
path_customize_py_file = os.path.join(path_to_src, os.path.basename(path_to_src), PATH_CUSTOMIZE_PY)
path_config_py_file = os.path.join(path_to_src, os.path.basename(path_to_src), PATH_CONFIG_PY)
path_output_dir = os.path.join(path_to_src, BASE_NAME_DIST_DIR)
path_extension_logo = os.path.join(path_to_src, PATH_ICON_EXTENSION_LOGO)
path_company_logo = os.path.join(path_to_src, PATH_ICON_COMPANY_LOGO)
LOG.info("Creating Built Distribution in /dist directory")
# Create the built distribution
use_setuptools.run_setup(setup_script=path_setup_py_file, args=["sdist", "--formats=gztar"])
LOG.info("Built Distribution (.tar.gz) created at: %s", path_output_dir)
# Create the extension
path_the_extension_zip = cls.create_extension(
path_setup_py_file=path_setup_py_file,
path_customize_py_file=path_customize_py_file,
path_config_py_file=path_config_py_file,
output_dir=path_output_dir,
custom_display_name=custom_display_name,
keep_build_dir=keep_build_dir,
path_extension_logo=path_extension_logo,
path_company_logo=path_company_logo
)
LOG.info("Extension created at: %s", path_the_extension_zip)
return path_the_extension_zip
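# A minimal usage sketch; it only runs when this module is executed directly. The source path
# and display name are hypothetical placeholders, not values shipped with resilient-circuits.
if __name__ == "__main__":
    demo_src = "/path/to/my_integration"  # hypothetical: must contain setup.py, util/customize.py, util/config.py
    print(ExtPackage.package_extension(demo_src, custom_display_name="My Integration"))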
|
py
|
1a5baea69dde03789fbab3752f0b4739d8e1a26a
|
"""Travis CI memory can't handle all the starshots; thus only test them when explicitly asked to."""
import os.path as osp
from unittest import TestCase
from tests_basic.test_starshot import StarMixin, Point
from tests_basic import TEST_BANK_DIR
class StarBankMixin(StarMixin):
dir_location = osp.join(TEST_BANK_DIR, 'Starshots')
class Starshot2(StarBankMixin, TestCase):
file_path = ['Starshot#2.tif']
wobble_center = Point(566, 590)
wobble_diameter_mm = 0.2
num_rad_lines = 4
# outside: 0.18-0.19
class Starshot3(StarBankMixin, TestCase):
file_path = ['Starshot#3.tif']
wobble_center = Point(466, 595)
wobble_diameter_mm = 0.32
num_rad_lines = 6
# outside 0.33
class Starshot4(StarBankMixin, TestCase):
file_path = ['Starshot#4.tif']
wobble_center = Point(446, 565)
wobble_diameter_mm = 0.38
num_rad_lines = 6
# outside 0.39
class Starshot5(StarBankMixin, TestCase):
file_path = ['Starshot#5.tif']
wobble_center = Point(557, 580)
wobble_diameter_mm = 0.15
num_rad_lines = 4
wobble_tolerance = 0.2
# outside: 0.14
class Starshot6(StarBankMixin, TestCase):
file_path = ['Starshot#6.tif']
wobble_center = Point(528, 607)
wobble_diameter_mm = 0.3
num_rad_lines = 7
class Starshot7(StarBankMixin, TestCase):
file_path = ['Starshot#7.tif']
wobble_center = Point(469, 646)
wobble_diameter_mm = 0.2
num_rad_lines = 4
wobble_tolerance = 0.2
class Starshot8(StarBankMixin, TestCase):
file_path = ['Starshot#8.tiff']
wobble_center = Point(686, 669)
wobble_diameter_mm = 0.35
num_rad_lines = 5
class Starshot9(StarBankMixin, TestCase):
file_path = ['Starshot#9.tiff']
wobble_center = Point(714, 611)
wobble_diameter_mm = 0.3
num_rad_lines = 5
class Starshot10(StarBankMixin, TestCase):
file_path = ['Starshot#10.tiff']
wobble_center = Point(725, 802)
wobble_diameter_mm = 0.65
num_rad_lines = 5
class Starshot11(StarBankMixin, TestCase):
file_path = ['Starshot#11.tiff']
wobble_center = Point(760, 650)
wobble_diameter_mm = 0.6
num_rad_lines = 4
class Starshot12(StarBankMixin, TestCase):
file_path = ['Starshot#12.tiff']
wobble_center = Point(315, 292)
wobble_diameter_mm = 0.88
num_rad_lines = 4
class Starshot13(StarBankMixin, TestCase):
file_path = ['Starshot#13.tiff']
wobble_center = Point(376, 303)
wobble_diameter_mm = 0.2
num_rad_lines = 4
class Starshot14(StarBankMixin, TestCase):
file_path = ['Starshot#14.tiff']
wobble_center = Point(334, 282)
wobble_diameter_mm = 0.55
num_rad_lines = 4
class Starshot15(StarBankMixin, TestCase):
file_path = ['Starshot#15.tiff']
wobble_center = Point(346, 309)
wobble_diameter_mm = 0.6
num_rad_lines = 4
class Starshot16(StarBankMixin, TestCase):
file_path = ['Starshot#16.tiff']
wobble_center = Point(1444, 1452)
wobble_diameter_mm = 0.6
num_rad_lines = 6
class Starshot17(StarBankMixin, TestCase):
file_path = ['Starshot#17.tiff']
wobble_center = Point(1475, 1361)
wobble_diameter_mm = 0.44
num_rad_lines = 6
class Starshot18(StarBankMixin, TestCase):
file_path = ['Starshot#18.tiff']
wobble_center = Point(1516, 1214)
wobble_diameter_mm = 0.6
num_rad_lines = 6
class Starshot19(StarBankMixin, TestCase):
file_path = ['Starshot#19.tiff']
wobble_center = Point(1475, 1276)
wobble_diameter_mm = 0.6
num_rad_lines = 6
class Starshot20(StarBankMixin, TestCase):
file_path = ['Starshot#20.tiff']
wobble_center = Point(347, 328)
wobble_diameter_mm = 0.75
num_rad_lines = 4
class Starshot21(StarBankMixin, TestCase):
file_path = ['Starshot#21.tiff']
wobble_center = Point(354, 294)
wobble_diameter_mm = 1.3
wobble_tolerance = 0.2
num_rad_lines = 4
passes = False
class Starshot22(StarBankMixin, TestCase):
file_path = ['Starshot#22.tiff']
wobble_center = Point(1305, 1513)
wobble_diameter_mm = 0.9
num_rad_lines = 9
# outside 0.93mm
def test_bad_input_no_recursion_fails(self):
"""Test that without recursion, a bad setup fails."""
with self.assertRaises(RuntimeError):
self.star.analyze(radius=0.3, min_peak_height=0.95, recursive=False)
# but will pass with recursion
self.star.analyze()
self.test_passed()
class Starshot23(StarBankMixin, TestCase):
file_path = ['Starshot#23.tiff']
wobble_center = Point(1297, 1699)
wobble_diameter_mm = 0.38
num_rad_lines = 9
class Starshot24(StarBankMixin, TestCase):
file_path = ['Starshot#24.tiff']
wobble_center = Point(1370, 1454)
wobble_diameter_mm = 0.3
num_rad_lines = 4
class Starshot25(StarBankMixin, TestCase):
file_path = ['Starshot#25.tiff']
wobble_center = Point(286, 279)
wobble_diameter_mm = 0.3
num_rad_lines = 4
class Starshot26(StarBankMixin, TestCase):
file_path = ['Starshot#26.tiff']
wobble_center = Point(1511, 1452)
wobble_diameter_mm = 0.55
num_rad_lines = 4
wobble_tolerance = 0.15
class Starshot27(StarBankMixin, TestCase):
file_path = ['Starshot#27.tiff']
wobble_center = Point(1105, 1306)
wobble_diameter_mm = 0.4
num_rad_lines = 6
class CRStarshot(StarBankMixin, TestCase):
file_path = ['CR-Starshot.dcm']
wobble_center = Point(1030.5, 1253.6)
wobble_diameter_mm = 0.3
num_rad_lines = 6
class ChicagoSet(StarBankMixin, TestCase):
file_path = ['Chicago']
wobble_center = Point(638, 639.3)
wobble_diameter_mm = 0.65
num_rad_lines = 5
is_dir = True
|
py
|
1a5baeeef042d82db6be9a161686f3fe74fb69a2
|
from __future__ import print_function
import sys
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
import matplotlib as mpl
import matplotlib.pyplot as plt
import cv2 as cv
import numpy as np
import os
import pickle
import copy
import time
class ContourGraph(pg.GraphItem):
def __init__(self):
pg.GraphItem.__init__(self)
def setData(self, **kwds):
self.data = copy.deepcopy(kwds)
if 'pos' in self.data:
npts = self.data['pos'].shape[0]
self.data['data'] = np.empty(npts, dtype=[('index', int)])
self.data['data']['index'] = np.arange(npts)
pg.GraphItem.setData(self, **self.data)
class ContourViewWidget(pg.GraphicsWindow):
def __init__(self, parent=None):
pg.GraphicsWindow.__init__(self)
self.setParent(parent)
self.w_sub = self.addLayout(row=0,col=0)
self.mov_nr = 0
self.v_list = []
self.img_list = []
self.contour_list_dest_body = []
self.contour_list_dest_wing_L = []
self.contour_list_dest_wing_R = []
self.contour_list_init_body = []
self.contour_list_init_wing_L = []
self.contour_list_init_wing_R = []
self.contour_list_outer = []
self.contour_list_dest = []
self.contour_list_init = []
self.init_check = False
self.dest_check = False
self.src_check = False
def loadFLT(self,flt):
self.flt = flt
def setMovNR(self,mov_nr):
self.mov_nr = mov_nr-1
def set_init_view(self,check):
self.init_check = check
def set_dest_view(self,check):
self.dest_check = check
def set_src_view(self,check):
self.src_check = check
def add_frame(self,frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
self.image_size = []
frame_list = self.flt.get_frame()
for i, frame in enumerate(frame_list):
self.image_size.append(np.array([frame.shape[0],frame.shape[1]]))
self.v_list.append(self.w_sub.addViewBox(row=1,col=i,lockAspect=True))
self.img_list.append(pg.ImageItem(np.transpose(np.flipud(frame))))
self.v_list[i].addItem(self.img_list[i])
self.v_list[i].disableAutoRange('xy')
self.v_list[i].autoRange()
def update_frame(self,frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
frame_list = self.flt.get_frame()
for i, frame in enumerate(frame_list):
self.img_list[i].setImage(np.transpose(np.flipud(frame)))
self.update_contour()
def add_contour(self, frame_nr):
self.flt.load_frame(self.mov_nr,frame_nr)
self.flt.segment_frame()
self.flt.project_frame_2_pcl()
self.flt.find_initial_state()
dest_contour_list = self.flt.return_dest_contour()
init_contour_list = self.flt.return_init_contour()
# Create 10 empty contours in each window
for i, contour_list in enumerate(dest_contour_list):
self.contour_list_dest.append([])
for j in range(10):
self.contour_list_dest[i].append(pg.PlotCurveItem())
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
self.v_list[i].addItem(self.contour_list_dest[i][j])
for i, contour_list in enumerate(init_contour_list):
self.contour_list_init.append([])
for j in range(10):
self.contour_list_init[i].append(pg.PlotCurveItem())
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
self.v_list[i].addItem(self.contour_list_init[i][j])
def update_contour(self):
self.flt.segment_frame()
self.flt.project_frame_2_pcl()
self.flt.find_initial_state()
color_list = [(0,0,255), (255,0,0), (0,255,0)]
dest_contour_list = self.flt.return_dest_contour()
init_contour_list = self.flt.return_init_contour()
for i, contour_list in enumerate(dest_contour_list):
N_items = len(contour_list)
for j in range(10):
if (j<N_items):
if (np.amax(contour_list[j][2,:])>0):
color_now = color_list[int(np.amax(contour_list[j][2,:])-1)]
self.contour_list_dest[i][j].setData(x=contour_list[j][0,:],y=self.image_size[i][1]-contour_list[j][1,:],pen=color_now)
else:
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
else:
self.contour_list_dest[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
for i, contour_list in enumerate(init_contour_list):
N_items = len(contour_list)
for j in range(10):
if (j<N_items):
if (np.amax(contour_list[j][2,:])>0):
color_now = color_list[int(np.amax(contour_list[j][2,:])-1)]
self.contour_list_init[i][j].setData(x=contour_list[j][0,:],y=self.image_size[i][1]-contour_list[j][1,:],pen=color_now)
else:
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
else:
self.contour_list_init[i][j].setData(x=np.asarray([0.0]),y=np.asarray([0.0]),pen=(0,0,255))
|
py
|
1a5baf0fdfb3696e8bd9685cc69f92a86d105162
|
from .base import BaseRaveAPI # pylint: disable=E0401
import requests
import random
import base64
import hashlib
#from cryptography.fernet import Fernet # pylint: disable=E0401
from Cryptodome.Cipher import DES3
from .errors import RaveAPIError, InvalidDataError
try:
import json
except:
import simplejson as json
class Transaction(BaseRaveAPI):
def _handle_request(self, method, url, encrypted_payload=None):
"""Handles all requests: GET, POST, PUT, DELETE etc"""
if not encrypted_payload:
raise InvalidDataError("Error: You need to pass a valid payload")
try:
response = requests.request(method, url, headers=self._headers(), data=json.dumps(encrypted_payload))
return response
except Exception as e:
raise ValueError(e)
def _get_encrypt_key(self):
"""Implementing the getEncryptKey() from the base class"""
seckey = self._get_key()
hashedseckey = hashlib.md5(seckey.encode("utf-8")).hexdigest()
hashedseckeylast12 = hashedseckey[-12:]
seckeyadjusted = seckey.replace('FLWSECK-', '')
seckeyadjustedfirst12 = seckeyadjusted[:12]
return seckeyadjustedfirst12 + hashedseckeylast12
def encrypt_data(self, payloader):
"""Implementing the encrypt_data() from base class"""
blockSize = 8
key = self._get_encrypt_key()
plain_text = payloader.json_payload()
padDiff = blockSize - (len(plain_text) % blockSize) # Using this line as specified by the rave docs
cipher_suite = DES3.new(key, DES3.MODE_ECB)
plain_text = "{}{}".format(plain_text, "".join(chr(padDiff) * padDiff)).encode("utf8")
encrypted_data = base64.b64encode(cipher_suite.encrypt(plain_text))
data = {
'PBFPubKey': self._get_key(),
'client': encrypted_data.decode("utf8"),
'alg': '3DES-24'
}
return data
def initialize(self, payloader):
"""Implement the base class to initialize the payment
DESCRIPTION
METHOD: 'post'
ENDPOINT: 'charge'
RETURNS
response (dict): api response depending on card of the customer
"""
endpoint = 'charge'
method = 'POST'
url = self._url(endpoint)
payload = self.encrypt_data(payloader)
# process the transaction
try:
            response = self._handle_request(method, url, encrypted_payload=payload)
            if not response.json().get('status', False):
raise RaveAPIError("There is a problem with your API configuration.\
contact Pastor, Emmanuel on [email protected]")
except Exception as e:
raise ValueError(e)
return response
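# Illustrative helper (not used by the client): the reverse of encrypt_data() above, handy for
# locally verifying the 3DES round trip. It mirrors the DES3.new() call made in encrypt_data,
# so the key handling assumptions are the same as in the code above.
def _decrypt_client_payload(encrypted_b64, encrypt_key):
    """Decrypt a base64 'client' payload produced by Transaction.encrypt_data."""
    cipher = DES3.new(encrypt_key, DES3.MODE_ECB)
    padded = cipher.decrypt(base64.b64decode(encrypted_b64))
    # The last byte's value equals the pad length (chr(padDiff) * padDiff), so strip that many bytes.
    return padded[:-padded[-1]].decode("utf8")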
|
py
|
1a5baf5a3dafdad35bd8a392375d15ee04c9bbb3
|
#----------------------------------------------------#
#   Generate detection-result and images-optional files for the test set
#   A step-by-step video tutorial is available at
#   https://www.bilibili.com/video/BV1zE411u7Vw
#----------------------------------------------------#
from frcnn import FRCNN
from PIL import Image
from torch.autograd import Variable
import torch
import numpy as np
import os
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
from utils.utils import loc2bbox, nms, DecodeBox
from nets.frcnn import FasterRCNN
from nets.frcnn_training import get_new_img_size
from PIL import Image, ImageFont, ImageDraw
import copy
class mAP_FRCNN(FRCNN):
    #---------------------------------------------------#
    #   Detect a single image
    #---------------------------------------------------#
def detect_image(self,image_id,image):
self.confidence = 0.05
f = open("./input/detection-results/"+image_id+".txt","w")
image_shape = np.array(np.shape(image)[0:2])
old_width = image_shape[1]
old_height = image_shape[0]
width,height = get_new_img_size(old_width,old_height)
image = image.resize([width,height])
photo = np.array(image,dtype = np.float32)/255
photo = np.transpose(photo, (2, 0, 1))
with torch.no_grad():
images = []
images.append(photo)
images = np.asarray(images)
images = torch.from_numpy(images).cuda()
roi_cls_locs, roi_scores, rois, roi_indices = self.model(images)
decodebox = DecodeBox(self.std, self.mean, self.num_classes)
outputs = decodebox.forward(roi_cls_locs, roi_scores, rois, height=height, width=width, score_thresh = self.confidence)
if len(outputs)==0:
return
bbox = outputs[:,:4]
conf = outputs[:, 4]
label = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2])/width*old_width
bbox[:, 1::2] = (bbox[:, 1::2])/height*old_height
bbox = np.array(bbox,np.int32)
for i, c in enumerate(label):
predicted_class = self.class_names[int(c)]
score = str(conf[i])
left, top, right, bottom = bbox[i]
f.write("%s %s %s %s %s %s\n" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))
f.close()
return
frcnn = mAP_FRCNN()
image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()
if not os.path.exists("./input"):
os.makedirs("./input")
if not os.path.exists("./input/detection-results"):
os.makedirs("./input/detection-results")
if not os.path.exists("./input/images-optional"):
os.makedirs("./input/images-optional")
for image_id in image_ids:
image_path = "./VOCdevkit/VOC2007/JPEGImages/"+image_id+".jpg"
image = Image.open(image_path)
image.save("./input/images-optional/"+image_id+".jpg")
frcnn.detect_image(image_id,image)
print(image_id," done!")
print("Conversion completed!")
|
py
|
1a5bafa02f75c220a0532ff3385d1b796dd66cda
|
"""Exceptions related to the LTD Keeper.
"""
__all__ = ("KeeperError",)
from ..exceptions import ConveyorError
class KeeperError(ConveyorError):
"""Error raised because of issues using the LTD Keeper API."""
|
py
|
1a5bb10687c86087cc27e36d5a3fcd9cb6feae98
|
# -*- coding: utf-8 -*-
from nbsite.shared_conf import *
project = u' '
authors = u'Panel contributors'
copyright = u'2018 ' + authors
description = 'High-level dashboarding for python visualization libraries'
import panel
version = release = str(panel.__version__)
html_static_path += ['_static']
html_theme = 'sphinx_ioam_theme'
html_theme_options = {
'logo': 'logo_horizontal.png',
'favicon': 'favicon.ico',
'css': 'site.css'
}
_NAV = (
('User Guide', 'user_guide/index'),
('About', 'about')
)
templates_path = ['_templates']
html_context.update({
'PROJECT': project,
'DESCRIPTION': description,
'AUTHOR': authors,
'WEBSITE_URL': 'https://panel.pyviz.org',
'WEBSITE_SERVER': 'https://panel.pyviz.org',
'VERSION': version,
'NAV': _NAV,
'LINKS': _NAV,
'SOCIAL': (
('Gitter', '//gitter.im/pyviz/pyviz'),
('Github', '//github.com/pyviz/panel'),
)
})
nbbuild_patterns_to_take_along = ["simple.html"]
|
py
|
1a5bb1213615e1d91bd02a5de9134acb058c053b
|
"""
Django settings for spodaily project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
AUTH_USER_MODEL = "spodaily_api.User"
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'spodaily_api.apps.SpodailyApiConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'spodaily.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spodaily.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['NAME'],
'USER': os.environ['USER'],
'PASSWORD': os.environ['PASSWORD'],
'HOST': os.environ['HOST'],
'PORT': 5432
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/fit/home/'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ['EMAIL_HOST']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_PASSWORD']
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
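# Local-development note (an assumption about how you run this project, not part of the
# settings themselves): every os.environ[...] read above must be set before Django imports
# this module, e.g. via the shell or a .env loader. A placeholder-only sketch:
#
#   import os
#   for _key, _value in {
#       "SECRET_KEY": "dev-only-not-secret",
#       "NAME": "spodaily", "USER": "postgres", "PASSWORD": "postgres", "HOST": "localhost",
#       "EMAIL_HOST": "[email protected]", "EMAIL_PASSWORD": "app-password",
#   }.items():
#       os.environ.setdefault(_key, _value)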
|
py
|
1a5bb1e3750adc1f2965422f2941b2e72391ea0f
|
"""Compilation of datasets for few-shot text classification.
Few-shot Text Classification with Distributional Signatures
Yujia Bao, Menghua Wu, Shiyu Chang and Regina Barzilay.
https://arxiv.org/pdf/1908.06039.pdf
@inproceedings{
bao2020fewshot,
title={Few-shot Text Classification with Distributional Signatures},
author={Yujia Bao and Menghua Wu and Shiyu Chang and Regina Barzilay},
booktitle={International Conference on Learning Representations},
year={2020}
}
"""
import os
import json
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data.dataset import Dataset
from collections import Counter, defaultdict
from transformers import RobertaTokenizer
class BaseFewShotTextDataset(Dataset):
def __init__(
self,
data_root,
n_ways=5,
n_shots=5,
n_queries=25,
split='train',
roberta_device='cpu',
fix_seed=42,
):
super().__init__()
self.data_root = data_root
self.cache_dir = os.path.realpath(os.path.join(self.data_root, '../cache'))
if not os.path.isdir(self.cache_dir):
os.makedirs(self.cache_dir)
self.split = split
self.n_ways = n_ways
self.n_shots = n_shots
self.n_queries = n_queries
self.roberta_device = roberta_device
self.rs = np.random.RandomState(fix_seed)
self.fix_seed = fix_seed
self.max_seq_len = 512
self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
self.vocab_size = self.tokenizer.vocab_size
self.pad_index = self.tokenizer.pad_token_id
self.mask_index = self.tokenizer.mask_token_id
self.mask_token = self.tokenizer.mask_token
print('loading data...')
data, self.classes = self.load_data()
# NOTE: no side information since we don't have anything special
        # NOTE: no smlmt for simplicity
self.tokens, self.masks, self.labels = self.process_data(data)
def make_classes(self):
raise NotImplementedError
def load_data(self):
train_classes, val_classes, test_classes = self.make_classes()
if self.split == 'train':
classes = train_classes
elif self.split == 'val':
classes = val_classes
elif self.split == 'test':
classes = test_classes
else:
raise Exception(f'split {self.split} not supported.')
all_data = _load_json(self.data_root)
# partition data with classes!
data = []
for example in all_data:
if example['label'] in classes:
data.append(example)
return data, classes
def process_data(self, data):
texts = [row['text'] for row in data]
labels = [row['label'] for row in data]
tokens, masks = [], []
for text in texts:
outputs = self.tokenizer(
' '.join(text),
truncation=True,
padding='max_length',
max_length=self.max_seq_len,
pad_to_max_length=True,
return_tensors='pt',
)
tokens.append(outputs['input_ids'])
masks.append(outputs['attention_mask'])
labels = np.array(labels)
return tokens, masks, labels
def prep_smlmt_task(self, data):
all_text = [row['text'] for row in data]
unique_text = []
for i in range(len(all_text)):
text_i = np.unique(all_text[i])
unique_text.append(text_i)
unique_text = np.concatenate(unique_text)
freqs = Counter(unique_text)
valid_words = []
for word, fr in freqs.items():
if fr >= (self.n_shots + self.n_queries):
valid_words.append(word)
# these are the tokens with enough
# labels to choose from!
smlmt_cats = np.array(valid_words)
# now we need to map each of these cats to
# the indices of sentences that contain them
smlmt_mapping = defaultdict(lambda: [])
pbar = tqdm(total=len(all_text))
for text in all_text:
tokens = set(text)
for word in smlmt_cats:
if word in tokens:
smlmt_mapping[word].append(text)
pbar.update()
pbar.close()
# maps valid category to all sequences containing it
return smlmt_mapping
def build_smlmt_task(self, smlmt_mapping, data):
smlmt_words = list(smlmt_mapping.keys())
words = self.rs.choice(smlmt_words, self.n_ways, replace=False)
data = []
for i, word in enumerate(words):
data_i = {}
toks_i = smlmt_mapping[word][:100] # at most 100
for text in toks_i:
# perform the masking of ALL instances
text = np.array(text)
text[text == word] = self.mask_token
text = text.tolist()
data_i['text'] = text
data_i['label'] = i
data.append(data_i)
return data
def __getitem__(self, index):
categories = self.rs.choice(self.classes, size=self.n_ways, replace=False)
task_tokens = []
task_masks = []
task_labels = []
for c in range(len(categories)):
category = categories[c]
indices = np.where(self.labels == category)[0]
should_replace = True if len(indices) < (self.n_shots+self.n_queries) else False
indices = self.rs.choice(indices, size=self.n_shots+self.n_queries, replace=should_replace)
# task_tokens_i : (n_shots+n_queries) x 512
task_tokens_i = torch.stack([self.tokens[ix] for ix in indices])
# task_masks_i : (n_shots+n_queries) x 512
task_masks_i = torch.stack([self.masks[ix] for ix in indices])
# task_labels_i : (n_shots+n_queries)
task_labels_i = torch.zeros(self.n_shots+self.n_queries).long() + c
task_tokens.append(task_tokens_i)
task_masks.append(task_masks_i)
task_labels.append(task_labels_i)
# task_tokens : n_ways x (n_shots+n_queries) x 512
task_tokens = torch.stack(task_tokens)
# task_masks : n_ways x (n_shots+n_queries) x 512
task_masks = torch.stack(task_masks)
# task_labels : n_ways x (n_shots+n_queries)
task_labels = torch.stack(task_labels)
# task_lengths : n_ways x (n_shots+n_queries)
task_lengths = torch.sum(task_masks, dim=2)
task_dict = dict(
support_toks=task_tokens[:, :self.n_shots].long(),
support_masks=task_masks[:, :self.n_shots].long(),
support_labs=task_labels[:, :self.n_shots].long(),
support_lens=task_lengths[:, :self.n_shots].long(),
# --
query_toks=task_tokens[:, -self.n_queries:].long(),
query_masks=task_masks[:, -self.n_queries:].long(),
query_labs=task_labels[:, -self.n_queries:].long(),
query_lens=task_lengths[:, -self.n_queries:].long(),
# --
task_type=0,
)
return task_dict
def num_episodes(self):
if self.split == 'train':
return 100
elif self.split == 'val':
return 100
elif self.split == 'test':
return 1000
else:
raise Exception(f'Split {self.split} not supported.')
def __len__(self): # number of episodes
return self.num_episodes()
class FewShot20News(BaseFewShotTextDataset):
LABEL_DICT = {
'talk.politics.mideast': 0,
'sci.space': 1,
'misc.forsale': 2,
'talk.politics.misc': 3,
'comp.graphics': 4,
'sci.crypt': 5,
'comp.windows.x': 6,
'comp.os.ms-windows.misc': 7,
'talk.politics.guns': 8,
'talk.religion.misc': 9,
'rec.autos': 10,
'sci.med': 11,
'comp.sys.mac.hardware': 12,
'sci.electronics': 13,
'rec.sport.hockey': 14,
'alt.atheism': 15,
'rec.motorcycles': 16,
'comp.sys.ibm.pc.hardware': 17,
'rec.sport.baseball': 18,
'soc.religion.christian': 19,
}
def make_classes(self):
train_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] in ['sci', 'rec']:
train_classes.append(self.LABEL_DICT[key])
val_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] in ['comp']:
val_classes.append(self.LABEL_DICT[key])
test_classes = []
for key in self.LABEL_DICT.keys():
if key[:key.find('.')] not in ['comp', 'sci', 'rec']:
test_classes.append(self.LABEL_DICT[key])
return train_classes, val_classes, test_classes
class FewShotAmazon(BaseFewShotTextDataset):
LABEL_DICT = {
'Amazon_Instant_Video': 0,
'Apps_for_Android': 1,
'Automotive': 2,
'Baby': 3,
'Beauty': 4,
'Books': 5,
'CDs_and_Vinyl': 6,
'Cell_Phones_and_Accessories': 7,
'Clothing_Shoes_and_Jewelry': 8,
'Digital_Music': 9,
'Electronics': 10,
'Grocery_and_Gourmet_Food': 11,
'Health_and_Personal_Care': 12,
'Home_and_Kitchen': 13,
'Kindle_Store': 14,
'Movies_and_TV': 15,
'Musical_Instruments': 16,
'Office_Products': 17,
'Patio_Lawn_and_Garden': 18,
'Pet_Supplies': 19,
'Sports_and_Outdoors': 20,
'Tools_and_Home_Improvement': 21,
'Toys_and_Games': 22,
'Video_Games': 23
}
def make_classes(self):
train_classes = [2, 3, 4, 7, 11, 12, 13, 18, 19, 20]
val_classes = [1, 22, 23, 6, 9]
test_classes = [0, 5, 14, 15, 8, 10, 16, 17, 21]
return train_classes, val_classes, test_classes
class FewShotHuffPost(BaseFewShotTextDataset):
def make_classes(self):
train_classes = list(range(20))
val_classes = list(range(20,25))
test_classes = list(range(25,41))
return train_classes, val_classes, test_classes
class FewShotRCV1(BaseFewShotTextDataset):
def make_classes(self):
train_classes = [1, 2, 12, 15, 18, 20, 22, 25, 27, 32, 33, 34, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 66]
val_classes = [5, 24, 26, 28, 29, 31, 35, 23, 67, 36]
test_classes = [0, 3, 4, 6, 7, 8, 9, 10, 11, 13, 14, 16, 17, 19, 21, 30, 37,
62, 63, 64, 65, 68, 69, 70]
return train_classes, val_classes, test_classes
class FewShotReuters(BaseFewShotTextDataset):
def make_classes(self):
train_classes = list(range(15))
val_classes = list(range(15,20))
test_classes = list(range(20,31))
return train_classes, val_classes, test_classes
class FewShotFewRel(BaseFewShotTextDataset):
def make_classes(self):
# head=WORK_OF_ART validation/test split
train_classes = [0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 19, 21,
22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 43, 44, 45, 46, 48, 49, 50, 52, 53, 56, 57, 58,
59, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78]
val_classes = [7, 9, 17, 18, 20]
test_classes = [23, 29, 42, 47, 51, 54, 55, 60, 65, 79]
return train_classes, val_classes, test_classes
def _load_json(path, max_seq_len=512):
'''
load data file
@param path: str, path to the data file
@return data: list of examples
'''
label = {}
text_len = []
with open(path, 'r', errors='ignore') as f:
data = []
for line in f:
row = json.loads(line)
# count the number of examples per label
if int(row['label']) not in label:
label[int(row['label'])] = 1
else:
label[int(row['label'])] += 1
item = {'label': int(row['label']),
'text': row['text'][:max_seq_len]}
text_len.append(len(row['text']))
keys = ['head', 'tail', 'ebd_id']
for k in keys:
if k in row:
item[k] = row[k]
data.append(item)
return data
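# A minimal usage sketch; it only runs when this module is executed directly. The data path and
# episode sizes are assumptions, not shipped defaults. Each __getitem__ call above already
# returns a full episode, so a DataLoader with batch_size=1 simply prepends a batch dimension.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = FewShotReuters(
        data_root="data/reuters.json",  # hypothetical path to the preprocessed json-lines file
        n_ways=5, n_shots=5, n_queries=25, split="train",
    )
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    task = next(iter(loader))
    # Shapes follow the per-task comments above, with the DataLoader's batch dimension prepended.
    print(task["support_toks"].shape, task["query_labs"].shape)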
|
py
|
1a5bb1fa7d52cdca0fb7d891ae60b29595f3f4b1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author : Eeyhan
# @File : config.py
import os
import sys
import logging
import redis
# Project root directory
BASE_DIR = os.path.dirname(__file__)
"""
When adding your own USER_AGENT entries, follow the format of the existing data
"""
USER_AGENT = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.33 Safari/534.3 SE 2.X MetaSr 1.0',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Tri dent/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64;Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
]
"""
Proxy sites: when adding your own PROXY_URLS entries, follow the format of the existing data
"""
PROXY_URLS = [
{'url': 'https://www.xicidaili.com/nn', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/nt', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/wn', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/wt', 'type': 'xici'},
{'url': 'http://www.xiladaili.com/gaoni/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/http/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/https/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/putong/', 'type': 'xila'},
{'url': 'https://www.kuaidaili.com/free/intr/', 'type': 'kuaidaili'},
{'url': 'https://www.kuaidaili.com/free/inha/', 'type': 'kuaidaili'},
{'url': 'https://www.kuaidaili.com/ops/', 'type': 'kuaidaili_new'},
{'url': 'http://www.89ip.cn/', 'type': '89ip'},
{'url': 'http://www.qydaili.com/free/', 'type': 'qydaili'},
{'url': 'https://ip.ihuan.me/', 'type': 'ihuan'},
{'url': 'http://www.ip3366.net/', 'type': '3366'},
{'url': 'http://www.iphai.com/free/ng', 'type': 'iphai'},
{'url': 'http://www.iphai.com/free/wg', 'type': 'iphai'},
{'url': 'http://www.iphai.com/free/wp', 'type': 'iphai'},
{'url': 'http://www.goubanjia.com/', 'type': 'goubanjia'},
{'url': 'http://www.feiyiproxy.com/?page_id=1457', 'type': 'feiyi'},
{'url': 'http://www.shenjidaili.com/open/', 'type': 'shenji'},
{'url': 'http://ip.kxdaili.com/dailiip.html', 'type': 'kaixin'},
{'url': 'http://www.superfastip.com/welcome/freeIP', 'type': 'jisu'},
{'url': 'http://ip.jiangxianli.com/', 'type': 'jxl'},
{'url': 'https://lab.crossincode.com/proxy/', 'type': 'cross'},
{'url': 'http://www.nimadaili.com/gaoni/', 'type': 'nima'},
{'url': 'http://www.nimadaili.com/http/', 'type': 'nima'},
{'url': 'http://www.nimadaili.com/https/', 'type': 'nima'},
{'url': 'http://www.data5u.com/', 'type': 'da5u'},
{'url': 'https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list', 'type': 'github'},
    {'url': 'https://proxy.mimvp.com/freeopen.php', 'type': 'mipu'},  # the port is rendered as an image and needs recognition; solved
    {'url': 'http://www.xsdaili.com/', 'type': 'xsdaili'},  # requires crawling second-level pages; solved
    {'url': 'http://www.66ip.cn/mo.php?tqsl=1024', 'type': '66ip'},  # requires JS decryption; solved
]
"""
Proxy test sites: when adding your own test-proxy urls, follow the format of the existing data
"""
TEST_PROXY_URLS = [
    # The entries below query mainstream search engines for "ip"; they are relatively open and their results are fairly accurate
{'url': 'https://www.baidu.com/s?wd=ip', 'type': 'baidu'},
{'url': 'https://www.sogou.com/web?query=ip', 'type': 'sogou'},
{'url': 'https://www.so.com/s?q=ip&src=srp&fr=none&psid=2d511001ad6e91af893e0d7e561f1bba', 'type': 'so'},
{'url': 'https://mijisou.com/?q=ip&category_general=on&time_range=&language=zh-CN&pageno=1', 'type': 'miji'},
    # The entries below are dedicated public-IP lookup sites; do not hit them too frequently
{'url': 'http://pv.sohu.com/cityjson', 'type': 'sohu'},
{'url': 'http://ip.taobao.com/ipSearch.html', 'type': 'taobao'},
{'url': 'https://myip.ipip.net/', 'type': 'myip'},
{'url': 'http://httpbin.org/ip', 'type': 'httpbin'},
{'url': 'http://ip.chinaz.com/', 'type': 'chinaz'},
{'url': 'https://www.ipip.net/ip.html', 'type': 'ipip'},
{'url': 'https://ip.cn/', 'type': 'ipcn'},
{'url': 'https://tool.lu/ip/', 'type': 'luip'},
{'url': 'http://api.online-service.vip/ip/me', 'type': 'onlineservice'},
{'url': 'https://ip.ttt.sh/', 'type': 'ttt'},
    # {'url': 'http://icanhazip.com/', 'type': 'ican'},  # this site sometimes returns an IPv6 address, which skews the result
]
"""
Job sites: when adding your own job-site urls, follow the format of the existing data
"""
# ##### Zhaopin (zhaopin.com) and related settings ######
'If any of these expire, re-capture them with your browser devtools / a packet-capture tool'
# Zhaopin request id
ZHAOPIN_CLIENT_ID = 'c309970e-8633-44e6-8ac6-98f8eda5533f'
# Zhuopin (highpin.cn) request id
ZHUOPIN_CLIENT_ID = 'cb0aeec2-9853-4525-92a4-3e01495f95a3'
# Lagou job-detail request parameter
'If this expires, re-capture it with your browser devtools / a packet-capture tool'
LAGOU_SHOW = '090fea62d1b84e1d982a52cf822b20bc'
# BOSS Zhipin cookie
'If this expires, re-capture it with your browser devtools; only the __zp_stoken__ field is needed'
BOSS_COOKIE = {
'cookie': '__zp_stoken__=1c48c63zVGOGKBxVr5XR52KDllup9rYfQmi%2F3uzL9OjJwVnyJRn6XQO0W5ldLp2gTcuH0H05s8iAIhiHR%2BUSnzxWSg%3D%3D'
}
# Major job sites
'Customizable and extensible, but each new entry needs a matching parser; args may be empty - when not empty it was only used for earlier testing'
REQEUST_URLS = [
{'url': 'https://www.kanzhun.com/jobl/p{p}/?q={q}', 'args': 'python', 'type': 'kanzhun'},
{'url': 'https://www.zhipin.com/c100010000/?query={q}&page={p}', 'args': 'python', 'type': 'boss'},
{'url': 'https://search.51job.com/list/000000,000000,0000,00,9,99,{q},2,{p}.html', 'args': 'python',
'type': '51job'},
{
'url': 'https://fe-api.zhaopin.com/c/i/sou?start={p}&pageSize=90&cityId={c}&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw={q}&kt=3&rt=76ba428eb3a249b3ae6d859337599c01&_v=0.89754140&x-zp-page-request-id=96001ea406a04e0da1e8b0230d78e5e8-1564897992518-310307&x-zp-client-id=' + ZHAOPIN_CLIENT_ID,
'args': 'python', 'type': 'zhilian'},
{'url': 'https://data.highpin.cn/api/JobSearch/Search?x-zp-client-id=%s' % ZHUOPIN_CLIENT_ID,
'args': 'python', 'type': 'zhuopin'},
{'url': 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false', 'args': 'python',
'type': 'lagou'},
{
'url': 'https://so.dajie.com/job/ajax/search/filter?keyword={q}&order=0&city=&recruitType=&salary=&experience=&page={p}&positionFunction=&_CSRFToken=&ajax=1',
'args': 'web前端', 'type': 'dajie'},
{'url': 'http://www.job5156.com/s/result/ajax.json?keyword={q}&keywordType=0&sortBy=0&pageNo={p}', 'args': 'python',
'type': 'zhitong'},
{'url': 'http://s.cjol.com/service/joblistjson.aspx', 'args': 'java', 'type': 'cjol'},
{'url': 'http://s.yingjiesheng.com/search.php?word={q}&start={p}&sort=score', 'args': 'python', 'type': 'yjs'},
{'url': 'https://www.jobcn.com/search/result_servlet.ujson?s=search%2Ftop', 'args': 'java', 'type': 'jobcn'},
{'url': 'http://www.jiaoshizhaopin.net/jobs/jobs-list.php?key={q}&district_id=&page={p}', 'args': '小学语文',
'type': 'jiaoshizhaopin'},
{'url': 'https://china.baixing.com/search/?page={p}&query={q}', 'args': '乘务员', 'type': 'baixing'},
{'url': 'http://www.51shuobo.com/s/result/kt1_kw-{q}_pn{p}/', 'args': '工程师', 'type': 'shuobo'},
{
'url': 'https://www.liepin.com/zhaopin/?init=-1&headckid=78dfb15a24a00c28&fromSearchBtn=2&ckid=78dfb15a24a00c28°radeFlag=0&key={q}&siTag=I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_unknown&d_ckId=86780e1c81b976658deb5a339a4071ec&d_curPage=1&d_pageSize=40&d_headId=86780e1c81b976658deb5a339a4071ec&curPage={p}',
'args': 'python', 'type': 'liepin'},
    {'url': 'http://{c}.ganji.com/zp{q}/o{p}/', 'args': 'jiaoshi', 'type': 'ganji'},  # general listings
    {'url': 'http://{c}.ganji.com/zhaopin/s/f{p}/_{q}/', 'args': '软件', 'type': 'ganji_it'},  # IT listings
{'url': 'https://{c}.58.com/job/pn{p}/?key={q}&classpolicy=main_null,service_B&final=1&jump=1', 'args': '工程师',
'type': '58'},
    {'url': 'https://search.chinahr.com/{c}/job/pn{p}/?key={q}', 'args': 'java', 'type': 'chinahr'},  # responses are sometimes very slow
{'url': 'http://www.chinahr.com/sou/?orderField=relate&keyword={q}&page={p}', 'args': 'java',
'type': 'chinahr_old'},
{'url': 'http://www.job1001.com/SearchResult.php?page={p}®ion_1=&jtzw={q}', 'args': 'java', 'type': 'job1001'},
{'url': 'https://www.linkedin.com/jobs-guest/jobs/api/jobPostings/jobs?keywords={q}&location={c}&start={p}',
'args': 'python', 'type': 'linkin'},
{'url': 'http://www.doumi.com/search/o1/kwd_{q}/', 'args': '服务员', 'type': 'doumi'},
{'url': 'http://www.gongzuochong.com/wapi/jobs/search?city={c}&categoryId=-1&keyword={q}&page={p}', 'args': '服务员',
'type': 'gongzuochong'},
{'url': 'https://hr.ofweek.com/jobs/?key={q}&page={p}', 'args': 'python', 'type': 'ofweek'},
{'url': 'https://www.telecomhr.com/jobs/index.php?bc=&sc=&jobarea=&keyword={q}&page={p}', 'args': '软件',
'type': 'telecomhr'},
{'url': 'https://www.tndbjob.com/job/list/0-0-0-0_0_0_0_0_0_0-0-0-0-{p}.html?{q}', 'args': '服务员',
'type': 'tndbjob'},
{'url': 'https://www.wztxjob.com/job/list/0-0-0-0_0_0_0_0_0_0-0-0-0-{p}.html?{q}', 'args': '服务员',
'type': 'wztxjob'},
{'url': 'https://www.qcrcw.net.cn/job/list/0-0-0-0_0_0_0_0_0_0-0-0-0-{p}.html?{q}', 'args': '服务员', 'type': 'qcrcw'},
{'url': 'https://www.pcbjob.com/job/list/0-0-0-0_0_0_0_0_0_0-0-0-0-{p}.html?{q}', 'args': '经理', 'type': 'pcbjob'},
{
'url': 'https://zhaopin.baidu.com/api/qzasync?query={q}&city={c}&is_adq=1&pcmod=1&token={token}&pn={p}&rn=20'
, 'args': 'python', 'type': 'baidu'},
{'url': 'https://zhaopin.baidu.com/api/jianzhiwiseasync?query={q}&city={c}&token={token}&pn={p}&rn=10',
'args': '服务员', 'type': 'baidu_jianzhi'},
{'url': 'http://www.jiaoshi.com.cn/jobs/jobs_list/key/{q}/page/{p}.html', 'args': '老师', 'type': 'jiaoshi'},
]
# 'http://pibao.pullwave.com:8080/index.php?name=%s&page=%s'
# Redis connection pools
'You may point these at a different database if you prefer'
POOL = redis.ConnectionPool(host='127.0.0.1', max_connections=100, decode_responses=True, db=1)
POOL2 = redis.ConnectionPool(host='127.0.0.1', max_connections=100, decode_responses=True, db=2)
POOL3 = redis.ConnectionPool(host='127.0.0.1', max_connections=100, decode_responses=True, db=1)
# Search keywords
'You can add your own keywords. The list below is the union of job titles across the target sites; using all of them is not recommended - pick a small subset that fits your actual needs'
# 'used during testing'
SEARCH_ARGS = ['Golang', 'C', '嵌入式', 'Hadoop', '图像处理', '架构师', 'Perl', 'iOS', '自动化测试', '技术总监', '人工智能', '系统管理员', 'WP',
'DBA', 'C++', '运维工程师', '驱动开发', '售前工程师', '网络安全', '语音识别', '前端开发', 'FAE', 'FPGA开发', 'CTO', 'U3D', 'Flash开发',
'移动开发', '网络工程师', 'PHP', '电气工程师', '数据挖掘', 'HTML5', 'ETL工程师', 'Ruby', '.NET', '通信技术', '测试工程师', 'ARM开发',
'Delphi', '电气设计工程师', '单片机', '自然语言处理', '算法工程师', 'PCB工艺', '运维开发', 'DSP开发', 'C#', 'web前端', 'Node.js',
'JavaScript', 'IT技术支持', '售后工程师', 'COCOS2DX', '系统集成', 'Java', '数据分析', 'Android', '自动化', '技术经理', '机器学习',
'VB', '数据采集', '深度学习', 'Python', '电路设计', 'Erlang', '图像算法', '射频工程师', '移动web前端', '图像识别']
# SEARCH_ARGS = ['运维', '总编', '土木工程', '薪资福利经理', '文案策划', '图像处理', '推广', '内容审核', '基金', '理货员', '搜索', 'U3D', '美术设计师', '软件测试',
# '药品注册', '高中教师', '零售', '组长', '药师', '交易员', 'DB2', 'SEO', '篮球/羽毛球', '售后工程师', '物仓项目', '外贸业务员', '设备工程师', '配音',
# 'HTML5', '采购工程师', '药品生产', '品牌合作', '培训', '旅游计调', '硬件工程师', '医学影像', '广告', 'C++', '工程资料', '通信研发工程师',
# '货运代理专员', '外贸', 'ETL工程师', '市场策划', 'ios', '仓储物料', '质检员', 'MySQL', 'WP', '信审', '机械制图', '电器维修', '进出口贸易',
# '采购专员', '演员', '车工', '自动化测试', '语音/视频/图形开发', '内容编辑', '秘书', 'HR', '保洁', '医药研发', '婚礼', '整形师', 'DSP开发',
# '通信技术工程师', '磨工', '审核', '工业工程师', '建筑工程师', '音频编辑', '铣工', '行政经理', '铆工', '制片', '定损理赔', '旅游', '保险理赔', '药剂师',
# '医生', '运输', '育婴师', '广告销售', '催乳师', '英语', 'Ruby', '汽车销售与制造', '自然语言处理', '保姆', '剧情设计', 'JAVA', '灰盒测试', '发型师',
# '数据分析师', '广告创意', '技术总监', '留学', '幼教', '游戏测试', 'PCB', 'WEB安全', '面料', '辅导员', '保安', '用户研究', '中介', '公司法务',
# '音乐', '手机维修', '行业分析', '供应链总监', '生物制药', '跆拳道', '写作', '病毒分析', '教师', '律师', '婚庆', '大堂经理', '无线交互设计师', '核销员',
# '嵌入式', '光网络工程师', '游戏策划', '店员/营业员', '康复治疗师', '培训师', '健身', '药学编辑', '护理', '后勤', '光传输工程师', 'COO', '票务',
# '绩效考核经理', '自媒体', '化工工程师', '厨师', '互联网金融', '售前工程师', '产品经理', '网页设计师', '可靠度工程师', '机械工程师', '语音识别', '区块链',
# '锅炉工', '普工', '投资', '文案', '药店', '产品助理', 'IDC', '美工', '铸造', '中医', '多媒体设计师', '前端开发', 'CDN', '公关', '汽车',
# '海外运营', '采购', '电气工程师', '编导', '贷款', '设计装修', '包装设计', '视觉设计', '有线传输工程师', '原画师', '单证员', '地产评估', '副主编', '调研',
# '供应链专员', '校长', 'Java开发', '化工', '物业招商管理', '主任', '茶艺师', '通信工程师', '网络客服', '化妆/造型', '主管', 'Golang', 'FPGA',
# '经理', '融资', '影视媒体', '二手车评估师', '主持人', '信托', '线下拓展', '陈列设计', 'HRD/HRM', '瑜伽', '电路设计', '并购', '财务', '礼仪迎宾',
# '买手', '汽车销售', '运维工程师', '算法工程师', '权证', 'ASP', '汽车金融', '导演', '橱柜设计', '数据挖掘', '西点师', 'IT', '市场营销', '督导',
# '游戏原画', 'UX设计师', '财务咨询顾问', '微博运营', '内衣设计', '分析师', '催收员', '咨询总监', '租赁', '班主任', '移动产品', '美容', '编剧', '顾问',
# '车身设计', 'B超', '机械设计', '开发报建', 'H5游戏开发', '保险', '游戏美工', 'Delphi', '放射科医师', '汽车改装', '调度', '数据通信工程师', '学徒',
# '临床协调', '机械保养', 'Hive', '验光师', '网络工程师', '客户代表', '咨询项目管理', '体系工程师', '性能测试', '机电工程师', '工业设计', '土建', '清算',
# '汽车设计', '项目总监', '珠宝', '快递', '通信测试工程师', '深度学习', '行政', '社区运营', '模具工程师', '底盘设计', '注塑工', '创意', '动力系统设计',
# '城市规划', '公关总监', 'Web前端', '产品总监', '负责人', '网页交互设计师', '文员', '移动通信工程师', '系统集成', '合伙人', 'APP设计师', '白盒测试',
# '销售', '技术支持', '翻译', '数据分析', '司机', '人事/HR', '销售代表', '模具设计', '客服主管', '通信设备工程师', 'Flash', '室内设计', '质量工程师',
# '送餐员', 'CFO', '后厨', '放映', '收银', '影视制作', '专利', '预结算', '法律', '股票', '总监', '报检员', '理财', '电梯工', '安全专家', '收银员',
# '导购', '美甲师', '后端开发', '工程造价', '生产总监', '医师', '主播', '餐饮', 'Go', '美术', '合规稽查', '市场顾问', '服装设计', '招聘', '操盘手',
# '木工', '系统工程师', '漆工', '喷塑工', '健康整形', '零部件', 'web前端', '精益工程师', '置业', '小游戏开发', '财务分析', '策划', '咖啡师', '实习生',
# '特效', '造价员', '期货', '氩弧焊工', '银行', '记者', '投资总监', 'BI工程师', '物流经理', '硬件测试', '教练', '冲压工程师', 'ETL', '非视觉设计',
# '担保', '安全员', 'CEO', '助理', 'VB', 'JavaScript', '模特', '区域总监', '证券', '配菜打荷', '厂长', '会计', '制程工程师', '售前咨询',
# '媒介投放', '外语', 'F5', '签证', '投融资', '资产管理', '车险', '报检报关', '游戏动画', '总装工程师', '地产置业', '游戏文案', '店长', '投资助理',
# '教育', '机械结构工程师', '测试工程师', '护士长', '图像识别', '财务主管', '花艺师', '录音', '运营', '运营专员', '系统管理员', '系统安全', '实施工程师',
# 'COCOS2D-X', '仓储管理', '空调工', '活动运营', 'Unity 3D培训讲师', '媒介', '折弯工', '助教', '游戏陪练', '女装设计', '用户研究经理', '报关员',
# '资料员', '月嫂', '电子工程设计', '男装', '通信电源工程师', 'IT咨询顾问', '电镀工', '策略', '交互设计', '算法', '热传导', '医疗器械', '全栈工程师',
# '编辑', '物流', '课程设计', '实验室技术员', '射频工程师', '银行柜员', '医学', '地产项目总监', '核心网工程师', 'CAD', '针灸推拿', '单片机', '广告制作',
# '建造师', '商务拓展', '医疗器械销售', '游戏角色', '教务', '导游', '项目主管', '项目助理', '精算师', '渠道推广', '酒店前台', '柜员', '企业管理咨询',
# '活动策划', '包装工', '品牌专员', '网页设计', '集装箱', '汽车维修', '自动化', 'UI设计', 'Node.js', '知识产权', '纹绣师', 'SEM', '理疗师',
# '工程监理', '渠道销售', '食品/饮料', '钳工', '猎头', '页游推广', '需求分析', '跟单', '游戏后端开发', '微信运营', '电商产品', 'HRBP', '典当', '插画师',
# '医美', '餐饮店长', 'Java', '分拣员', '工艺工程师', '房地产评估', 'C++游戏开发', '土建工程师', '供应链', '会籍', '领班', '地产招投标', '驱动开发',
# '硬件', 'Hadoop', '制版', '临床', '组装工', '叉车', 'Python', '房地产', '钣金', '前台', '网络推广', '.NET', '生产员', '陈列员',
# '游戏界面设计师', '游戏场景', 'DBA', '电工', 'CTO', '摄影师', '机械设计师', 'DJ', '广告投放', '电气设计工程师', 'ARM开发', '新媒体', '牙医',
# '移动开发', '物业维修', '出版发行', '校对录入', '涂料', '预算员', '酒店管理', '管理', '机修工', '后期制作', '销售工程师', '信贷', '副总裁', '经纪人',
# '品牌策划', '理赔', '查勘定损', '夹具工程师', 'web后端', '大客户销售', 'C++培训讲师', '化学', '会务会展', '施工员', '婚礼策划师', '黑盒测试',
# '证券经纪人', '游戏界面', '老师', '仓储', '电信交换工程师', '同声传译', '消防', '生产设备管理', '董事长', '机械设备工程师', '家具设计', '汽车配件销售',
# '弱电工程师', '审核员', '建筑设计师', '文秘', '资信评估', '手游推广', 'Erlang', '审计', '商务总监', '机器视觉', '园林设计', '内外饰设计工程师', '出纳',
# '金融产品', '网站运营', 'COCOS2DX', '电子工程师', '设计总监', '市场调研', '人力资源专员', '采编', '结构工程师', '面料辅料', '机器学习', 'PHP',
# '新媒体运营', '故障分析师', '景观设计', '注塑工程师', '营业员', '平面设计', '家居设计', '财会', 'Android', '架构师', '网络安全', '通信项目', '钻工',
# 'C', '仓储物料专员', '计调', '电竞讲师', '化妆师', 'PLC', '营养师', '银行销售', '税务', '美术指导', 'Oracle', '剪辑', '家政', '数控',
# '证券分析', '主编', '物业', '建筑设计', 'UI设计师', 'C#', '导医', '生产跟单', '会议活动执行', '暖通', 'html5', '摄影', '销售运营', 'IOS',
# '法务', '人力资源', '电信网络工程师', '硬件交互', '酒店', '投资VP', '抛光', 'Perl', '投后管理', '体育教师', '运维开发工程师', '大客户代表', '生产营运',
# '汽车售后服务', '人工智能', 'FAE', '材料工程师', '淘宝客服', '数据仓库', '制片人', '光通信工程师', '配送', '焊工', '仓库文员', '信用卡', '医疗器械研究',
# '焊接工程师', '电竞主持', '选址开发', 'MongoDB', 'SQLServer', '图像算法', '家电维修', '客服', '无线产品设计师', '视频编辑', '新零售', '测试开发',
# '电商运营', '售后', '撰稿人', '服务员', '化妆品', 'Shell', '风控', '铲车', '无线射频工程师', '动画设计', '总助', '数据', 'CMO', '采购助理',
# '行政总监']
# Interval for periodically auto-saving the jobs crawled so far
# Configurable, in seconds. Estimate it roughly from how long crawling your configured
# target sites takes, so data is not left unsaved when the crawl finishes.
INTERVAL = 30
# Crawl depth: the maximum page number
# Configurable and must be greater than 0; in practice each site returns at most about 50 pages of results.
END_PAGE = 30
# Maximum number of coroutines (gevent pool size)
# Tune to your machine, and keep the gevent pool small: greenlets are so concurrent that a large
# pool overwhelms the redis connection pool. As a rule, avoid crawling with coroutines; it is error-prone.
GEVENT_POOL = 10
# Maximum thread pool size
THREAD_POOL = (os.cpu_count() or 1) * 4
# Logging level
LOG_LEVEL = logging.INFO
# Log file name
LOG_NAME = 'crawling.log'
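# Hypothetical sketch (not part of the crawler itself): one way the settings above can
# be wired together -- a daemon thread autosaves every INTERVAL seconds while the crawl
# runs elsewhere. The helper name below is illustrative.
if __name__ == '__main__':
    import threading
    import time
    def _autosave_loop():
        # persist whatever has been crawled so far, once every INTERVAL seconds
        while True:
            time.sleep(INTERVAL)
            logging.log(LOG_LEVEL, 'autosaving crawled jobs ...')
    threading.Thread(target=_autosave_loop, daemon=True).start()
    time.sleep(2 * INTERVAL)  # stand-in for the real crawl loop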
|
py
|
1a5bb31e1c0b71126e7c36f0b148a97a7f57d669
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBashKernel(PythonPackage):
"""A Jupyter kernel for bash."""
homepage = "https://github.com/takluyver/bash_kernel"
pypi = "bash_kernel/bash_kernel-0.7.2.tar.gz"
version('0.7.2', sha256='a08c84eddd8179de5234105821fd5cc210015671a0bd3cd0bc4f631c475e1670')
depends_on('py-flit', type='build')
depends_on('[email protected]:', type=('build', 'run'))
|
py
|
1a5bb4173224494a09f45979f6c0c4bf7c7b9c64
|
import numpy as np
import random
from keras.preprocessing.sequence import pad_sequences
def readfile(filename):
'''
read file
return format :
[ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ]
'''
f = open(filename)
sentences = []
sentence = []
for line in f:
if len(line)==0 or line.startswith('-DOCSTART') or line[0]=="\n":
if len(sentence) > 0:
sentences.append(sentence)
sentence = []
continue
splits = line.split(' ')
sentence.append([splits[0],splits[-1]])
if len(sentence) >0:
sentences.append(sentence)
sentence = []
return sentences
def getCasing(word, caseLookup):
casing = 'other'
numDigits = 0
for char in word:
if char.isdigit():
numDigits += 1
digitFraction = numDigits / float(len(word))
if word.isdigit(): #Is a digit
casing = 'numeric'
elif digitFraction > 0.5:
casing = 'mainly_numeric'
elif word.islower(): #All lower case
casing = 'allLower'
elif word.isupper(): #All upper case
casing = 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
casing = 'initialUpper'
elif numDigits > 0:
casing = 'contains_digit'
return caseLookup[casing]
def createBatches(data):
l = []
for i in data:
l.append(len(i[0]))
l = set(l)
batches = []
batch_len = []
z = 0
for i in l:
for batch in data:
if len(batch[0]) == i:
batches.append(batch)
z += 1
batch_len.append(z)
return batches,batch_len
def createMatrices(sentences, word2Idx, label2Idx, case2Idx,char2Idx):
unknownIdx = word2Idx['UNKNOWN_TOKEN']
paddingIdx = word2Idx['PADDING_TOKEN']
dataset = []
wordCount = 0
unknownWordCount = 0
for sentence in sentences:
wordIndices = []
caseIndices = []
charIndices = []
labelIndices = []
for word,char,label in sentence:
wordCount += 1
if word in word2Idx:
wordIdx = word2Idx[word]
elif word.lower() in word2Idx:
wordIdx = word2Idx[word.lower()]
else:
wordIdx = unknownIdx
unknownWordCount += 1
charIdx = []
for x in char:
charIdx.append(char2Idx[x])
#Get the label and map to int
wordIndices.append(wordIdx)
caseIndices.append(getCasing(word, case2Idx))
charIndices.append(charIdx)
labelIndices.append(label2Idx[label])
dataset.append([wordIndices, caseIndices, charIndices, labelIndices])
return dataset
def iterate_minibatches(dataset,batch_len):
start = 0
for i in batch_len:
tokens = []
caseing = []
char = []
labels = []
data = dataset[start:i]
start = i
for dt in data:
t,c,ch,l = dt
l = np.expand_dims(l,-1)
tokens.append(t)
caseing.append(c)
char.append(ch)
labels.append(l)
yield np.asarray(labels),np.asarray(tokens),np.asarray(caseing),np.asarray(char)
def addCharInformatioin(Sentences):
for i,sentence in enumerate(Sentences):
for j,data in enumerate(sentence):
chars = [c for c in data[0]]
Sentences[i][j] = [data[0],chars,data[1]]
return Sentences
def padding(Sentences):
maxlen = 52
for sentence in Sentences:
char = sentence[2]
for x in char:
maxlen = max(maxlen,len(x))
for i,sentence in enumerate(Sentences):
        Sentences[i][2] = pad_sequences(Sentences[i][2], maxlen, padding='post')  # pad to the longest char sequence seen (never below 52)
return Sentences
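# Hypothetical end-to-end sketch of the helpers above (not part of the original module):
# the lookup tables and index values are made up purely for illustration.
if __name__ == '__main__':
    sentences = [[['EU', 'B-ORG'], ['rejects', 'O'], ['it', 'O']]]
    sentences = addCharInformatioin(sentences)   # each token becomes [word, [chars], label]
    case2Idx = {'numeric': 0, 'allLower': 1, 'allUpper': 2, 'initialUpper': 3,
                'other': 4, 'mainly_numeric': 5, 'contains_digit': 6, 'PADDING_TOKEN': 7}
    word2Idx = {'PADDING_TOKEN': 0, 'UNKNOWN_TOKEN': 1, 'eu': 2, 'rejects': 3}
    label2Idx = {'B-ORG': 0, 'O': 1}
    char2Idx = {c: i for i, c in enumerate('EUrejctsi')}
    dataset = padding(createMatrices(sentences, word2Idx, label2Idx, case2Idx, char2Idx))
    batches, batch_len = createBatches(dataset)
    for labels, tokens, casing, chars in iterate_minibatches(batches, batch_len):
        print(tokens.shape, labels.shape)        # one batch per distinct sentence length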
|
py
|
1a5bb5e7cda42ad880d648a640ddee34281c4878
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: map
.. moduleauthor:: Hendrix Demers <[email protected]>
Map used in the phase analysis module.
"""
###############################################################################
# Copyright 2016 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import os.path
import csv
# Third party modules.
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
import matplotlib
import matplotlib.pyplot as plt
# Local modules.
# Project modules
# Globals and constants variables.
class PhaseMap(object):
def __init__(self, phase_map_name, phase_analysis, is_dilation_erosion=False):
self.phase_map_name = phase_map_name
self.phase_analysis = phase_analysis
self.is_dilation_erosion = is_dilation_erosion
self.phases = {}
def add_phase(self, phase, color_name, label=None):
if label is None:
label = phase.name
self.phases[label] = ([phase], color_name, True)
def add_phases(self, label, phases, color_name, union=True):
self.phases[label] = (phases, color_name, union)
def display_map(self, label=None, use_gaussian_filter=False, legend=None, display_now=True):
        image = self.get_image(label, use_gaussian_filter)
plt.figure()
if label is not None:
plt.title(label)
plt.imshow(image, aspect='equal')
plt.axis('off')
if label is None:
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def display_no_phase_map(self, display_now=True):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"),
matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def display_overlap_map(self, display_now=True):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
if display_now:
self.show()
def show(self):
plt.show()
def save_map(self, figures_path, label=None, use_gaussian_filter=False, legend=None):
        image = self.get_image(label, use_gaussian_filter)
plt.figure()
if label is not None:
plt.title(label)
plt.imshow(image, aspect='equal')
plt.axis('off')
if label is None:
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
if label is None:
label = "allphases"
file_path = os.path.join(figures_path, self.phase_map_name + label + ".png")
plt.savefig(file_path)
plt.close()
def save_no_phase_map(self, figures_path):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"),
matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
file_path = os.path.join(figures_path, self.phase_map_name + "_nophase" + ".png")
plt.savefig(file_path)
plt.close()
def save_overlap_map(self, figures_path):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
file_path = os.path.join(figures_path, self.phase_map_name + "_overlap" + ".png")
plt.savefig(file_path)
plt.close()
def save_phases_fraction(self, figures_path):
phase_fractions = self.get_phases_fraction()
file_path = os.path.join(figures_path, self.phase_map_name + "_phases_fraction" + ".csv")
with open(file_path, 'w', newline='\n') as output_file:
writer = csv.writer(output_file)
header_row = ["Phase", "Pixel fraction"]
writer.writerow(header_row)
for phase_name in phase_fractions:
row = []
row.append(phase_name)
row.append(phase_fractions[phase_name])
writer.writerow(row)
def get_image(self, label=None, use_gaussian_filter=False):
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
if label is None:
for label in self.phases:
phases, color_name, union = self.phases[label]
color = self._get_rgb(color_name)
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
else:
phases, color_name, union = self.phases[label]
color = self._get_rgb(color_name)
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
image = Image.fromarray(np.uint8(image_data*255.0))
if use_gaussian_filter:
image_filtered = gaussian_filter(image, sigma=(1, 1, 0), mode='nearest', order=0)
image = Image.fromarray(image_filtered)
return image
def get_no_phase_image(self):
color = (1, 1, 1)
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
for label in self.phases:
phases, _color_name, union = self.phases[label]
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
image = Image.fromarray(np.uint8(image_data*255.0))
return image
def get_overlap_phase_image(self):
color = (1, 1, 1)
width, height = self.phase_analysis.get_width_height()
image_data = np.zeros((width, height, 3), dtype=np.float32)
for label in self.phases:
phases, _color_name, union = self.phases[label]
data = self.phase_analysis.get_phase_data(phases, color, self.is_dilation_erosion, union)
image_data += data
logging.debug(image_data.shape)
logging.debug(np.min(image_data))
logging.debug(np.max(image_data))
mask = image_data > 1
logging.debug(np.min(mask))
logging.debug(np.max(mask))
image_data[~mask] = 0
logging.debug(np.min(image_data))
logging.debug(np.max(image_data))
image = Image.fromarray(np.uint8(image_data*255.0))
return image
def get_phases_fraction(self):
phase_fractions = {}
for label in self.phases:
phases, _color_name, union = self.phases[label]
phase_fraction = self.phase_analysis.get_phase_fraction(phases, self.is_dilation_erosion, union)
phase_fractions[label] = phase_fraction
return phase_fractions
def get_legend(self):
patches = []
labels = []
for label in self.phases:
labels.append(label)
_phase, color_name, _union = self.phases[label]
color = self._get_rgb(color_name)
if color == (1, 1, 1):
patches.append(matplotlib.patches.Patch(edgecolor='black', facecolor='white'))
else:
patches.append(matplotlib.patches.Patch(color=color))
return patches, labels
def _get_rgb(self, name):
rgb = matplotlib.colors.hex2color(matplotlib.colors.cnames[name])
return rgb
def save_image(self, file_path, use_gaussian_filter=False):
        image = self.get_image(use_gaussian_filter=use_gaussian_filter)
image.save(file_path)
def show_image(self, file_path, use_gaussian_filter=False, legend=None, save_only=False):
        image = self.get_image(use_gaussian_filter=use_gaussian_filter)
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
if legend is None:
patches, labels = self.get_legend()
else:
patches, labels = legend
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
if save_only:
plt.close()
def create_no_phase_image(self, file_path):
image = self.get_no_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(color="black"), matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["No phase", "Phases"]
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
def create_overlap_phase_image(self, file_path):
image = self.get_overlap_phase_image()
plt.figure()
plt.imshow(image, aspect='equal')
plt.axis('off')
patches = [matplotlib.patches.Patch(edgecolor='black', facecolor='white')]
labels = ["Overlap phases"]
plt.figlegend(patches, labels, 'upper right')
plt.savefig(file_path)
def save_phase_only(phase_map, phase, graphic_path, color):
"""
Save an png image of one phase.
.. todo:: Find why the parameter is phase_map, should we pass the width and height only?
:param phase_map: get the width and height of the image
:param phase: phase object to create a image
:param graphic_path: path to save the image
:param color: color to use for the image
"""
phase_image = PhaseMap(phase_map.width, phase_map.height)
phase_image.add_phase(phase, color)
filename = r'%s_%s_%s.png' % (phase_map.sampleName, phase_map.dataType, phase.name)
file_path = os.path.join(graphic_path, filename)
phase_image.save_image(file_path)
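# Hypothetical usage sketch (not from the original project). It assumes a phase_analysis
# object exposing get_width_height(), get_phase_data() and get_phase_fraction() as relied
# upon above; the phase objects, colours and output directory are illustrative.
#
#   phase_map = PhaseMap("sample01", phase_analysis)
#   phase_map.add_phase(ferrite_phase, "red")
#   phase_map.add_phases("carbides", [m23c6_phase, m6c_phase], "blue", union=True)
#   phase_map.save_map("figures")              # all phases with a colour legend
#   phase_map.save_overlap_map("figures")      # pixels claimed by more than one phase
#   phase_map.save_phases_fraction("figures")  # CSV of pixel fractions per phase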
|
py
|
1a5bb668d598b0f5c678711eca088f9983040320
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: mul_ad"""
import akg
from akg.ops.math import mul
from akg.utils import custom_tiling as ct_util
mul_ad_set_dim_map = {
}
def mul_ad_set_dim_func(head, a, b):
key = []
key.append(tuple(a.shape))
key.append(tuple(b.shape))
key.append(a.dtype)
hash_key = str(tuple(key))
if hash_key in mul_ad_set_dim_map.keys():
return ct_util.set_dims(mul_ad_set_dim_map[hash_key]), hash_key
else:
return "", hash_key
@ct_util.reg_set_dim_func(mul_ad_set_dim_func)
def mul_ad(head, a, b):
output = mul.mul(a, b)
jacs_ = list(akg.differentiate(output, [a], head))
return jacs_[0]
|
py
|
1a5bb684315bfbe916506b32ea10c1e161ef928d
|
class AddressTranslator(object):
__slots__ = ('_rva', '_owner', )
"""
Mediates address translations between typed addresses such as RAW, RVA, LVA, MVA and VA
including address owner and its state (linked or mapped)
Semantics::
owner - object associated with the address
(any object class based on `cle.Backend`)
owner mapping state - sparse object can be either mapped or not
(actual object's image base VA to be considered valid)
RAW - offset (index) inside a file stream
VA - address inside process flat virtual memory space
RVA - address relative to the object's segment base
(segment base normalized virtual address)
LVA - linked VA (linker)
MVA - mapped VA (loader)
"""
def __init__(self, rva, owner):
"""
:param rva: virtual address relative to owner's object image base
:type rva: int
:param owner: The object owner address relates to
:type owner: cle.Backend
"""
self._rva, self._owner = rva, owner
@classmethod
def from_lva(cls, lva, owner):
"""
Loads address translator with LVA
"""
return cls(lva - owner.linked_base, owner)
@classmethod
def from_mva(cls, mva, owner):
"""
Loads address translator with MVA
"""
return cls(mva - owner.mapped_base, owner)
@classmethod
def from_rva(cls, rva, owner):
"""
Loads address translator with RVA
"""
return cls(rva, owner)
@classmethod
def from_raw(cls, raw, owner):
"""
Loads address translator with RAW address
"""
return cls(owner.offset_to_addr(raw) - (owner.mapped_base if owner._is_mapped else owner.linked_base), owner)
from_linked_va = from_lva
from_va = from_mapped_va = from_mva
from_relative_va = from_rva
def to_lva(self):
"""
VA -> LVA
:rtype: int
"""
return self._rva + self._owner.linked_base
def to_mva(self):
"""
RVA -> MVA
:rtype: int
"""
return self._rva + self._owner.mapped_base
def to_rva(self):
"""
RVA -> RVA
:rtype: int
"""
return self._rva
def to_raw(self):
"""
RVA -> RAW
:rtype: int
"""
return self._owner.addr_to_offset(
self._rva + (self._owner.mapped_base if self._owner._is_mapped else self._owner.linked_base)
)
to_linked_va = to_lva
to_va = to_mapped_va = to_mva
to_relative_va = to_rva
AT = AddressTranslator
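# Hypothetical usage sketch (not part of cle): a minimal stand-in owner exposing only
# the attributes the translator relies on, to illustrate the LVA/MVA/RVA/RAW round
# trips described in the class docstring. The base addresses are made up.
if __name__ == '__main__':
    class _FakeOwner(object):
        linked_base = 0x400000          # address the object was linked at
        mapped_base = 0x7f0000000000    # address the loader actually mapped it to
        _is_mapped = True
        def offset_to_addr(self, offset):   # RAW -> VA (identity-style mapping for the sketch)
            return self.mapped_base + offset
        def addr_to_offset(self, addr):     # VA -> RAW
            return addr - self.mapped_base
    at = AT.from_lva(0x401000, _FakeOwner())
    print(hex(at.to_rva()))   # 0x1000: relative to the image base
    print(hex(at.to_mva()))   # 0x7f0000001000: where it lives once mapped
    print(hex(at.to_raw()))   # 0x1000: file offset under this fake mapping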
|
py
|
1a5bb6df08fb48c16d627acacdecade91cb1567b
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax
import jax
import jax.numpy as jnp
import numpy as np
class Optimizer(flax.optim.OptimizerDef):
"""Momentum optimizer that stores state using half-precision."""
@flax.struct.dataclass
class HyperParams:
learning_rate: np.ndarray
beta: np.ndarray
grad_norm_clip: np.ndarray
@flax.struct.dataclass
class State:
momentum: np.ndarray
def __init__(self,
learning_rate=None,
beta=0.9,
dtype='bfloat16',
grad_norm_clip=None):
hyper_params = Optimizer.HyperParams(learning_rate, beta, grad_norm_clip)
super().__init__(hyper_params)
self.dtype = dict(bfloat16=jnp.bfloat16, float32=jnp.float32)[dtype]
def init_param_state(self, param):
return Optimizer.State(jnp.zeros_like(param, dtype=self.dtype))
def apply_gradient(self, hyper_params, params, state, grads):
step = state.step
params_flat, treedef = jax.tree_flatten(params)
states_flat = treedef.flatten_up_to(state.param_states)
grads_flat = treedef.flatten_up_to(grads)
# Optionally resize the global gradient to a maximum norm.
if hyper_params.grad_norm_clip:
grads_l2 = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads_flat]))
grads_factor = jnp.minimum(1.0, hyper_params.grad_norm_clip / grads_l2)
grads_flat = jax.tree_map(lambda param: grads_factor * param, grads_flat)
out = [
self.apply_param_gradient(step, hyper_params, param, state, grad)
for param, state, grad in zip(params_flat, states_flat, grads_flat)
]
new_params_flat, new_states_flat = list(zip(*out)) if out else ((), ())
new_params = jax.tree_unflatten(treedef, new_params_flat)
new_param_states = jax.tree_unflatten(treedef, new_states_flat)
new_state = flax.optim.OptimizerState(step + 1, new_param_states)
return new_params, new_state
def apply_param_gradient(self, step, hyper_params, param, state, grad):
del step
assert hyper_params.learning_rate is not None, 'no learning rate provided.'
momentum = state.momentum
new_momentum = hyper_params.beta * momentum + grad
new_param = param - hyper_params.learning_rate * new_momentum
new_state = Optimizer.State(new_momentum.astype(self.dtype))
return new_param, new_state
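# Hypothetical usage sketch (assumes the legacy flax.optim API that this class extends
# is available): one momentum step with gradient-norm clipping on a tiny parameter
# tree. All values are illustrative.
if __name__ == '__main__':
    params = {'w': jnp.ones((3,)), 'b': jnp.zeros((3,))}
    grads = {'w': jnp.full((3,), 2.0), 'b': jnp.full((3,), 0.5)}
    opt_def = Optimizer(learning_rate=0.1, beta=0.9, grad_norm_clip=1.0)
    optimizer = opt_def.create(params)           # wraps params plus bfloat16 momentum state
    optimizer = optimizer.apply_gradient(grads)  # one clipped momentum update
    print(optimizer.target['w'])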
|
py
|
1a5bb6feba0f9bf996e634eee863a47d89f5d245
|
from sympy.physics.wigner import wigner_3j
import time
def format_time(duration):
if duration > 1:
return f"{duration:5.7}s"
    elif 1000 * duration > 1:
        return f"{1000 * duration:5.7} ms"
    else:
        return f"{1e6 * duration:5.7} us"
if __name__ == "__main__":
for max_angular in [4, 8, 12]:
start = time.time()
for j1 in range(max_angular):
for j2 in range(max_angular):
for j3 in range(max_angular):
for m1 in range(-j1, j1 + 1):
for m2 in range(-j2, j2 + 1):
for m3 in range(-j3, j3 + 1):
c = wigner_3j(j1, j2, j3, m1, m2, m3)
print(f"max_angular = {max_angular} took {format_time(time.time() - start)}")
|
py
|
1a5bb77f303380e504f6542a97e98de23d774dcb
|
import numpy as np
from gym.spaces import Box
from metaworld.envs.env_util import get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv, _assert_task_is_set
class SawyerPlateSlideBackSideEnv(SawyerXYZEnv):
def __init__(self):
goal_low = (-0.1, 0.6, 0.015)
goal_high = (0.1, 0.6, 0.015)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.25, 0.6, 0.02)
obj_high = (-0.25, 0.6, 0.02)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': 0.3,
'obj_init_pos': np.array([-0.25, 0.6, 0.02], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0., 0.6, 0.015])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.max_path_length = 150
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, obj_high, goal_high)),
)
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_plate_slide_sideway.xml')
@_assert_task_is_set
def step(self, action):
self.set_xyz_action(action[:3])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward, reachDist, pullDist = self.compute_reward(action, obs_dict)
self.curr_path_length += 1
info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew' : reward, 'pickRew':None, 'success': float(pullDist <= 0.07)}
info['goal'] = self.goal
return ob, reward, self.curr_path_length == self.max_path_length, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def _set_goal_marker(self, goal):
self.data.site_xpos[self.model.site_name2id('goal')] = (
goal[:3]
)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
if self.random_init:
obj_pos = self._get_state_rand_vec()
self.obj_init_pos = obj_pos[:3]
goal_pos = obj_pos[3:]
self._state_goal = goal_pos
self._set_goal_marker(self._state_goal)
self.sim.model.body_pos[self.model.body_name2id('cabinet')] = self.obj_init_pos
self._set_obj_xyz(np.array([-0.2, 0.]))
self.maxDist = np.linalg.norm(self.data.get_geom_xpos('objGeom')[:-1] - self._state_goal[:-1])
self.target_reward = 1000*self.maxDist + 1000*2
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1,1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
def compute_reward(self, actions, obs):
del actions
obs = obs['state_observation']
objPos = obs[3:6]
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
pullGoal = self._state_goal
reachDist = np.linalg.norm(objPos - fingerCOM)
pullDist = np.linalg.norm(objPos[:-1] - pullGoal[:-1])
c1 = 1000
c2 = 0.01
c3 = 0.001
if reachDist < 0.05:
pullRew = 1000*(self.maxDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))
pullRew = max(pullRew, 0)
else:
pullRew = 0
reward = -reachDist + pullRew
return [reward, reachDist, pullDist]
|
py
|
1a5bb8c656defa11d418d6f23b128c5db1dd83ee
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
from datetime import datetime
import hashlib
import os
import re
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from flask import g
from flask_babel import lazy_gettext as _
import pandas as pd
from sqlalchemy import column, DateTime, select
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.interfaces import Compiled, Dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import quoted_name, text
from sqlalchemy.sql.expression import ColumnClause, ColumnElement, Select, TextAsFrom
from sqlalchemy.types import TypeEngine
import sqlparse
from werkzeug.utils import secure_filename
from superset import app, db, sql_parse
from superset.utils import core as utils
class TimeGrain(NamedTuple):
name: str # TODO: redundant field, remove
label: str
function: str
duration: Optional[str]
config = app.config
QueryStatus = utils.QueryStatus
builtin_time_grains: Dict[Optional[str], str] = {
None: "Time Column",
"PT1S": "second",
"PT1M": "minute",
"PT5M": "5 minute",
"PT10M": "10 minute",
"PT15M": "15 minute",
"PT0.5H": "half hour",
"PT1H": "hour",
"P1D": "day",
"P1W": "week",
"P1M": "month",
"P0.25Y": "quarter",
"P1Y": "year",
"1969-12-28T00:00:00Z/P1W": "week_start_sunday",
"1969-12-29T00:00:00Z/P1W": "week_start_monday",
"P1W/1970-01-03T00:00:00Z": "week_ending_saturday",
"P1W/1970-01-04T00:00:00Z": "week_ending_sunday",
}
class TimestampExpression(ColumnClause):
def __init__(self, expr: str, col: ColumnClause, **kwargs):
"""Sqlalchemy class that can be can be used to render native column elements
respeting engine-specific quoting rules as part of a string-based expression.
:param expr: Sql expression with '{col}' denoting the locations where the col
object will be rendered.
:param col: the target column
"""
super().__init__(expr, **kwargs)
self.col = col
@property
def _constructor(self):
# Needed to ensure that the column label is rendered correctly when
# proxied to the outer query.
# See https://github.com/sqlalchemy/sqlalchemy/issues/4730
return ColumnClause
@compiles(TimestampExpression)
def compile_timegrain_expression(
element: TimestampExpression, compiler: Compiled, **kw
) -> str:
return element.name.replace("{col}", compiler.process(element.col, **kw))
class LimitMethod(object):
"""Enum the ways that limits can be applied"""
FETCH_MANY = "fetch_many"
WRAP_SQL = "wrap_sql"
FORCE_LIMIT = "force_limit"
def _create_time_grains_tuple(
time_grains: Dict[Optional[str], str],
time_grain_functions: Dict[Optional[str], str],
blacklist: List[str],
) -> Tuple[TimeGrain, ...]:
"""
function for creating a tuple of time grains based on time grains provided by
the engine and any potential additional or blacklisted grains in the config file.
:param time_grains: all time grains supported by the engine + config files
:param time_grain_functions: mapping between time grain id and sql expression
:param blacklist: list of time grain ids to be excluded
:return: final collection of time grains
"""
ret_list = []
blacklist = blacklist if blacklist else []
for duration, func in time_grain_functions.items():
if duration in time_grains and duration not in blacklist:
name = time_grains[duration]
ret_list.append(TimeGrain(name, _(name), func, duration))
return tuple(ret_list)
class BaseEngineSpec:
"""Abstract class for database engine specific configurations"""
engine = "base" # str as defined in sqlalchemy.engine.engine
time_grain_functions: Dict[Optional[str], str] = {}
time_groupby_inline = False
limit_method = LimitMethod.FORCE_LIMIT
time_secondary_columns = False
allows_joins = True
allows_subqueries = True
allows_column_aliases = True
force_column_alias_quotes = False
arraysize = 0
max_column_name_length = 0
try_remove_schema_from_table_name = True
@classmethod
def get_timestamp_expr(
cls, col: ColumnClause, pdf: Optional[str], time_grain: Optional[str]
) -> TimestampExpression:
"""
Construct a TimestampExpression to be used in a SQLAlchemy query.
:param col: Target column for the TimestampExpression
:param pdf: date format (seconds or milliseconds)
:param time_grain: time grain, e.g. P1Y for 1 year
:return: TimestampExpression object
"""
if time_grain:
time_expr = cls.time_grain_functions.get(time_grain)
if not time_expr:
raise NotImplementedError(
f"No grain spec for {time_grain} for database {cls.engine}"
)
else:
time_expr = "{col}"
# if epoch, translate to DATE using db specific conf
if pdf == "epoch_s":
time_expr = time_expr.replace("{col}", cls.epoch_to_dttm())
elif pdf == "epoch_ms":
time_expr = time_expr.replace("{col}", cls.epoch_ms_to_dttm())
return TimestampExpression(time_expr, col, type_=DateTime)
@classmethod
def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
"""
Generate a tuple of time grains based on time grains provided by the engine
and any potential additional or blacklisted grains in the config file.
:return: All time grains supported by the engine
"""
blacklist: List[str] = config.get("TIME_GRAIN_BLACKLIST", [])
supported_grains = builtin_time_grains.copy()
supported_grains.update(config.get("TIME_GRAIN_ADDONS", {}))
grain_functions = cls.time_grain_functions.copy()
grain_addon_functions = config.get("TIME_GRAIN_ADDON_FUNCTIONS", {})
grain_functions.update(grain_addon_functions.get(cls.engine, {}))
return _create_time_grains_tuple(supported_grains, grain_functions, blacklist)
@classmethod
def make_select_compatible(
cls, groupby_exprs: Dict[str, ColumnElement], select_exprs: List[ColumnElement]
) -> List[ColumnElement]:
"""
Some databases will just return the group-by field into the select, but don't
allow the group-by field to be put into the select list.
:param groupby_exprs: mapping between column name and column object
:param select_exprs: all columns in the select clause
:return: columns to be included in the final select clause
"""
return select_exprs
@classmethod
def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
"""
:param cursor: Cursor instance
:param limit: Maximum number of rows to be returned by the cursor
:return: Result of query
"""
if cls.arraysize:
cursor.arraysize = cls.arraysize
if cls.limit_method == LimitMethod.FETCH_MANY:
return cursor.fetchmany(limit)
return cursor.fetchall()
@classmethod
def expand_data(
cls, columns: List[dict], data: List[dict]
) -> Tuple[List[dict], List[dict], List[dict]]:
"""
Some engines support expanding nested fields. See implementation in Presto
spec for details.
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
return columns, data, []
@classmethod
def alter_new_orm_column(cls, orm_col):
"""Allow altering default column attributes when first detected/added
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created"""
# TODO: Fix circular import caused by importing TableColumn
pass
@classmethod
def epoch_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (seconds) to datetime that can be used in a
query. The reference column should be denoted as `{col}` in the return
expression, e.g. "FROM_UNIXTIME({col})"
:return: SQL Expression
"""
raise NotImplementedError()
@classmethod
def epoch_ms_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (milliseconds) to datetime that can be used
in a query.
:return: SQL Expression
"""
return cls.epoch_to_dttm().replace("{col}", "({col}/1000)")
@classmethod
def get_datatype(cls, type_code: Any) -> Optional[str]:
"""
Change column type code from cursor description to string representation.
:param type_code: Type code from cursor description
:return: String representation of type code
"""
if isinstance(type_code, str) and len(type_code):
return type_code.upper()
return None
@classmethod
def extra_table_metadata(
cls, database, table_name: str, schema_name: str
) -> Dict[str, Any]:
"""
Returns engine-specific table metadata
:param database: Database instance
:param table_name: Table name
:param schema_name: Schema name
:return: Engine-specific table metadata
"""
# TODO: Fix circular import caused by importing Database
return {}
@classmethod
def apply_limit_to_sql(cls, sql: str, limit: int, database) -> str:
"""
Alters the SQL statement to apply a LIMIT clause
:param sql: SQL query
:param limit: Maximum number of rows to be returned by the query
:param database: Database instance
:return: SQL query with limit clause
"""
# TODO: Fix circular import caused by importing Database
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip("\t\n ;")
qry = (
select("*")
.select_from(TextAsFrom(text(sql), ["*"]).alias("inner_qry"))
.limit(limit)
)
return database.compile_sqla_query(qry)
        elif cls.limit_method == LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql
@classmethod
def get_limit_from_sql(cls, sql: str) -> int:
"""
Extract limit from SQL query
:param sql: SQL query
:return: Value of limit clause in query
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.limit
@classmethod
def get_query_with_new_limit(cls, sql: str, limit: int) -> str:
"""
Create a query based on original query but with new limit clause
:param sql: SQL query
:param limit: New limit to insert/replace into query
:return: Query with new limit
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.get_query_with_new_limit(limit)
@staticmethod
def csv_to_df(**kwargs) -> pd.DataFrame:
""" Read csv into Pandas DataFrame
        :param kwargs: params to be passed to pandas.read_csv
:return: Pandas DataFrame containing data from csv
"""
kwargs["filepath_or_buffer"] = (
config["UPLOAD_FOLDER"] + kwargs["filepath_or_buffer"]
)
kwargs["encoding"] = "utf-8"
kwargs["iterator"] = True
chunks = pd.read_csv(**kwargs)
df = pd.concat(chunk for chunk in chunks)
return df
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs):
""" Upload data from a Pandas DataFrame to a database. For
regular engines this calls the DataFrame.to_sql() method. Can be
overridden for engines that don't work well with to_sql(), e.g.
BigQuery.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_sql() method
"""
df.to_sql(**kwargs)
@classmethod
def create_table_from_csv(cls, form, table):
""" Create table (including metadata in backend) from contents of a csv.
:param form: Parameters defining how to process data
:param table: Metadata of new table to be created
"""
def _allowed_file(filename: str) -> bool:
# Only allow specific file extensions as specified in the config
extension = os.path.splitext(filename)[1]
return (
extension is not None and extension[1:] in config["ALLOWED_EXTENSIONS"]
)
filename = secure_filename(form.csv_file.data.filename)
if not _allowed_file(filename):
raise Exception("Invalid file type selected")
csv_to_df_kwargs = {
"filepath_or_buffer": filename,
"sep": form.sep.data,
"header": form.header.data if form.header.data else 0,
"index_col": form.index_col.data,
"mangle_dupe_cols": form.mangle_dupe_cols.data,
"skipinitialspace": form.skipinitialspace.data,
"skiprows": form.skiprows.data,
"nrows": form.nrows.data,
"skip_blank_lines": form.skip_blank_lines.data,
"parse_dates": form.parse_dates.data,
"infer_datetime_format": form.infer_datetime_format.data,
"chunksize": 10000,
}
df = cls.csv_to_df(**csv_to_df_kwargs)
df_to_sql_kwargs = {
"df": df,
"name": form.name.data,
"con": create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False),
"schema": form.schema.data,
"if_exists": form.if_exists.data,
"index": form.index.data,
"index_label": form.index_label.data,
"chunksize": 10000,
}
cls.df_to_sql(**df_to_sql_kwargs)
table.user_id = g.user.id
table.schema = form.schema.data
table.fetch_metadata()
db.session.add(table)
db.session.commit()
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
"""
Convert DateTime object to sql expression
:param target_type: Target type of expression
:param dttm: DateTime object
:return: SQL expression
"""
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@classmethod
def get_all_datasource_names(
cls, db, datasource_type: str
) -> List[utils.DatasourceName]:
"""Returns a list of all tables or views in database.
:param db: Database instance
:param datasource_type: Datasource_type can be 'table' or 'view'
:return: List of all datasources in database or schema
"""
# TODO: Fix circular import caused by importing Database
schemas = db.get_all_schema_names(
cache=db.schema_cache_enabled,
cache_timeout=db.schema_cache_timeout,
force=True,
)
all_datasources: List[utils.DatasourceName] = []
for schema in schemas:
if datasource_type == "table":
all_datasources += db.get_all_table_names_in_schema(
schema=schema,
force=True,
cache=db.table_cache_enabled,
cache_timeout=db.table_cache_timeout,
)
elif datasource_type == "view":
all_datasources += db.get_all_view_names_in_schema(
schema=schema,
force=True,
cache=db.table_cache_enabled,
cache_timeout=db.table_cache_timeout,
)
else:
raise Exception(f"Unsupported datasource_type: {datasource_type}")
return all_datasources
@classmethod
def handle_cursor(cls, cursor, query, session):
"""Handle a live cursor between the execute and fetchall calls
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object"""
# TODO: Fix circular import error caused by importing sql_lab.Query
pass
@classmethod
def extract_error_message(cls, e: Exception) -> str:
"""Extract error message for queries"""
return utils.error_msg_from_exception(e)
@classmethod
def adjust_database_uri(cls, uri, selected_schema: str):
"""Based on a URI and selected schema, return a new URI
The URI here represents the URI as entered when saving the database,
``selected_schema`` is the schema currently active presumably in
the SQL Lab dropdown. Based on that, for some database engine,
we can return a new altered URI that connects straight to the
active schema, meaning the users won't have to prefix the object
names by the schema name.
        Some database engines have two levels of namespacing: database and
        schema (postgres, oracle, mssql, ...).
        For those it's probably better not to alter the database
        component of the URI with the schema name, as it won't work.
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
"""
# TODO: All overrides mutate input uri; should be renamed or refactored
return uri
@classmethod
def patch(cls):
"""
TODO: Improve docstring and refactor implementation in Hive
"""
pass
@classmethod
def get_schema_names(cls, inspector: Inspector) -> List[str]:
"""
Get all schemas from database
:param inspector: SqlAlchemy inspector
:return: All schemas in the database
"""
return sorted(inspector.get_schema_names())
@classmethod
def get_table_names(cls, inspector: Inspector, schema: Optional[str]) -> List[str]:
"""
Get all tables from schema
:param inspector: SqlAlchemy inspector
:param schema: Schema to inspect. If omitted, uses default schema for database
:return: All tables in schema
"""
tables = inspector.get_table_names(schema)
if schema and cls.try_remove_schema_from_table_name:
tables = [re.sub(f"^{schema}\\.", "", table) for table in tables]
return sorted(tables)
@classmethod
def get_view_names(cls, inspector: Inspector, schema: Optional[str]) -> List[str]:
"""
Get all views from schema
:param inspector: SqlAlchemy inspector
:param schema: Schema name. If omitted, uses default schema for database
:return: All views in schema
"""
views = inspector.get_view_names(schema)
if schema and cls.try_remove_schema_from_table_name:
views = [re.sub(f"^{schema}\\.", "", view) for view in views]
return sorted(views)
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get all columns from a given schema and table
:param inspector: SqlAlchemy Inspector instance
:param table_name: Table name
:param schema: Schema name. If omitted, uses default schema for database
:return: All columns in table
"""
return inspector.get_columns(table_name, schema)
@classmethod
def where_latest_partition(
cls,
table_name: str,
schema: Optional[str],
database,
query: Select,
columns: Optional[List] = None,
) -> Optional[Select]:
"""
Add a where clause to a query to reference only the most recent partition
:param table_name: Table name
:param schema: Schema name
:param database: Database instance
:param query: SqlAlchemy query
:param columns: List of TableColumns
:return: SqlAlchemy query with additional where clause referencing latest
partition
"""
# TODO: Fix circular import caused by importing Database, TableColumn
return None
@classmethod
def _get_fields(cls, cols):
return [column(c.get("name")) for c in cols]
@classmethod
def select_star(
cls,
database,
table_name: str,
engine: Engine,
schema: Optional[str] = None,
limit: int = 100,
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Generate a "SELECT * from [schema.]table_name" query with appropriate limit.
:param database: Database instance
:param table_name: Table name
:param engine: SqlALchemy Engine instance
:param schema: Schema
:param limit: limit to impose on query
:param show_cols: Show columns in query; otherwise use "*"
:param indent: Add indentation to query
:param latest_partition: Only query latest partition
:param cols: Columns to include in query
:return: SQL query
"""
fields = "*"
cols = cols or []
if (show_cols or latest_partition) and not cols:
cols = database.get_columns(table_name, schema)
if show_cols:
fields = cls._get_fields(cols)
quote = engine.dialect.identifier_preparer.quote
if schema:
full_table_name = quote(schema) + "." + quote(table_name)
else:
full_table_name = quote(table_name)
qry = select(fields).select_from(text(full_table_name))
if limit:
qry = qry.limit(limit)
if latest_partition:
partition_query = cls.where_latest_partition(
table_name, schema, database, qry, columns=cols
)
if partition_query is not None:
qry = partition_query
sql = database.compile_sqla_query(qry)
if indent:
sql = sqlparse.format(sql, reindent=True)
return sql
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user: bool, username: str):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username
@classmethod
def get_configuration_for_impersonation(
cls, uri: str, impersonate_user: bool, username: str
) -> Dict[str, str]:
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI
:param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
:return: Configs required for impersonation
"""
return {}
@classmethod
def execute(cls, cursor, query: str, **kwargs):
"""
Execute a SQL query
:param cursor: Cursor instance
:param query: Query to execute
:param kwargs: kwargs to be passed to cursor.execute()
:return:
"""
if cls.arraysize:
cursor.arraysize = cls.arraysize
cursor.execute(query)
@classmethod
def make_label_compatible(cls, label: str) -> Union[str, quoted_name]:
"""
Conditionally mutate and/or quote a sqlalchemy expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select query
and query results have same case. Otherwise return the mutated label as a
regular string. If maxmimum supported column name length is exceeded,
generate a truncated label by calling truncate_label().
:param label: expected expression label/alias
:return: conditionally mutated label supported by the db engine
"""
label_mutated = cls._mutate_label(label)
if (
cls.max_column_name_length
and len(label_mutated) > cls.max_column_name_length
):
label_mutated = cls._truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated
@classmethod
def get_sqla_column_type(cls, type_: str) -> Optional[TypeEngine]:
"""
Return a sqlalchemy native column type that corresponds to the column type
defined in the data source (return None to use default type inferred by
SQLAlchemy). Needs to be overridden if column requires special handling
(see MSSQL for example of NCHAR/NVARCHAR handling).
:param type_: Column type returned by inspector
:return: SqlAlchemy column type
"""
return None
@staticmethod
def _mutate_label(label: str) -> str:
"""
Most engines support mixed case aliases that can include numbers
and special characters, like commas, parentheses etc. For engines that
have restrictions on what types of aliases are supported, this method
can be overridden to ensure that labels conform to the engine's
limitations. Mutated labels should be deterministic (input label A always
yields output label X) and unique (input labels A and B don't yield the same
output label X).
:param label: Preferred expression label
:return: Conditionally mutated label
"""
return label
@classmethod
def _truncate_label(cls, label: str) -> str:
"""
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
the original label. By default this returns an md5 hash of the original label,
conditionally truncated if the length of the hash exceeds the max column length
of the engine.
:param label: Expected expression label
:return: Truncated label
"""
label = hashlib.md5(label.encode("utf-8")).hexdigest()
# truncate hash if it exceeds max length
if cls.max_column_name_length and len(label) > cls.max_column_name_length:
label = label[: cls.max_column_name_length]
return label
@classmethod
def column_datatype_to_string(
cls, sqla_column_type: TypeEngine, dialect: Dialect
) -> str:
"""
Convert sqlalchemy column type to string representation. Can be overridden to remove
unnecessary details, especially collation info (see mysql, mssql).
:param sqla_column_type: SqlAlchemy column type
:param dialect: Sqlalchemy dialect
:return: Compiled column type
"""
return sqla_column_type.compile(dialect=dialect).upper()
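# Hypothetical example spec (not a real Superset engine): shows how time_grain_functions
# and epoch_to_dttm() are combined by get_timestamp_expr() into a single rendered SQL
# expression. The grain mapping and function strings below are illustrative only.
if __name__ == "__main__":
    class ExampleEngineSpec(BaseEngineSpec):
        engine = "example"
        time_grain_functions = {None: "{col}", "P1D": "DATE({col})"}
        @classmethod
        def epoch_to_dttm(cls) -> str:
            return "FROM_UNIXTIME({col})"
    # epoch seconds column truncated to day granularity
    print(ExampleEngineSpec.get_timestamp_expr(column("ts"), "epoch_s", "P1D"))
    # epoch milliseconds column with no grain applied
    print(ExampleEngineSpec.get_timestamp_expr(column("ts"), "epoch_ms", None))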
|
py
|
1a5bb9b8cae4b8f8fb72ec1cbcd6297ba99c7e5b
|
"""Test suite for audlib.io.batch."""
import os
from audlib.io.batch import lsfiles
from audlib.io.audio import audioinfo
import audlib
SAMPLEDIR = os.path.join(os.path.dirname(audlib.__file__), 'samples')
def test_lsfiles():
"""Test lsfiles."""
def longer_than_3sec(fpath):
info = audioinfo(fpath)
return (info.frames / info.samplerate) > 3.
def is_audio(fpath): return fpath.endswith(('.wav', '.sph'))
assert len(lsfiles(SAMPLEDIR, filt=is_audio, relpath=True)) == 2
assert len(lsfiles(SAMPLEDIR, filt=lambda p: is_audio(p) and
longer_than_3sec(p))) == 1
if __name__ == "__main__":
test_lsfiles()
|
py
|
1a5bba874e7f818dc84890693f3bea23ff06fd46
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import pandas as pd
from pkg_resources import resource_filename
from .utils import column_exists, fixup_columns
CENSUS2000 = resource_filename(__name__, "data/census/census_2000.csv")
CENSUS2010 = resource_filename(__name__, "data/census/census_2010.csv")
CENSUS_COLS = ['pctwhite', 'pctblack', 'pctapi', 'pctaian', 'pct2prace',
'pcthispanic']
class CensusLnData():
census_df = None
@classmethod
def census_ln(cls, df, namecol, year=2000):
"""Appends additional columns from Census data to the input DataFrame
based on the last name.
        Removes extra space. Checks if the name is in the Census data. If it is,
outputs data from that row.
Args:
df (:obj:`DataFrame`): Pandas DataFrame containing the last name
column.
namecol (str or int): Column's name or location of the name in
DataFrame.
year (int): The year of Census data to be used. (2000 or 2010)
(default is 2000)
Returns:
DataFrame: Pandas DataFrame with additional columns 'pctwhite',
'pctblack', 'pctapi', 'pctaian', 'pct2prace', 'pcthispanic'
"""
if namecol not in df.columns:
print("No column `{0!s}` in the DataFrame".format(namecol))
return df
df['__last_name'] = df[namecol].str.strip().str.upper()
if cls.census_df is None or cls.census_year != year:
if year == 2000:
cls.census_df = pd.read_csv(CENSUS2000, usecols=['name'] +
CENSUS_COLS)
elif year == 2010:
cls.census_df = pd.read_csv(CENSUS2010, usecols=['name'] +
CENSUS_COLS)
cls.census_df.drop(cls.census_df[cls.census_df.name.isnull()]
.index, inplace=True)
cls.census_df.columns = ['__last_name'] + CENSUS_COLS
cls.census_year = year
rdf = pd.merge(df, cls.census_df, how='left', on='__last_name')
del rdf['__last_name']
return rdf
census_ln = CensusLnData.census_ln
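# Hypothetical usage sketch (output values depend on the census CSVs bundled with the
# package; the appended columns are the CENSUS_COLS defined above):
#
#   df = pd.read_csv('people.csv')                # must contain a last-name column
#   out = census_ln(df, 'last_name', year=2010)
#   out[['last_name', 'pctwhite', 'pctblack', 'pctapi', 'pcthispanic']]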
def main(argv=sys.argv[1:]):
title = 'Appends Census columns by last name'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('input', default=None,
help='Input file')
parser.add_argument('-y', '--year', type=int, default=2000,
choices=[2000, 2010],
help='Year of Census data (default=2000)')
parser.add_argument('-o', '--output', default='census-output.csv',
help='Output file with Census data columns')
parser.add_argument('-l', '--last', required=True,
                        help='Name or index location of the column that '
                             'contains the last name')
args = parser.parse_args(argv)
print(args)
if not args.last.isdigit():
df = pd.read_csv(args.input)
else:
df = pd.read_csv(args.input, header=None)
args.last = int(args.last)
if not column_exists(df, args.last):
return -1
rdf = census_ln(df, args.last, args.year)
print("Saving output to file: `{0:s}`".format(args.output))
rdf.columns = fixup_columns(rdf.columns)
rdf.to_csv(args.output, index=False)
return 0
if __name__ == "__main__":
sys.exit(main())
|
py
|
1a5bbac5e5b18342e80dd5e2022f196ba92abcec
|
from tqdm import tqdm
from PIL import Image
from models import face_extractor, get_resnet
import numpy as np
import time
import torch
def getEmbedings(aligned, names, resnet):
embbedX = list()
embbedY = list()
print ("Creating embeddings for all training images")
for im, name in tqdm(zip(aligned, names), total = len(names)):
std = im.std()
mean = im.mean()
im = (im - mean) / std
emb = resnet(im.unsqueeze(0)).detach().numpy()
embbedX.append(emb)
embbedY.append(name)
return np.array(embbedX), np.array(embbedY)
def collate_fn(x):
return x[0]
def get_face_crop(image_src, device):
mtcnn = face_extractor(device)
if isinstance(image_src, np.ndarray): # When we get it from cv2
img = Image.fromarray(image_src)
elif isinstance(image_src, torch.Tensor):
        img = Image.fromarray(image_src.numpy())  # PIL cannot consume a torch.Tensor directly; convert to numpy first
else:
img = Image.open(image_src)
img = mtcnn(img)
return img
def clear_buffer(cap, frame_rate = 30):
ret = True
while ret:
t1 = time.time()
ret, _ = cap.read()
if (time.time()-t1)> 1/frame_rate:
break
|
py
|
1a5bbb63cef05ed0cf44e6ef9c35221d98587754
|
"""nox-poetry configuration file."""
from calcipy.dev.noxfile import build_check, build_dist, check_safety, check_security, coverage, tests # noqa: F401
|
py
|
1a5bbb9ac50b776ae07ec721a2e757d0518c2bac
|
pkgname = "linux-pam"
pkgver = "1.5.2"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--docdir=/usr/share/doc/pam", "--disable-nis", "--disable-audit",
"--disable-selinux", "--disable-regenerate-docu", "--disable-db",
"BUILD_CFLAGS=-Os", "BUILD_LDFLAGS=", "ac_cv_search_crypt=no"
]
hostmakedepends = ["pkgconf", "gettext-tiny"]
makedepends = ["gettext-tiny-devel", "libfl-devel", "linux-headers"]
checkdepends = ["linux-pam-base"]
depends = ["linux-pam-base"]
pkgdesc = "Pluggable Authentication Modules for Linux"
maintainer = "q66 <[email protected]>"
license = "BSD-3-Clause"
url = f"https://github.com/{pkgname}/{pkgname}"
source = f"{url}/releases/download/v{pkgver}/Linux-PAM-{pkgver}.tar.xz"
sha256 = "e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d"
suid_files = ["usr/bin/unix_chkpwd"]
def post_install(self):
self.install_license("COPYING")
self.chmod(self.destdir / "usr/bin/unix_chkpwd", 0o4755)
self.rm(self.destdir / "usr/lib/systemd", recursive = True)
for f in ["limits.d", "namespace.d"]:
self.install_dir(f"etc/security/{f}")
(self.destdir / "etc/security" / f / ".empty").touch(mode = 0o644)
@subpackage("linux-pam-devel")
def _devel(self):
return self.default_devel(man = True, extra = ["usr/share/doc"])
@subpackage("linux-pam-libs")
def _libs(self):
return self.default_libs()
|
py
|
1a5bbc91c4d33153a20e1ecf63f951ff54f4ff85
|
#Return the sum of the two lowest positive numbers given an array of at least 4 positive integers.
def sum_two_smallest_numbers(numbers):
st = sorted(numbers)
return st[0] + st[1]
#Alternate Solution
def sum_two_smallest_numbers(numbers):
return sum(sorted(numbers)[:2])
|
py
|
1a5bbcb242f4c4f2a2b1b14cfde5967b95149010
|
__author__ = "Jeremy Lainé"
__email__ = "[email protected]"
__license__ = "BSD"
__summary__ = "An implementation of WebRTC and ORTC"
__title__ = "aiortc"
__uri__ = "https://github.com/aiortc/aiortc"
__version__ = "1.2.0"
|
py
|
1a5bbec2f863b911dc9aee82fe047bb40f0acb63
|
"""
Module: 'ujson' on micropython-v1.18-rp2
"""
# MCU: {'family': 'micropython', 'sysname': 'rp2', 'version': '1.18.0', 'build': '', 'mpy': 5637, 'port': 'rp2', 'platform': 'rp2', 'name': 'micropython', 'arch': 'armv7m', 'machine': 'Arduino Nano RP2040 Connect with RP2040', 'nodename': 'rp2', 'ver': 'v1.18', 'release': '1.18.0'}
# Stubber: 1.5.3
from typing import Any
def dump(*args, **kwargs) -> Any:
...
def dumps(*args, **kwargs) -> Any:
...
def load(*args, **kwargs) -> Any:
...
def loads(*args, **kwargs) -> Any:
...
|
py
|
1a5bbfc2533653f6458acfc1799ecfa7244d48a3
|
"""The polling mixin for controllers (poller) helps controllers to update the view based on changes in the model"""
from julesTk.controller import Controller
__author__ = "Joeri Jongbloets <[email protected]>"
class Poller(Controller):
"""A controller that does something at a given interval"""
def __init__(self, *args, **kwargs):
super(Poller, self).__init__(*args, **kwargs)
self._interval = 1 # in seconds
self._polling = False # whether the poller is active
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, v):
self._interval = v
def is_polling(self):
return self._polling is True
def set_polling(self, state):
self._polling = state is True
def _prepare(self):
raise NotImplementedError
def _start(self):
raise NotImplementedError
def run(self):
"""Runs the poller"""
self.set_polling(True)
self._update()
def execute(self):
raise NotImplementedError
    def _update(self):
        """Run one poll and, while polling is active, re-schedule itself."""
        if self.is_polling():
            try:
                self.execute()
            except KeyboardInterrupt:
                self.set_polling(False)
            # interval is in seconds; Tk's after() expects milliseconds
            self.view.after(int(self._interval * 1000), self._update)
def _stop(self):
self.set_polling(False)
super(Poller, self)._stop()
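# A minimal usage sketch (hedged: the constructor arguments and the `view` attribute
# come from julesTk's Controller base class, which is not shown here):
#   class ClockPoller(Poller):
#       def execute(self):
#           print("tick")        # runs once per `interval` seconds
#   poller = ClockPoller(...)    # arguments as required by julesTk's Controller
#   poller.interval = 2
#   poller.run()                 # re-schedules itself through view.after()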
|
py
|
1a5bbfc88320c457369f2f628fa870635cef7972
|
from typing import Dict
from typing import List
from typing import Tuple
from .incompatibility import Incompatibility
from .incompatibility_cause import ConflictCause
from .incompatibility_cause import PythonCause
class SolveFailure(Exception):
def __init__(self, incompatibility): # type: (Incompatibility) -> None
self._incompatibility = incompatibility
@property
def message(self):
return str(self)
def __str__(self):
return _Writer(self._incompatibility).write()
class _Writer:
def __init__(self, root): # type: (Incompatibility) -> None
self._root = root
self._derivations = {} # type: Dict[Incompatibility, int]
self._lines = [] # type: List[Tuple[str, int]]
self._line_numbers = {} # type: Dict[Incompatibility, int]
self._count_derivations(self._root)
def write(self):
buffer = []
required_python_version = None
for incompatibility in self._root.external_incompatibilities:
if isinstance(incompatibility.cause, PythonCause):
required_python_version = incompatibility.cause.root_python_version
break
if required_python_version is not None:
buffer.append(
"The current project must support the following Python versions: {}".format(
required_python_version
)
)
buffer.append("")
if isinstance(self._root.cause, ConflictCause):
self._visit(self._root, {})
else:
self._write(
self._root, "Because {}, version solving failed.".format(self._root)
)
padding = (
0
if not self._line_numbers
else len("({}) ".format(list(self._line_numbers.values())[-1]))
)
last_was_empty = False
for line in self._lines:
message = line[0]
if not message:
if not last_was_empty:
buffer.append("")
last_was_empty = True
continue
last_was_empty = False
number = line[-1]
if number is not None:
message = "({})".format(number).ljust(padding) + message
else:
message = " " * padding + message
buffer.append(message)
return "\n".join(buffer)
def _write(
self, incompatibility, message, numbered=False
): # type: (Incompatibility, str, bool) -> None
if numbered:
number = len(self._line_numbers) + 1
self._line_numbers[incompatibility] = number
self._lines.append((message, number))
else:
self._lines.append((message, None))
def _visit(
self, incompatibility, details_for_incompatibility, conclusion=False
): # type: (Incompatibility, Dict, bool) -> None
numbered = conclusion or self._derivations[incompatibility] > 1
conjunction = "So," if conclusion or incompatibility == self._root else "And"
incompatibility_string = str(incompatibility)
cause = incompatibility.cause # type: ConflictCause
details_for_cause = {}
if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
cause.other.cause, ConflictCause
):
conflict_line = self._line_numbers.get(cause.conflict)
other_line = self._line_numbers.get(cause.other)
if conflict_line is not None and other_line is not None:
self._write(
incompatibility,
"Because {}, {}.".format(
cause.conflict.and_to_string(
cause.other, details_for_cause, conflict_line, other_line
),
incompatibility_string,
),
numbered=numbered,
)
elif conflict_line is not None or other_line is not None:
if conflict_line is not None:
with_line = cause.conflict
without_line = cause.other
line = conflict_line
else:
with_line = cause.other
without_line = cause.conflict
line = other_line
self._visit(without_line, details_for_cause)
self._write(
incompatibility,
"{} because {} ({}), {}.".format(
conjunction, str(with_line), line, incompatibility_string
),
numbered=numbered,
)
else:
single_line_conflict = self._is_single_line(cause.conflict.cause)
single_line_other = self._is_single_line(cause.other.cause)
if single_line_other or single_line_conflict:
first = cause.conflict if single_line_other else cause.other
second = cause.other if single_line_other else cause.conflict
self._visit(first, details_for_cause)
self._visit(second, details_for_cause)
self._write(
incompatibility,
"Thus, {}.".format(incompatibility_string),
numbered=numbered,
)
else:
self._visit(cause.conflict, {}, conclusion=True)
self._lines.append(("", None))
self._visit(cause.other, details_for_cause)
self._write(
incompatibility,
"{} because {} ({}), {}".format(
conjunction,
str(cause.conflict),
self._line_numbers[cause.conflict],
incompatibility_string,
),
numbered=numbered,
)
elif isinstance(cause.conflict.cause, ConflictCause) or isinstance(
cause.other.cause, ConflictCause
):
derived = (
cause.conflict
if isinstance(cause.conflict.cause, ConflictCause)
else cause.other
)
ext = (
cause.other
if isinstance(cause.conflict.cause, ConflictCause)
else cause.conflict
)
derived_line = self._line_numbers.get(derived)
if derived_line is not None:
self._write(
incompatibility,
"Because {}, {}.".format(
ext.and_to_string(
derived, details_for_cause, None, derived_line
),
incompatibility_string,
),
numbered=numbered,
)
elif self._is_collapsible(derived):
derived_cause = derived.cause # type: ConflictCause
if isinstance(derived_cause.conflict.cause, ConflictCause):
collapsed_derived = derived_cause.conflict
else:
collapsed_derived = derived_cause.other
if isinstance(derived_cause.conflict.cause, ConflictCause):
collapsed_ext = derived_cause.other
else:
collapsed_ext = derived_cause.conflict
details_for_cause = {}
self._visit(collapsed_derived, details_for_cause)
self._write(
incompatibility,
"{} because {}, {}.".format(
conjunction,
collapsed_ext.and_to_string(ext, details_for_cause, None, None),
incompatibility_string,
),
numbered=numbered,
)
else:
self._visit(derived, details_for_cause)
self._write(
incompatibility,
"{} because {}, {}.".format(
conjunction, str(ext), incompatibility_string
),
numbered=numbered,
)
else:
self._write(
incompatibility,
"Because {}, {}.".format(
cause.conflict.and_to_string(
cause.other, details_for_cause, None, None
),
incompatibility_string,
),
numbered=numbered,
)
def _is_collapsible(self, incompatibility): # type: (Incompatibility) -> bool
if self._derivations[incompatibility] > 1:
return False
cause = incompatibility.cause # type: ConflictCause
if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
cause.other.cause, ConflictCause
):
return False
if not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
cause.other.cause, ConflictCause
):
return False
complex = (
cause.conflict
if isinstance(cause.conflict.cause, ConflictCause)
else cause.other
)
return complex not in self._line_numbers
def _is_single_line(self, cause): # type: (ConflictCause) -> bool
return not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
cause.other.cause, ConflictCause
)
def _count_derivations(self, incompatibility): # type: (Incompatibility) -> None
if incompatibility in self._derivations:
self._derivations[incompatibility] += 1
else:
self._derivations[incompatibility] = 1
cause = incompatibility.cause
if isinstance(cause, ConflictCause):
self._count_derivations(cause.conflict)
self._count_derivations(cause.other)
|
py
|
1a5bc2a1274ae4dd75d080a4294791497e9269ee
|
from .aot_encoder_decoder import AOTEncoderDecoder
from .decoders import (DeepFillDecoder, FBADecoder, GLDecoder, IndexedUpsample,
IndexNetDecoder, PConvDecoder, PlainDecoder,
ResGCADecoder, ResNetDec, ResShortcutDec)
from .encoders import (VGG16, DeepFillEncoder, DepthwiseIndexBlock,
FBAResnetDilated, GLEncoder, HolisticIndexBlock,
IndexNetEncoder, PConvEncoder, ResGCAEncoder, ResNetEnc,
ResShortcutEnc)
from .gl_encoder_decoder import GLEncoderDecoder
from .necks import ContextualAttentionNeck, GLDilationNeck
from .pconv_encoder_decoder import PConvEncoderDecoder
from .simple_encoder_decoder import SimpleEncoderDecoder
from .two_stage_encoder_decoder import DeepFillEncoderDecoder
__all__ = [
'GLEncoderDecoder', 'SimpleEncoderDecoder', 'VGG16', 'GLEncoder',
'PlainDecoder', 'GLDecoder', 'GLDilationNeck', 'PConvEncoderDecoder',
'PConvEncoder', 'PConvDecoder', 'ResNetEnc', 'ResNetDec', 'ResShortcutEnc',
'ResShortcutDec', 'HolisticIndexBlock', 'DepthwiseIndexBlock',
'DeepFillEncoder', 'DeepFillEncoderDecoder', 'DeepFillDecoder',
'ContextualAttentionNeck', 'IndexedUpsample', 'IndexNetEncoder',
'IndexNetDecoder', 'ResGCAEncoder', 'ResGCADecoder', 'FBAResnetDilated',
'FBADecoder', 'AOTEncoderDecoder'
]
|
py
|
1a5bc357705a34ce5d897f4ed6adbd631c12ee23
|
import argparse
import glob
import hypothesis as h
import hypothesis.workflow as w
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import papermill as pm
import shutil
from hypothesis.workflow import shell
from tqdm import tqdm
from util import simulate, coverage_of_estimator, mutual_information_of_estimator
from rej_abc import build_posterior
# Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("--redo", action="store_true", help="Executes the workflow from scratch by removing all postconditions (default: false).")
parser.add_argument("--slurm", action="store_true", help="Executes the workflow on a Slurm-enabled HPC system (default: false).")
parser.add_argument("--test", action="store_true", help="Execute the workflow with fast hyper parameters for testing (default: false).")
arguments, _ = parser.parse_known_args()
### BEGIN Pre-workflow #########################################################
# Pipeline constants
root = os.path.dirname(os.path.abspath(__file__))
logdir = root + "/sbi-logs"
outputdir = root + "/output"
if arguments.test:
num_ensembles = 2
simulations = [2 ** n for n in range(10, 11)]
#simulations = [2 ** n for n in range(15, 16)]
credible_interval_levels = [0.9, 0.95]
else:
num_ensembles = 250
simulations = [2 ** n for n in range(10, 18)]
credible_interval_levels = [x/20 for x in range(1, 20)]
# Check if everything needs to be cleaned.
if arguments.redo:
shutil.rmtree(logdir, ignore_errors=True)
shutil.rmtree(outputdir, ignore_errors=True)
### END Pre-workflow ###########################################################
### BEGIN Workflow definition ##################################################
@w.root
def main():
# Prepare the output directory
if not os.path.exists(outputdir):
logging.info("Creating the output directory.")
os.makedirs(outputdir)
def evaluate_rej_abc(simulation_budget):
storagedir = outputdir + "/" + str(simulation_budget)
@w.dependency(main)
@w.postcondition(w.at_least_num_files(storagedir + "/run-*/posterior.pkl", num_ensembles))
@w.slurm.cpu_and_memory(4, "4g")
@w.slurm.timelimit("36:00:00")
@w.tasks(num_ensembles)
def train_rej_abc(task_index):
resultdir = storagedir + "/run-" + str(task_index).zfill(5)
os.makedirs(resultdir, exist_ok=True)
if not os.path.exists(os.path.join(resultdir, "posterior.pkl")):
logging.info("Training posterior estimator ({index} / {n}) for the Weinberg problem.".format(index=task_index + 1, n=num_ensembles))
logging.info("Using the following hyper parameters:")
logging.info(" - simulations : " + str(simulation_budget))
build_posterior(simulation_budget, resultdir, task_index, num_workers=4)
@w.dependency(train_rej_abc)
@w.postcondition(w.exists(storagedir + "/coverage.npy"))
@w.slurm.cpu_and_memory(1, "4g")
@w.slurm.timelimit("12:00:00")
def coverage():
if not os.path.exists(storagedir + "/coverage.npy"):
query = storagedir + "/run-*/"
coverage = coverage_of_estimator(query, num_ensembles, cl_list=credible_interval_levels)
np.save(storagedir + "/coverage.npy", coverage)
@w.dependency(train_rej_abc)
@w.postcondition(w.exists(storagedir + "/mutual_information.npy"))
@w.slurm.cpu_and_memory(1, "4g")
@w.slurm.timelimit("12:00:00")
def mutual_information():
if not os.path.exists(storagedir + "/mutual_information.npy"):
query = storagedir + "/run-*/"
mutual_information = mutual_information_of_estimator(query, num_ensembles)
np.save(storagedir + "/mutual_information.npy", mutual_information)
for simulation_budget in simulations:
evaluate_rej_abc(simulation_budget)
### END Workflow definition ####################################################
# Execute the workflow
if __name__ == "__main__":
if arguments.slurm:
w.slurm.execute(directory=root)
else:
w.local.execute()
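# Illustrative invocations (assuming this script is the workflow entry point):
#   python <this script> --test           # fast hyperparameters, executed locally
#   python <this script> --slurm --redo   # wipe earlier output and run on Slurm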
|
py
|
1a5bc46199c79d3f4e8ee17036f1f2b44a309520
|
import logging
def validateHeartRate(input, patientDict):
"""
Validates patient hr input, checking that fields are proper and exist
:returns: -1 if not successful, 1 if successful
"""
    if not isinstance(input, dict):
        logging.error("input not type dict")
        return -1
if "patient_id" not in input.keys():
logging.error("missing patient id")
return -1
if "heart_rate" not in input.keys():
logging.error("missing heart rate")
return -1
if input["patient_id"] not in patientDict.keys():
logging.error("patient not initialized")
return -1
    try:
        if float(input["heart_rate"]) < 0:
            logging.error("invalid hr")
            return -1
    except (ValueError, TypeError):
        logging.error("non numeric hr")
        return -1
return 1
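# Illustrative calls (hypothetical patient dictionary):
#   validateHeartRate({"patient_id": "p1", "heart_rate": 72}, {"p1": {}})      # -> 1
#   validateHeartRate({"patient_id": "p1", "heart_rate": "fast"}, {"p1": {}})  # -> -1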
|
py
|
1a5bc5a8eae4dafd370751aeb3aa3ecebe161301
|
from .named_tup import NamedTup
DeltaOverwrite = NamedTup('DeltaOverwrite', 'overwrite new_value')
|
py
|
1a5bc6a9793ea0612e1d9a85ee69d0568f63e4f8
|
# Copyright 2022 Ben Kehoe
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from aws_sso_util.cfn_lib import macro
handler = macro.handler
|
py
|
1a5bc960cea707bae4a2d70c424b9b54b09ef355
|
# -*- coding: utf-8 -*-
"""
Datas
https://CloudEngine.com/
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
from django.db import models
from django.template.defaultfilters import slugify as djslugify
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
import hashlib, random
from channels.models import Channel
class Data(models.Model):
"""
"""
owner = models.ForeignKey('auth.User', related_name='ownerdata', on_delete=models.CASCADE)
channel = models.ForeignKey(Channel, related_name='channeldata', on_delete=models.CASCADE)
value_1 = models.CharField(_('Value 1'), max_length=10, null=True, blank=False)
value_2 = models.CharField(_('Value 2'), max_length=10, null=True, blank=False)
value_3 = models.CharField(_('Value 3'), max_length=10, null=True, blank=False)
value_4 = models.CharField(_('Value 4'), max_length=10, null=True, blank=False)
value_5 = models.CharField(_('Value 5'), max_length=10, null=True, blank=False)
value_6 = models.CharField(_('Value 6'), max_length=10, null=True, blank=False)
value_7 = models.CharField(_('Value 7'), max_length=10, null=True, blank=False)
value_8 = models.CharField(_('Value 8'), max_length=10, null=True, blank=False)
value_9 = models.CharField(_('Value 9'), max_length=10, null=True, blank=False)
value_10 = models.CharField(_('Value 10'), max_length=10, null=True, blank=False)
enable = models.BooleanField(_('Is Active'), default=True)
remote_address = models.CharField(_('Ip address'), max_length=255)
pub_date = models.DateTimeField(_('Published date'), auto_now=True)
def __str__(self):
return self.channel.channel_name
|
py
|
1a5bc966e0fcf4c8345d0213972f58fb179a4d30
|
from annotation_tools.annotation_tools import app
if __name__ == '__main__':
app.run()
|
py
|
1a5bcb8d0af756c384b491bed74bbba38cf84266
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from Gaussian.
"""
import re
import warnings
import numpy as np
import scipy.constants as cst
from monty.io import zopen
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Ha_to_eV
from pymatgen.electronic_structure.core import Spin
from pymatgen.util.coord import get_angle
__author__ = "Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "8/1/15"
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
def read_route_line(route):
"""
    read route line in gaussian input/output and return functional, basis_set
    and a dictionary of other route parameters
    Args:
        route (str) : the route line
    Returns:
        functional (str) : the method (HF, PBE ...)
        basis_set (str) : the basis set
        route_params (dict) : dictionary of the other route parameters
        dieze_tag (str) : the "#" tag preceding the route ("#N", "#P" or "#T")
    """
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
multi_params_patt = re.compile(r"^([A-z]+[0-9]*)[\s=]+\((.*)\)$")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif tok.upper() in ["#", "#N", "#P", "#T"]:
# does not store # in route to avoid error in input
if tok == "#":
dieze_tag = "#N"
else:
dieze_tag = tok
continue
else:
m = re.match(multi_params_patt, tok.strip("#"))
if m:
pars = {}
for par in m.group(2).split(","):
p = par.split("=")
pars[p[0]] = None if len(p) == 1 else p[1]
route_params[m.group(1)] = pars
else:
d = tok.strip("#").split("=")
route_params[d[0]] = None if len(d) == 1 else d[1]
return functional, basis_set, route_params, dieze_tag
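# Illustrative call (traced against the parsing rules above):
#   read_route_line("#P B3LYP/6-31G(d) Opt SCF=Tight")
#   -> ("B3LYP", "6-31G(d)", {"Opt": None, "SCF": "Tight"}, "#P")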
class GaussianInput:
"""
An object representing a Gaussian input file.
"""
# Commonly used regex patterns
_zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
_xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+" r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(
self,
mol,
charge=None,
spin_multiplicity=None,
title=None,
functional="HF",
basis_set="6-31G(d)",
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag="#P",
gen_basis=None,
):
"""
Args:
mol: Input molecule. It can either be a Molecule object,
a string giving the geometry in a format supported by Gaussian,
                or ``None``. If the molecule is ``None``, you will need to
read it in from a checkpoint. Consider adding ``CHK`` to the
``link0_parameters``.
charge: Charge of the molecule. If None, charge on molecule is used.
                Defaults to None. This allows a charge to be set for the input
                file independently of the molecule itself.
If ``mol`` is not a Molecule object, then you must specify a charge.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons. If ``mol`` is not a Molecule object, then you
must specify the multiplicity
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
self._mol = mol
# Determine multiplicity and charge settings
if isinstance(mol, Molecule):
self.charge = charge if charge is not None else mol.charge
nelectrons = mol.charge + mol.nelectrons - self.charge
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(self.charge, spin_multiplicity)
)
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
# Get a title from the molecule name
self.title = title if title else self._mol.composition.formula
else:
self.charge = charge
self.spin_multiplicity = spin_multiplicity
# Set a title
self.title = title if title else "Restart"
# Store the remaining settings
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#" + dieze_tag
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def _parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
# is detected, it is assumed for the remaining of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
contents: String representing an Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = " ".join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(float(toks[0]))
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(
mol,
charge=charge,
spin_multiplicity=spin_mult,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag,
)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i) for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append(f"{site.specie}")
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append(f"{self._mol[i].specie} {nn[0] + 1} B{i} {nn[1] + 1} A{i} {nn[2] + 1} D{i}")
outputvar.append(f"B{i}={bondlength:.6f}")
outputvar.append(f"A{i}={angle:.6f}")
outputvar.append(f"D{i}={dih:.6f}")
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
def to_s(x):
return f"{x:0.6f}"
outs = []
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string, " ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
Option: when cart_coords is set to True return the cartesian coordinates
instead of the z-matrix
"""
def para_dict_to_string(para, joiner=" "):
para_str = []
# sorted is only done to make unittests work reliably
for par, val in sorted(para.items()):
if val is None or val == "":
para_str.append(par)
elif isinstance(val, dict):
val_str = para_dict_to_string(val, joiner=",")
para_str.append(f"{par}=({val_str})")
else:
para_str.append(f"{par}={val}")
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
# Handle functional or basis set set to None, empty string or whitespace
func_str = "" if self.functional is None else self.functional.strip()
bset_str = "" if self.basis_set is None else self.basis_set.strip()
if func_str != "" and bset_str != "":
func_bset_str = f" {func_str}/{bset_str}"
else:
# don't use the slash if either or both are set as empty
func_bset_str = f" {func_str}{bset_str}".rstrip()
output.append(f"{self.dieze_tag}{func_bset_str} {para_dict_to_string(self.route_parameters)}")
output.append("")
output.append(self.title)
output.append("")
charge_str = "" if self.charge is None else f"{self.charge:.0f}"
multip_str = "" if self.spin_multiplicity is None else f" {self.spin_multiplicity:.0f}"
output.append(f"{charge_str}{multip_str}")
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
elif self._mol is not None:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append(f"{self.gen_basis}\n")
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag,
}
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: GaussianInput
"""
return GaussianInput(
mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"],
)
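# A minimal usage sketch (illustrative; `mol` is any pymatgen Molecule object):
#   gin = GaussianInput(mol, charge=0, spin_multiplicity=1, functional="B3LYP",
#                       basis_set="6-31G(d)", route_parameters={"Opt": None})
#   gin.write_file("mol.com", cart_coords=True)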
class GaussianOutput:
"""
Parser for Gaussian output files.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation in the standard orientation. If the
symmetry is not considered, the standard orientation is not printed out
and the input orientation is used instead. Check the `standard_orientation`
attribute.
.. attribute:: structures_input_orientation
All structures from the calculation in the input orientation or the
Z-matrix orientation (if an opt=z-matrix was requested).
.. attribute:: opt_structures
All optimized structures from the calculation in the standard orientation,
if the attribute 'standard_orientation' is True, otherwise in the input
or the Z-matrix orientation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
A list for each freq calculation and for each mode of a dict with
{
"frequency": freq in cm-1,
"symmetry": symmetry tag
"r_mass": Reduce mass,
"f_constant": force constant,
"IR_intensity": IR Intensity,
"mode": normal mode
}
        The normal mode is a 1D vector of dx, dy, dz for each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
If it is a relaxation run, indicates whether it is a minimum (Minimum)
or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_multiplicity
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
MO development coefficients on AO in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
Labels of AO for each atoms. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the
molecular_orbital array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian data resumes given at the end of the output file before
        the quotation. The resumes are given as strings.
.. attribute:: title
Title of the gaussian run.
.. attribute:: standard_orientation
If True, the geometries stored in the structures are in the standard
orientation. Else, the geometries are in the input orientation.
.. attribute:: bond_orders
Dict of bond order values read in the output file such as:
{(0, 1): 0.8709, (1, 6): 1.234, ...}
The keys are the atom indexes and the values are the Wiberg bond indexes
that are printed using `pop=NBOREAD` and `$nbo bndidx $end`.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
"""
Args:
filename: Filename of Gaussian output file.
"""
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
"""
:return: Final energy in Gaussian output.
"""
return self.energies[-1]
@property
def final_structure(self):
"""
:return: Final structure in Gaussian output.
"""
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+" r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(r"^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)")
end_mulliken_patt = re.compile(r"(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)")
std_orientation_patt = re.compile(r"Standard orientation")
input_orientation_patt = re.compile(r"Input orientation|Z-Matrix orientation")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)=" r"\s+([\d\.-]+)")
forces_on_patt = re.compile(r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
normal_mode_patt = re.compile(r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
bond_order_patt = re.compile(r"Wiberg bond index matrix in the NAO basis:")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
self.title = None
self.bond_orders = {}
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
routeline = ""
standard_orientation = False
parse_bond_order = False
input_structures = []
std_structures = []
geom_orientation = None
opt_structures = []
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line) or routeline != "":
if set(line.strip()) == {"-"}:
params = read_route_line(routeline)
self.functional = params[0]
self.basis_set = params[1]
self.route_parameters = params[2]
route_lower = {k.lower(): v for k, v in self.route_parameters.items()}
self.dieze_tag = params[3]
parse_stage = 1
else:
routeline += line.strip()
elif parse_stage == 1:
if set(line.strip()) == {"-"} and self.title is None:
self.title = ""
elif self.title == "":
self.title = line.strip()
elif charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_multiplicity = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
[f.readline() for i in range(3)]
line = f.readline()
sp = []
coords = []
while set(line.strip()) != {"-"}:
toks = line.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(x) for x in toks[3:6]])
line = f.readline()
read_coord = False
if geom_orientation == "input":
input_structures.append(Molecule(sp, coords))
elif geom_orientation == "standard":
std_structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v) for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e) for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e) for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if (not num_basis_found) and num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func, self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
# identify atom and OA labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in float_patt.findall(line)]
for j, c in enumerate(coeffs):
mat_mo[spin][i, nMO + j] = c
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and (
"Density Matrix:" in line or mo_coeff_patt.search(line)
):
end_mo = True
warnings.warn("POP=regular case, matrix coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
# build a more convenient array dict with MO
# coefficient of each atom in each MO.
# mo[Spin][OM j][atom i] =
# {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [
[{} for iat in range(len(self.atom_basis_labels))] for j in range(self.num_basis_func)
]
for j in range(self.num_basis_func):
i = 0
for iat, labels in enumerate(self.atom_basis_labels):
for label in labels:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append(
{
"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": [],
}
)
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float, float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float, float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float, float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float, float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float, float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3), ifreqs):
frequencies[ifreq]["mode"].extend(values[i : i + 3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(input_structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E")) for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif parse_bond_order:
# parse Wiberg bond order
line = f.readline()
line = f.readline()
nat = len(input_structures[0])
matrix = []
for iat in range(nat):
line = f.readline()
matrix.append([float(v) for v in line.split()[2:]])
self.bond_orders = {}
for iat in range(nat):
for jat in range(iat + 1, nat):
self.bond_orders[(iat, jat)] = matrix[iat][jat]
parse_bond_order = False
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization error",
"Convergence failure": "SCF convergence error",
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D", "E")))
elif oniom_patt.search(line):
                        m = oniom_patt.search(line)
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
standard_orientation = True
geom_orientation = "standard"
read_coord = True
elif input_orientation_patt.search(line):
geom_orientation = "input"
read_coord = True
elif "Optimization completed." in line:
line = f.readline()
if " -- Stationary point found." not in line:
warnings.warn(
"\n" + self.filename + ": Optimization complete but this is not a stationary point"
)
if standard_orientation:
opt_structures.append(std_structures[-1])
else:
opt_structures.append(input_structures[-1])
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
# security if \\@ not in one line !
if line == "\n":
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
elif bond_order_patt.search(line):
parse_bond_order = True
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
dic = {int(m.group(1)): [m.group(2), float(m.group(3))]}
mulliken_charges.update(dic)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
# store the structures. If symmetry is considered, the standard orientation
# is used. Else the input orientation is used.
if standard_orientation:
self.structures = std_structures
self.structures_input_orientation = input_structures
else:
self.structures = input_structures
self.structures_input_orientation = input_structures
# store optimized structure in input orientation
self.opt_structures = opt_structures
if not terminated:
warnings.warn("\n" + self.filename + ": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy" r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+" r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps" r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm[f"{m.group(1)} energy"] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm["Total energy"] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {
"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure),
}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_multiplicity
vin = {
"route": self.route_parameters,
"functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm,
}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections,
}
d["output"] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
"""return a list of float from a list of string"""
return [float(v) for v in l]
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
coord_patt = re.compile(r"^\s*(\w+)((\s*[+-]?\d+\.\d+)+)")
# data dict return
data = {"energies": [], "coords": {}}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while coord_patt.match(line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if not re.search(r"^\s+((\s*\d+)+)", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: [] for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i + 1])
line = f.readline()
else:
line = f.readline()
return data
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read the excitation energies after a TD-DFT calculation.
Returns:
            A list of tuples, one per transition, such as
            [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = []
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum. Transitions are plotted
        as vertical lines and as a sum of normal functions of width sigma. The
        broadening is applied in energy and the spectrum is plotted as a function
        of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
            where values are lists of abscissa (energies, lambda) and
the sum of gaussian functions (xas).
A matplotlib plot.
"""
from scipy.stats import norm
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min(val[0] for val in transitions) - 5.0 * sigma
maxval = max(val[0] for val in transitions) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.0e9 for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
            # use the Gaussian probability density; calling norm(...) directly would
            # return a frozen distribution object rather than an array of values
            spectre += trans[2] * norm.pdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines(
[val[1] for val in transitions],
0.0,
[val[2] for val in transitions],
color="blue",
label="transitions",
linewidth=2,
)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf", sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(
self,
mol=None,
charge=None,
spin_multiplicity=None,
title=None,
functional=None,
basis_set=None,
route_parameters=None,
input_parameters=None,
link0_parameters=None,
dieze_tag=None,
cart_coords=False,
):
"""
        Create a new input object using, by default, the last geometry read from
        the output file and the same calculation parameters. Arguments
        are the same as for the GaussianInput class.
        Returns:
            gauinp (GaussianInput) : the GaussianInput object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_multiplicity
if not title:
title = self.title
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route_parameters
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
return GaussianInput(
mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag,
)
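    # Illustrative sketch of reusing a finished calculation as the starting
    # point for a new one (the route parameters and file names are assumptions,
    # not values taken from this file; `write_file` is assumed to come from the
    # companion GaussianInput class):
    #
    #   new_input = gout.to_input(route_parameters={"opt": "", "freq": ""})
    #   new_input.write_file("restart.com")
    #
    # Any argument left unset falls back to the value parsed from the original
    # output, as implemented above.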
|
py
|
1a5bcbb888719510f2e3afde4db4e6faa719f7a7
|
from decimal import (
Decimal,
)
import pytest
from eth_abi import (
encode_abi,
)
from newchain_web3._utils.filters import (
match_fn,
)
@pytest.mark.parametrize(
"data,expected,match_data_and_abi",
(
(
(-12345, 000, 111, Decimal(2) + Decimal(1) / Decimal(10)),
False,
(
("int", (-12345,)),
("uint32", (444,)),
("int", (565,)),
("ufixed256x4", (Decimal(1.66660),))
)
),
(
(-12345, 000, 111, Decimal(2) + Decimal(1) / Decimal(10)),
True,
(
("int", (-12345,)),
("uint32", None),
("int", None),
("ufixed256x4", None)
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
False,
(
("string", ("eee",)),
("string", ("aye",)),
("string", ("sea",)),
("bytes", (b"\x00",))
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
True,
(
("string", ("aye",)),
("string", ("bee",)),
("string", ("sea",)),
("bytes", (b"\xde\xee",))
)
),
(
("aye", "bee", "sea", b"\xde\xee"),
True,
(
("string", None),
("string", None),
("string", None),
("bytes", None)
)
),
(
(("aye", "bee"), ("sea", "dee")),
True,
(
("string[]", (("aye", "bee"),)),
("string[]", (("sea", "dee"),)),
)
),
(
(["eee", "eff"], ["gee", "eich"]),
False,
(
("string[]", (("aye", "bee"),)),
("string[]", (("sea", "dee"),)),
)
),
)
)
def test_match_fn_with_various_data_types(data, expected, match_data_and_abi):
abi_types, match_data = zip(*match_data_and_abi)
encoded_data = encode_abi(abi_types, data)
assert match_fn(match_data_and_abi, encoded_data) == expected
def test_wrong_type_match_data():
data = ("hello", "goodbye")
match_data_and_abi = (
("string", (50505050,)),
("string", (50505050,)),
)
abi_types, match_data = zip(*match_data_and_abi)
encoded_data = encode_abi(abi_types, data)
with pytest.raises(ValueError):
match_fn(match_data_and_abi, encoded_data)
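# For reference, match_fn takes the same (abi_type, match_values) pairs used in
# the parametrized cases above together with the ABI-encoded log data; a None
# match value acts as a wildcard for that position, while match values of the
# wrong type (as in this test) raise ValueError.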
|
py
|
1a5bcda4f124b4a3a8d62a49c4053d1690fd607d
|
import storageManager
from tkinter import *
from os import path
from tkinter import filedialog
from tkinter import Menu
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox
import os
#from sintactico import ejecutar_analisis
import reportes.RealizarReportes
import reportes.reportesimbolos as rs
import reportes.RealizarGramatica
from Instrucciones.TablaSimbolos.Tabla import Tabla
from Instrucciones.TablaSimbolos.Arbol import Arbol
from Instrucciones.Excepcion import Excepcion
from Instrucciones.Sql_create.CreateDatabase import CreateDatabase
from storageManager.jsonMode import *
from Codigo_3D import FuncionesPara3D
from Codigo_3D import Optimizacion
from Instrucciones.TablaSimbolos.Simbolo3D import Simbolo3d
import sintactico
import graficarArbol
import sintacticoGraph
global arbol
arbol = None
global tablaSym
tablaSym = None
'''
instruccion = CreateDatabase("bd1",None,"TRUE",None,None,None,None, 1,2)
instruccion.ejecutar(None,None)
# ---------------------------- TEST OF A SUM ----------------------------
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Expresiones import Primitivo, Logica
p1 = Primitivo.Primitivo(True,Tipo("",Tipo_Dato.BOOLEAN),1,1)
p2 = Primitivo.Primitivo(True,Tipo("",Tipo_Dato.BOOLEAN),1,1)
a = Arbol([])
op = Logica.Logica(p1,p2,'AND',1,2)
print('Resultado logica: ' + str(suma.ejecutar(None,a)))
# ---------------------------- TEST OF A SUM WITH A TYPE ERROR ----------------------------
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Expresiones import Primitivo, Aritmetica
p1 = Primitivo.Primitivo(1,Tipo("",Tipo_Dato.BOOLEAN),1,1)
p2 = Primitivo.Primitivo(2,Tipo("",Tipo_Dato.INTEGER),1,1)
a = Arbol([])
suma = Aritmetica.Aritmetica(p1,p2,'+',1,2)
suma.ejecutar(None,a)
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(a.excepciones)
'''
class interfaz():
def __init__(self):
        ##############################################MAIN WINDOW####################################
self.window=Tk()
#self.window.configure(background="#04DE5E")
img = PhotoImage(file='img/icons/postgesql2.png')
self.window.tk.call('wm', 'iconphoto', self.window._w, img)
#img = PhotoImage(file='img/icons/Postgresql.ico')
#self.window.tk.call('wm', 'iconphoto', self.window._w, img)
self.window.configure(background="#6a8d92")
self.window.title("Query Tool - Grupo 7")
#w, h = self.window.winfo_screenwidth()/2, self.window.winfo_screenheight()/2
w, h = 1370,670
self.window.geometry("%dx%d+0+0" % (w, h))
##############################################MENU####################################
menu = Menu(self.window)
new_item = Menu(menu,tearoff=0)
new_item.add_command(label='Abrir', command=self.abrir_click)
new_item.add_command(label='Guardar', command=self.guardar_click)
new_item.add_command(label='Guardar Como...', command=self.guardar_como_click)
#new_item.add_separator()
#new_item.add_command(label='Edit')
menu.add_cascade(label='Archivo', menu=new_item)
mnreportes = Menu(menu,tearoff=0)
mnreportes.add_command(label='Tabla de Errores', command=self.tblerrores_click)
mnreportes.add_command(label='Tabla de Simbolos', command=self.tblsimbolos_click)
mnreportes.add_command(label='AST', command=self.ast_click)
mnreportes.add_command(label='Reporte Gramatical', command=self.repDin_click)
menu.add_cascade(label='Reportes', menu=mnreportes)
menu3d = Menu(menu,tearoff=0)
menu3d.add_command(label='Traducir C3D', command=self.traducirc3d_click)
menu3d.add_command(label='Ejecutar C3D', command=self.ejecutarc3d_click)
menu3d.add_command(label='Optimizar C3D', command=self.optimizarc3d_click)
menu.add_cascade(label='3 Direcciones', menu=menu3d)
self.window.config(menu=menu)
        ##############################################BUTTONS####################################
img2 = PhotoImage(file='img/icons/AnalyzeMP.png')
btnanalizar = Button(self.window,image=img2 , bg="#6a8d92",height=35, width=40, command=self.btnanalizar_click)
btnanalizar.place(x=20,y=4)
img3 = PhotoImage(file='img/icons/play32.png')
btnejecutar = Button(self.window,image = img3 , bg="#6a8d92",height=35, width=40,command=self.btnejecutar_click)
btnejecutar.place(x=115,y=5)
        ##############################################TABS####################################
self.tab = ttk.Notebook(self.window)
self.tab.pack(fill='both',padx=20, pady=[50,20])
self.tab_frame =[]
self.txtentrada =[]
self.txtsalida =[]
self.crear_tab("","Nuevo.sql")
lblentrada= Label(self.window,text="Archivo de Entrada:",height=1, width=17,bg='#80b192')
lblentrada.place(x=20,y=80)
lblsalida= Label(self.window,text="Consola de Salida:",height=1, width=15,bg='#80b192')
lblsalida.place(x=20,y=350)
        #resize the window elements
#self.window.bind('<Configure>',self.resizeEvent)
        #Object that stores the current file
self.file=""
self.window.mainloop()
def ejecutar(self):
print("Hello World!")
print("Estoy ejecutando el main")
f = open("./entrada.txt", "r")
input = f.read()
#lista = "" : ""
#insert(database: "world", table: "countries", register: lista)
#print(input)
#parser.parse(input)
        #Inserts "Archivo Analizado" into txtsalida
    ##############################################WINDOW RESIZE EVENT####################################
def resizeEvent(self, event):
print(event.width,event.height)
    ##############################################MENU BUTTON EVENTS####################################
def traducirc3d_click(self):
global arbol
arbol = None
global tablaSym
dropAll()
os.system ("cls")
        #Clear the contents of txtsalida
self.txtsalida[self.tab.index("current")].delete(1.0,END)
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
tablaGlobal = Tabla(None)
inst = sintactico.ejecutar_analisis(input)
arbol = Arbol(inst)
resultado = ""
for i in arbol.instrucciones:
res = i.traducir(tablaGlobal,arbol,"")
if isinstance(res, Simbolo3d):
resultado += res.codigo
else:
resultado += res
FuncionesPara3D.FuncionesPara3D.GenerarArchivo(resultado)
tablaSym = tablaGlobal
print("Archivo Traducido")
pass
def ejecutarc3d_click(self):
dropAll()
'''
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("CREATE DATABASE IF NOT EXISTS test\
OWNER = 'root'\
MODE = 1;")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("USE test;")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("CREATE TABLE persona (\
idpersona integer NOT NULL primary key,\
nombre varchar(15));")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(1,\"Carlos\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(2,\"Maria\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("insert into persona values(3,\"David\");")
FuncionesPara3D.FuncionesPara3D.ejecutarsentecia("SELECT * FROM persona;")'''
from Codigo_3D import Codigo3D
#c3d.ejecutar()
mensaje = ""
for m in FuncionesPara3D.arbol.consola:
mensaje += m + '\n'
self.txtsalida[self.tab.index("current")].insert(INSERT,mensaje)
pass
def optimizarc3d_click(self):
op = Optimizacion.Optimizacion()
op.Optimizar()
op.GenerarReporte()
pass
def abrir_click(self):
try:
self.file = filedialog.askopenfilename(initialdir= os.path.dirname(__file__))
archivo=open(self.file,"r")
entrada=archivo.read()
archivo.close()
self.crear_tab(entrada,self.file.split("/").pop())
except FileNotFoundError:
messagebox.showwarning("Abrir","No selecciono ningún Archivo.")
except UnicodeDecodeError:
messagebox.showerror('Abrir','El Archivo seleccionado no es admitido.')
def guardar_click(self):
try:
archivo=open(self.file,"w")
archivo.write(self.txtentrada[self.tab.index("current")].get(1.0,END))
messagebox.showinfo('Aviso','Se Guardo el Archivo Correctamente!')
except FileNotFoundError:
messagebox.showerror('Guardar','No abrio ningun Archivo.')
except:
messagebox.showerror("Error","Contacte al Administrador del sistema.")
def guardar_como_click(self):
self.file = filedialog.askdirectory(initialdir= path.dirname(__file__))
archivo=open(self.file+"/"+self.tab.tab(self.tab.select(),"text"),"w")
archivo.write(self.txtentrada[self.tab.index("current")].get(1.0,END))
print(self.file+"/"+self.tab.tab(self.tab.select(),"text"))
print("guardar_como")
def tblerrores_click(self):
if len(sintactico.lista_lexicos)==0:
messagebox.showinfo('Tabla de Errores','La Entrada no Contiene Errores!')
else:
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
def tblsimbolos_click(self):
        # Function that creates the symbol table report; it receives a table as a parameter.
global arbol
global tablaSym
if arbol is not None:
rs.crear_tabla(arbol, tablaSym)
else:
rs.crear_tabla(FuncionesPara3D.arbol, FuncionesPara3D.tablaGlobal)
arbol = None
def ast_click(self):
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
inst = sintacticoGraph.ejecutar_analisis(input)
if len(sintactico.lista_lexicos)>0:
messagebox.showerror('Tabla de Errores','La Entrada Contiene Errores!')
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
grafica = graficarArbol.GraphArbol(inst)
grafica.crearArbol()
print("ast")
def repDin_click(self):
global arbol
reportes.RealizarGramatica.RealizarGramatica.generar_reporte_gamatical(arbol.lRepDin)
arbol = None
    ##############################################FRAME BUTTON EVENTS####################################
def btnanalizar_click(self):
global arbol
arbol = None
global tablaSym
dropAll()
os.system ("cls")
        #Clear the contents of txtsalida
self.txtsalida[self.tab.index("current")].delete(1.0,END)
        #Inserts "Archivo Analizado" into txtsalida
#self.txtsalida[self.tab.index("current")].insert(INSERT,"Archivo Analizado")
        #Get the contents of the input text box
#print(self.txtentrada[self.tab.index("current")].get(1.0,END))
input=self.txtentrada[self.tab.index("current")].get(1.0,END)
tablaGlobal = Tabla(None)
inst = sintactico.ejecutar_analisis(input)
arbol = Arbol(inst)
if len(sintactico.lista_lexicos)>0:
messagebox.showerror('Tabla de Errores','La Entrada Contiene Errores!')
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(sintactico.lista_lexicos)
        # Loop over all the instructions collected by the grammar.
arbol.lRepDin.append("<init> ::= <instrucciones>")
arbol.lRepDin.append("<instrucciones> ::= <instrucciones> <instruccion>")
arbol.lRepDin.append("<instrucciones> ::= <instruccion>")
for i in arbol.instrucciones:
            # The variable resultado tells us whether a return, break or continue appears outside of its scope.
resultado = i.ejecutar(tablaGlobal,arbol)
        # After executing all the instructions, check that there are no semantic errors.
if len(arbol.excepciones) != 0:
reportes.RealizarReportes.RealizarReportes.generar_reporte_lexicos(arbol.excepciones)
        # Loop that prints all the messages stored in the consola variable.
tablaSym = tablaGlobal
mensaje = ''
for m in arbol.consola:
mensaje += m + '\n'
self.txtsalida[self.tab.index("current")].insert(INSERT,mensaje)
def btnejecutar_click(self):
print("se va ejecutar el archivo")
    ##############################################CREATE TABS IN THE NOTEBOOK####################################
def crear_tab(self,entrada,nombre):
self.tab_frame.append(Frame(self.tab,width=200, height=700,background="#80b192"))
self.tab_frame[-1].pack(fill='both', expand=1)
self.tab_frame[-1].config(bd=5)
self.tab.add(self.tab_frame[-1],text=nombre)
self.txtentrada.append(scrolledtext.ScrolledText(self.tab_frame[-1],width=162,height=15))
self.txtentrada[-1].place(x=0,y=25)
self.txtentrada[-1].insert(INSERT,entrada+"")
#self.txtentrada[-1].bind("<MouseWheel>", self.OnMouseWheel)
self.txtsalida.append(scrolledtext.ScrolledText(self.tab_frame[-1],width=162,height=15,background="#070707",foreground="#FEFDFD"))
self.txtsalida[-1].place(x=0,y=298)
        #file name
#print(self.tab.tab(self.tab.select(),"text"))
self.tab.select(int(len(self.tab_frame)-1))
#self.txtsalida[-1].insert(INSERT,entrada+"")
#def OnMouseWheel(self,event):
# print("scrool mouse")
def main():
mi_app = interfaz()
return(0)
if __name__ == '__main__':
main()
|
py
|
1a5bce4babdb2eafded4af5daa1d90492c2de2d7
|
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import codecs
import collections
import csv
import datetime
import os
import re
import shutil
import stat
import jinja2
import six
import yaml
from guild import guildfile
from guild import index2 as indexlib
from guild import run_util
from guild import util
DEFAULT_DEST_HOME = "published-runs"
DEFAULT_TEMPLATE = "default"
COPY_DEFAULT_FILES = 1
COPY_ALL_FILES = 2
class PublishError(Exception):
pass
class TemplateError(PublishError):
def __init__(self, e):
super(TemplateError, self).__init__(e)
self._e = e
def __str__(self):
e = self._e
msg = e.filename
if hasattr(e, "lineno"):
msg += ":" + str(e.lineno)
if e.message:
msg += ": " + e.message
return msg
class GenerateError(PublishError):
def __init__(self, e, template):
super(GenerateError, self).__init__(e)
self._e = e
self._template = template
def __str__(self):
return "%s: %s" % (
_format_template_files(self._template),
self._e.message)
def _format_template_files(t):
if len(t.files) == 1:
basename = t.files[0]
else:
basename = "{%s}" % ",".join(sorted(t.files))
return os.path.join(t.path, basename)
class RunFilters(object):
IMG_PATTERN = re.compile(
r"\.(png|gif|jpe?g|tiff?|bmp|webp)",
re.IGNORECASE)
def __init__(self, run_dest):
self.run_dest = run_dest
def install(self, env):
env.filters.update({
"csv_dict_rows": self.csv_dict_rows,
"empty": self.empty,
"file_size": self.file_size,
"flag_val": self.flag_val,
"nbhyph": self.nbhyph,
"nbsp": self.nbsp,
"runfile_link": self.runfile_link,
"scalar_key": self.scalar_key,
"short_id": self.short_id,
"utc_date": self.utc_date,
})
@staticmethod
def empty(val):
if val in (None, "") or isinstance(val, jinja2.Undefined):
return ""
return val
@staticmethod
def flag_val(val):
if isinstance(val, jinja2.Undefined):
return ""
return run_util.format_attr(val)
def runfile_link(self, path):
if self.run_dest is None:
            raise TemplateError(
                "runfile_link cannot be used in this context "
                "(not publishing a run)")
if not isinstance(path, six.string_types):
return ""
maybe_runfile = os.path.join(self.run_dest, "runfiles", path)
if os.path.isfile(maybe_runfile):
return "runfiles/" + path
return None
@staticmethod
def utc_date(val, unit="s"):
if not isinstance(val, (int, float) + six.string_types):
return ""
try:
val = int(val)
except (ValueError, TypeError):
return ""
else:
if unit == "s":
ts = val * 1000000
elif unit == "ms":
ts = val * 1000
elif unit == "us":
ts = val
else:
raise ValueError(
"unsupported unit %r (expected s, ms, or us)"
% unit)
return util.utcformat_timestamp(ts)
@staticmethod
def file_size(val):
if not isinstance(val, (int, float) + six.string_types):
return ""
try:
bytes = int(val)
except (ValueError, TypeError):
return ""
else:
return util.format_bytes(bytes)
@staticmethod
def scalar_key(s):
return run_util.run_scalar_key(s)
@staticmethod
def csv_dict_rows(csv_rows):
keys = csv_rows[0]
return [dict(zip(keys, row)) for row in csv_rows[1:]]
@staticmethod
def nbsp(x):
if not x:
return " "
return x
@staticmethod
def short_id(id):
if not isinstance(id, six.string_types):
return ""
return id[:8]
@staticmethod
def nbhyph(s):
if not s:
return s
return s.replace("-", "‑")
class Template(object):
def __init__(self, path, run_dest=None, filters=None):
if not os.path.exists(path):
raise RuntimeError("invalid template source: %s" % path)
self.path = path
self._file_templates = sorted(
_init_file_templates(path, run_dest, filters))
@property
def files(self):
return [t[0] for t in self._file_templates]
def generate(self, dest, vars):
util.ensure_dir(dest)
for relpath, src, template in self._file_templates:
file_dest = os.path.join(dest, relpath)
util.ensure_dir(os.path.dirname(file_dest))
if template is None:
shutil.copyfile(src, file_dest)
else:
_render_template(template, vars, file_dest)
def _init_file_templates(path, run_dest=None, filters=None):
ts = []
for root, _dirs, files in os.walk(path):
for name in files:
if name[:1] == "_":
continue
abspath = os.path.join(root, name)
relpath = os.path.relpath(abspath, path)
template = _init_file_template(abspath, run_dest, filters)
ts.append((relpath, abspath, template))
return ts
def _init_file_template(path, run_dest=None, filters=None):
if not util.is_text_file(path):
return None
dirname, basename = os.path.split(path)
templates_home = _local_path("templates")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader([dirname, templates_home]),
autoescape=jinja2.select_autoescape(['html', 'xml']))
RunFilters(run_dest).install(env)
if filters:
env.filters.update(filters)
try:
return env.get_template(basename)
except jinja2.TemplateError as e:
raise TemplateError(e)
def _render_template(template, vars, dest):
with open(dest, "w") as f:
for part in template.generate(vars):
f.write(part)
f.write(os.linesep)
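# Rough usage sketch of the Template/_render_template helpers above (the paths
# and vars are hypothetical; publish_run below is the real entry point):
#
#   template = Template(_local_path("templates/publish-default"))
#   template.generate("/tmp/preview", {"run": {"id": "abc123"}})
#
# Every non-underscore file under the template directory is either copied
# verbatim (non-text files) or rendered through jinja2 with the given vars.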
PublishRunState = collections.namedtuple(
"PublishRunState", [
"run",
"opdef",
"copy_files",
"include_links",
"formatted_run",
"dest_home",
"template",
"run_dest",
"md5s",
])
def publish_run(run, dest=None, template=None, copy_files=None,
include_links=False, md5s=True, formatted_run=None):
state = _init_publish_run_state(
run,
dest,
template,
copy_files,
include_links,
md5s,
formatted_run)
_init_published_run(state)
_publish_run_guild_files(state)
_copy_sourcecode(state)
_copy_runfiles(state)
_generate_template(state)
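# Minimal sketch of driving the publish pipeline (assumes `a_run` is a Guild
# run object obtained elsewhere):
#
#   publish_run(a_run, dest="published-runs", copy_files=COPY_DEFAULT_FILES)
#   refresh_index("published-runs")
#
# publish_run writes run.yml, flags.yml, scalars.csv, output.txt and the file
# listings into <dest>/<run id>/ and then renders the selected template there;
# refresh_index (defined further below) regenerates the top-level README.md.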
def _init_publish_run_state(run, dest, template, copy_files, include_links,
md5s, formatted_run):
dest_home = dest or DEFAULT_DEST_HOME
opdef = _run_opdef(run)
run_dest = _published_run_dest(dest_home, run)
template = _init_template(template, opdef, run_dest)
if not formatted_run:
formatted_run = _format_run_for_publish(run)
return PublishRunState(
run,
opdef,
copy_files,
include_links,
formatted_run,
dest_home,
template,
run_dest,
md5s,
)
def _run_opdef(run):
try:
gf = guildfile.for_run(run)
except (guildfile.NoModels, TypeError):
return None
else:
assert run.opref, run.path
try:
m = gf.models[run.opref.model_name]
except KeyError:
return None
else:
return m.get_operation(run.opref.op_name)
def _init_template(template, opdef, run_dest):
template_spec = util.find_apply([
lambda: template,
lambda: _opdef_template(opdef)
])
template_path = _find_template(template_spec, opdef)
return Template(template_path, run_dest)
def _opdef_template(opdef):
return util.find_apply([
lambda: _opdef_publish_template(opdef),
lambda: DEFAULT_TEMPLATE
])
def _opdef_publish_template(opdef):
if not opdef or not opdef.publish:
return None
return opdef.publish.template
def _find_template(name, opdef):
return util.find_apply([
lambda: _abs_template(name),
lambda: _default_template(name),
lambda: _project_template(name, opdef),
lambda: _cannot_find_template_error(name)])
def _abs_template(name):
if name[:1] == "." and os.path.exists(name):
return name
return None
def _default_template(name):
if name == "default":
return _local_path("templates/publish-default")
return None
def _local_path(path):
return os.path.join(os.path.dirname(__file__), path)
def _project_template(name, opdef):
path = os.path.join(opdef.guildfile.dir, name)
if os.path.exists(path):
return path
return None
def _cannot_find_template_error(name):
raise PublishError("cannot find template %s" % name)
def _published_run_dest(dest_home, run):
return os.path.join(dest_home, run.id)
def _format_run_for_publish(run):
frun = run_util.format_run(run)
if not frun["stopped"]:
frun["duration"] = ""
return frun
def _init_published_run(state):
"""Ensure empty target directory for published run.
As a side effect, lazily creates `state.dest_home` and creates
`.guild-nocopy` to ensure that the published runs home is not
considered by Guild for source snapshots.
"""
util.ensure_dir(state.dest_home)
util.touch(os.path.join(state.dest_home, ".guild-nocopy"))
if os.path.exists(state.run_dest):
util.safe_rmtree(state.run_dest)
os.mkdir(state.run_dest)
def _publish_run_guild_files(state):
_publish_run_info(state)
_publish_flags(state)
_publish_scalars(state)
_publish_output(state)
_publish_sourcecode_list(state)
_publish_runfiles_list(state)
def _publish_run_info(state):
"""Write run.yml to run publish dest.
This function should be kept in sync with output generated by
`guild runs info` - minus system-specific values (e.g. run_dir and
pid) and flags (which are written to a separate file).
"""
run = state.run
frun = state.formatted_run
path = os.path.join(state.run_dest, "run.yml")
encode = lambda x: util.encode_yaml(x).rstrip()
fmt_ts = util.utcformat_timestamp
started = run.get("started")
stopped = run.get("stopped")
with codecs.open(path, "w", "utf-8") as f:
f.write("id: %s\n" % run.id)
f.write("operation: %s\n" % encode(frun["operation"]))
f.write("status: %s\n" % encode(frun["status"]))
f.write("started: %s\n" % fmt_ts(started))
f.write("stopped: %s\n" % fmt_ts(stopped))
f.write("time: %s\n" % _format_time(started, stopped))
f.write("marked: %s\n" % encode(frun["marked"]))
f.write("label: %s\n" % encode(run.get("label")))
f.write("command: %s\n" % encode(frun["command"]))
f.write("exit_status: %s\n" % encode(frun["exit_status"]))
def _format_time(started, stopped):
if started and stopped:
return util.format_duration(started, stopped)
return ""
def _publish_flags(state):
flags = state.run.get("flags") or {}
dest = os.path.join(state.run_dest, "flags.yml")
_save_yaml(flags, dest)
def _save_yaml(val, path):
with open(path, "w") as f:
yaml.safe_dump(
val, f,
default_flow_style=False,
indent=2,
encoding="utf-8",
allow_unicode=True)
def _publish_scalars(state):
cols = [
"prefix",
"tag",
"count",
"total",
"avg_val",
"first_val",
"first_step",
"last_val",
"last_step",
"min_val",
"min_step",
"max_val",
"max_step",
]
dest = os.path.join(state.run_dest, "scalars.csv")
scalars = _run_scalars(state)
with open(dest, "w") as f:
out = csv.writer(f, lineterminator="\n")
out.writerow(cols)
for s in scalars:
out.writerow([s[col] for col in cols])
def _run_scalars(state):
index = indexlib.RunIndex()
index.refresh([state.run], ["scalar"])
return list(index.run_scalars(state.run))
def _publish_output(state):
src = state.run.guild_path("output")
if os.path.isfile(src):
dest = os.path.join(state.run_dest, "output.txt")
shutil.copyfile(src, dest)
def _publish_sourcecode_list(state):
src = state.run.guild_path("sourcecode")
dest = os.path.join(state.run_dest, "sourcecode.csv")
paths = _dir_paths(src, skip_guildfiles=True)
with open(dest, "w") as f:
_write_paths_csv(paths, src, state.md5s, f)
def _dir_paths(dir, skip_guildfiles=False):
seen = set()
paths = []
for root, dirs, names in os.walk(dir, followlinks=True):
if skip_guildfiles:
_remove_guild_dir(dirs)
for name in dirs + names:
path = os.path.join(root, name)
abs_path = os.path.abspath(path)
if abs_path in seen:
continue
seen.add(abs_path)
paths.append(path)
paths.sort()
return paths
def _remove_guild_dir(dirs):
try:
dirs.remove(".guild")
except ValueError:
pass
def _write_paths_csv(paths, root, md5s, f):
out = csv.writer(f, lineterminator="\n")
out.writerow(["path", "type", "size", "mtime", "md5"])
for path in paths:
out.writerow(_path_row(path, root, md5s))
def _path_row(path, root, md5):
try:
st = os.stat(path)
except OSError:
st = None
try:
lst = os.lstat(path)
except OSError:
lst = None
return [
os.path.relpath(path, root),
_path_type(st, lst),
st.st_size if st else "",
_path_mtime(st),
_path_md5(path, st) if md5 else "",
]
def _path_type(st, lst):
parts = []
if st:
if stat.S_ISREG(st.st_mode):
parts.append("file")
elif stat.S_ISDIR(st.st_mode):
parts.append("dir")
else:
parts.append("other")
if lst:
if stat.S_ISLNK(lst.st_mode):
parts.append("link")
return " ".join(parts)
def _path_mtime(st):
if not st:
return ""
return int((st.st_mtime + _utc_offset()) * 1000000)
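# _utc_offset caches the local-vs-UTC offset (in whole seconds) in module
# globals so that _path_mtime, which runs once per listed file, does not
# recompute it for every path.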
def _utc_offset():
try:
return globals()["__utc_offset"]
except KeyError:
globals()["__utc_offset"] = offset = int(round(
(datetime.datetime.now() -
datetime.datetime.utcnow()).total_seconds()))
return offset
def _path_md5(path, st):
if not st or not stat.S_ISREG(st.st_mode):
return ""
return util.file_md5(path)
def _publish_runfiles_list(state):
dest = os.path.join(state.run_dest, "runfiles.csv")
paths = _dir_paths(state.run.dir, skip_guildfiles=True)
with open(dest, "w") as f:
_write_paths_csv(paths, state.run.dir, state.md5s, f)
def _copy_sourcecode(state):
src = state.run.guild_path("sourcecode")
if not os.path.isdir(src):
return
dest = os.path.join(state.run_dest, "sourcecode")
shutil.copytree(src, dest)
class PublishRunVars(object):
def __init__(self, state):
self._state = state
self._cache = {}
self._keys = [
"flags",
"output",
"run",
"runfiles",
"scalars",
"sourcecode",
]
def keys(self):
return self._keys
def __getitem__(self, name):
try:
return self._cache[name]
except KeyError:
self._cache[name] = val = self._load(name)
return val
def _load(self, name):
return util.find_apply([
self._load_yaml,
self._load_csv,
self._load_txt], name)
def _load_yaml(self, name):
path = os.path.join(self._state.run_dest, name + ".yml")
if not os.path.exists(path):
return None
return yaml.safe_load(open(path, "r"))
def _load_csv(self, name):
path = os.path.join(self._state.run_dest, name + ".csv")
if not os.path.exists(path):
return None
with open(path, "r") as f:
return list(csv.reader(f))
def _load_txt(self, name):
path = os.path.join(self._state.run_dest, name + ".txt")
if not os.path.exists(path):
return None
return open(path, "r").read()
class CopyRunFilesFilter(object):
def __init__(self, state):
self._run_dir = state.run.dir
self._include_links = state.include_links
def delete_excluded_dirs(self, root, dirs):
self._delete_guild_dir(dirs)
self._maybe_delete_links(root, dirs)
@staticmethod
def _delete_guild_dir(dirs):
try:
dirs.remove(".guild")
except ValueError:
pass
def _maybe_delete_links(self, root, dirs):
if self._include_links:
return
for name in list(dirs):
if os.path.islink(os.path.join(root, name)):
dirs.remove(name)
def default_select_path(self, path):
if os.path.islink(path):
return self._include_links
return True
@staticmethod
def pre_copy(_to_copy):
pass
def _copy_runfiles(state):
if not state.copy_files:
return
util.select_copytree(
state.run.dir,
_runfiles_dest(state),
_copy_runfiles_config(state),
CopyRunFilesFilter(state))
def _runfiles_dest(state):
return os.path.join(state.run_dest, "runfiles")
def _copy_runfiles_config(state):
if state.copy_files == COPY_ALL_FILES or not state.opdef:
return []
return [state.opdef.publish.files]
def _generate_template(state):
template = state.template
render_vars = PublishRunVars(state)
try:
template.generate(state.run_dest, render_vars)
except jinja2.TemplateRuntimeError as e:
raise GenerateError(e, template)
except jinja2.exceptions.TemplateNotFound as e:
e.message = "template not found: %s" % e.message
raise GenerateError(e, template)
def _template_config(opdef):
if not opdef or not opdef.publish:
return {}
config = opdef.publish.get("config") or {}
return {
name.replace("-", "_"): val
for name, val in config.items()
}
def refresh_index(dest):
dest_home = dest or DEFAULT_DEST_HOME
index_template_path = _local_path("templates/runs-index/README.md")
index_template = _init_file_template(index_template_path)
assert index_template, index_template_path
index_path = os.path.join(dest_home, "README.md")
runs = _published_runs(dest_home)
_render_template(index_template, {"runs": runs}, index_path)
def _published_runs(dest_home):
runs = []
for name in os.listdir(dest_home):
run_yml = os.path.join(dest_home, name, "run.yml")
if not os.path.exists(run_yml):
continue
info = yaml.safe_load(open(run_yml, "r"))
runs.append(info)
return sorted(runs, key=lambda run: run.get("started"), reverse=True)
|
py
|
1a5bd0f3396e08eb5bde8f819643200c773aabdc
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class RangeOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsRangeOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = RangeOptions()
x.Init(buf, n + offset)
return x
@classmethod
def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# RangeOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def RangeOptionsStart(builder): builder.StartObject(0)
def RangeOptionsEnd(builder): return builder.EndObject()
|
py
|
1a5bd311ffaafbd2b9942438b071353a71ff73ed
|
import argparse
import torch
import torch.backends.cudnn
import torch.utils.data
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import os
import shutil
import pandas as pd
from os.path import join
import cv2
from pathlib2 import Path
from itertools import izip
import config
from vgg_unet import UnetVgg11
from vgg_unet import Vgg11a
import data_utils
from data_utils import rle_encode
from data_utils import rle_to_string
from unet import Unet
from unet import Unet5
from dataset import CARVANA
import glob
def predict(test_loader, model, threshold=0.5, dbg=False, save_probs=False, output_dir=None):
if save_probs:
if output_dir is None:
raise ValueError('Specify an output dir to save probs')
output_dir.mkdir(exist_ok=True)
TRAIN = False
model.train(TRAIN)
print '---!!!---TRAIN={}'.format(TRAIN)
original_shape = (1280, 1918)
all_rles = []
all_img_filenames = []
img_list = []
for i, (images, names) in tqdm(enumerate(test_loader), total=len(test_loader), desc='Predicting'):
images = Variable(images.cuda(), volatile=True)
outputs = model(images)
outputs = F.upsample(outputs, size=original_shape, mode='bilinear')
output_probs = F.sigmoid(outputs)
if save_probs:
probs_np = np.squeeze(output_probs.data.cpu().numpy())
if len(probs_np.shape) == 2:
probs_np = probs_np[np.newaxis, ...]
assert len(probs_np.shape) == 3, probs_np.shape
prob_images = np.asarray(np.round(probs_np * 255), dtype=np.uint8)
for probs_img, sample_name in izip(prob_images, names):
cv2.imwrite(str(output_dir.joinpath(sample_name + '.png')), probs_img)
masks = (probs_np > threshold)
else:
masks = (output_probs > threshold).data.cpu().numpy()
masks = np.squeeze(masks)
if len(masks.shape) == 2:
masks = masks[np.newaxis, ...]
assert len(masks.shape) == 3, masks.shape
for mask, sample_name in izip(masks, names):
mask = np.asarray(mask, dtype=np.bool)
rle = rle_to_string(rle_encode(mask))
all_rles.append(rle)
all_img_filenames.append(sample_name + '.jpg')
if i <= 3:
if len(mask.shape) != 3:
mask = mask[:, :, np.newaxis]
mask = mask.astype(np.float32)
img = images.data.cpu()[-1].numpy().transpose(1, 2, 0)
img = np.asarray(img * 255, dtype=np.uint8)
img = cv2.resize(img, dsize=original_shape[::-1], interpolation=cv2.INTER_LINEAR)
img_list.extend([img, mask])
if dbg and i == 3:
break
return all_rles, all_img_filenames, img_list
def load_from_files(test_loader, probs_dir=None):
if probs_dir is None or not probs_dir.exists():
raise ValueError('Dir with probs was not found! {}'.format(str(probs_dir)))
print 'Reading from', probs_dir
all_rles = []
all_img_filenames = []
for i, (images, names) in tqdm(enumerate(test_loader), total=len(test_loader), desc='Loading from files'):
for sample_name in names:
probs_img = cv2.imread(str(probs_dir.joinpath(sample_name + '.png')), cv2.IMREAD_GRAYSCALE)
mask = (probs_img >= 128)
rle = rle_to_string(rle_encode(mask))
all_rles.append(rle)
all_img_filenames.append(sample_name + '.jpg')
return all_rles, all_img_filenames
def create_submission(rles, img_paths, output_path):
print('Create submission...')
t = pd.read_csv(join(config.input_data_dir, 'sample_submission.csv'))
assert len(rles) == len(img_paths) == len(t), '{} rles'.format(len(rles))
t['rle_mask'] = rles
t['img'] = map(os.path.basename, img_paths)
print t.head(2)
t.to_csv('{}.gz'.format(output_path), index=False, compression='gzip')
print 'Saved'
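# Note: in create_submission above, `rles[i]` is assumed to correspond to
# `img_paths[i]`; both the `img` and `rle_mask` columns of the sample
# submission are overwritten, so only the mutual alignment of the two lists
# matters, not the original row order of the sample file.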
def load_checkpoint(ckpt_dir, epoch=None):
if ckpt_dir is not None:
if Path(ckpt_dir).is_file():
ckpt_path = Path(ckpt_dir)
elif epoch is None:
ckpt_path = Path(ckpt_dir) / 'model_best.pth.tar'
else:
ckpt_path = Path(ckpt_dir) / 'model_best_epoch{}.pth.tar'.format(epoch)
else:
raise ValueError('ckpt_dir must be not None')
if ckpt_path.exists():
print("=> loading checkpoint '{}'".format(ckpt_path))
checkpoint = torch.load(str(ckpt_path))
print 'best_score', checkpoint['best_score']
print 'arch', checkpoint['arch']
print 'epoch', checkpoint['epoch']
if 'cur_score' in checkpoint:
print 'cur_score', checkpoint['cur_score']
print(
"=> loaded checkpoint '{}' (epoch {})".format(ckpt_path, checkpoint['epoch']))
if epoch is None:
out_path = ckpt_path.with_name('model_best_epoch{}.pth.tar'.format(checkpoint['epoch']))
if not out_path.exists():
shutil.copy(str(ckpt_path), str(out_path))
else:
raise IOError("=> no checkpoint found at '{}'".format(ckpt_path))
checkpoint['path'] = ckpt_path
return checkpoint
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('-b', '--batch_size', type=int, default=1, metavar='N',
help='input batch size for training')
parser.add_argument('--seed', type=int, default=1993, help='random seed')
parser.add_argument('--epoch', type=int, default=None, help='checkpoint epoch to use')
parser.add_argument('-imsize', '--image_size', type=int, default=1024, metavar='N',
                        help='input image size; -1 means full size with padding')
parser.add_argument('-no_cudnn', '--no_cudnn', action='store_true',
                        help='do not use cudnn?')
parser.add_argument('-no_hq', '--no_hq', action='store_true',
help='do not use hq images?')
parser.add_argument('-o', '--ckpt_dir', default=None)
parser.add_argument('-net', '--network', default='Unet')
parser.add_argument('-load', '--load', action='store_true')
args = parser.parse_args()
dbg = False
torch.manual_seed(args.seed)
print 'CudNN:', torch.backends.cudnn.version()
print 'Run on {} GPUs'.format(torch.cuda.device_count())
torch.backends.cudnn.benchmark = not args.no_cudnn # Enable use of CudNN
checkpoint = load_checkpoint(args.ckpt_dir, epoch=args.epoch)
filters_sizes = checkpoint['filter_sizes']
should_normalize = False
if args.network == 'Unet5':
model = torch.nn.DataParallel(Unet5(is_deconv=False, filters=filters_sizes)).cuda()
elif args.network in ['vgg11v1', 'vgg11v2']:
assert args.network[-2] == 'v'
v = int(args.network[-1:])
should_normalize = True
model = torch.nn.DataParallel(UnetVgg11(n_classes=1, num_filters=filters_sizes.item(), v=v)).cuda()
elif args.network in ['vgg11av1', 'vgg11av2']:
assert args.network[-2] == 'v'
v = int(args.network[-1:])
model = torch.nn.DataParallel(Vgg11a(n_classes=1,
num_filters=filters_sizes.item(),
v=v)).cuda()
else:
model = torch.nn.DataParallel(Unet(is_deconv=False, filters=filters_sizes)).cuda()
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded model from checkpoint (epoch {})".format(checkpoint['epoch']))
rescale_size = (args.image_size, args.image_size)
if args.image_size == -1:
print 'Use full size. Use padding'
is_full_size = True
rescale_size = (1920, 1280)
transforms_seq = [data_utils.Rescale(rescale_size),
transforms.ToTensor()]
if should_normalize:
print 'Use VGG normalization!'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transforms_seq.append(normalize)
transform = transforms.Compose(transforms_seq)
test_dataset = CARVANA(root=config.input_data_dir,
subset='test',
image_size=args.image_size,
transform=transform,
seed=args.seed,
is_hq=not args.no_hq,
v=2)
probs_output_dir = Path(config.submissions_dir) / \
'test_probs_{}_epoch{}'.format(checkpoint['path'].parts[-2], checkpoint['epoch'])
probs_calculated = list()
if not args.load and probs_output_dir.exists():
probs_calculated = glob.glob(str(probs_output_dir) + '/*.png')
print 'Num precalculated:', len(probs_calculated)
probs_calculated = set(map(lambda x: os.path.basename(x)[:-4], probs_calculated))
before = len(test_dataset.data_path)
test_dataset.data_path = filter(lambda x: os.path.basename(x)[:-4] not in probs_calculated,
test_dataset.data_path)
print 'Skipped {} images as the probs for them were already calculated'.format(before - len(test_dataset.data_path))
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=args.batch_size,
shuffle=False,
pin_memory=True,
num_workers=2)
if args.load:
rles, all_img_filenames = load_from_files(test_loader, probs_dir=probs_output_dir)
else:
rles, all_img_filenames, _ = predict(test_loader, model, threshold=0.5, dbg=dbg, save_probs=True, output_dir=probs_output_dir)
if len(probs_calculated):
test_dataset.reload_all_test_paths()
rles, all_img_filenames = load_from_files(test_loader, probs_dir=probs_output_dir)
output_path = Path(config.submissions_dir) / ('test_{}_epoch{}.csv'.format(
checkpoint['path'].parts[-2], checkpoint['epoch']))
create_submission(rles, all_img_filenames, str(output_path))
if __name__ == '__main__':
main()
|
py
|
1a5bd40f14f81977d143d3bc0ec1a77991b2efc8
|
import numpy
import pytest
import chainer
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', [
((), ()),
((), (2, 3)),
((0, 2), (2, 0)),
((2, 0), (0, 3)),
((0, 0), (0, 0)),
((2, 3), (3, 4)),
((1, 2, 3), (3, 4)),
((1, 2, 0), (0, 4)),
((1, 0, 3), (3, 0)),
((1, 0, 3), (3, 4)),
((1, 2, 3), (3, 0)),
((1, 2), (1, 2, 3)),
((1, 0), (1, 0, 3)),
((0, 2), (1, 2, 0)),
((0, 2), (1, 2, 3)),
((1, 2), (1, 2, 0)),
((4, 5, 2), (3, 2, 5)),
((2, 3, 4, 4), (3, 4, 2)),
((2, 2, 3, 1), (2, 1, 3, 1, 4)),
((2, 4, 3), (1, 2, 3, 2)),
((1, 2, 3, 0), (4, 0, 5)),
((1, 2, 0, 3), (4, 3, 0)),
((1, 2, 0, 3), (4, 3, 5))
])
@chainer.testing.parameterize_pytest(
'in_dtypes,chx_expected_dtype', dtype_utils.result_dtypes_two_arrays)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestDot(op_utils.NumpyOpTest):
def setup(self):
device = chainerx.get_default_device()
a_dtype, b_dtype = self.in_dtypes
a_kind = numpy.dtype(a_dtype).kind
b_kind = numpy.dtype(b_dtype).kind
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and (a_kind != 'f' and b_kind != 'f'):
pytest.skip('non-float dot is not supported on CUDA')
# Skip backward/double-backward tests for int dtypes
if a_kind != 'f' or b_kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
# Skip backward/double-backward tests if the output will be
# disconnected.
# TODO(niboshi): Remove this skip condition after enabling backward()
# for such cases.
if self.a_shape and self.a_shape[-1] == 0:
self.skip_backward_test = True
self.skip_double_backward_test = True
if a_dtype == 'float16' or b_dtype == 'float16':
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
a_dtype, b_dtype = self.in_dtypes
a_shape = self.a_shape
b_shape = self.b_shape
a = numpy.random.uniform(-1, 1, a_shape).astype(a_dtype)
b = numpy.random.uniform(-1, 1, b_shape).astype(b_dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
if self.is_module:
y = xp.dot(a, b)
else:
y = a.dot(b)
y = dtype_utils.cast_if_numpy_array(xp, y, self.chx_expected_dtype)
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('a_shape,b_shape', [
((3, 2), (1, 3)),
((4, 3, 2, 5), (6, 4, 1, 2))
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_dot_invalid(is_module, xp, device, a_shape, b_shape, dtype):
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
return chainerx.testing.ignore()
a = array_utils.create_dummy_ndarray(xp, a_shape, dtype)
b = array_utils.create_dummy_ndarray(xp, b_shape, dtype)
if is_module:
return xp.dot(a, b)
else:
return a.dot(b)
class NumpyLinalgOpTest(op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def setup(self):
device = chainerx.get_default_device()
if (device.backend.name == 'native'
and not chainerx.linalg._is_lapack_available()):
pytest.skip('LAPACK is not linked to ChainerX')
self.check_backward_options.update({'rtol': 5e-3})
self.check_double_backward_options.update({'rtol': 5e-3})
_numpy_does_not_support_0d_input113 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.13.0'
_numpy_does_not_support_0d_input116 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.16.0'
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3), (6, 6)],
'b_columns': [(), (1,), (3,), (4,)],
'dtypes': [
('float32', 'float32'),
('float64', 'float64'),
('float64', 'float32'),
('float32', 'float64')]
})
))
class TestSolve(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtypes[0])
b = numpy.random.random(
(self.shape[0], *self.b_columns)).astype(self.dtypes[1])
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 3), (3, 2)],
'dtype': ['float32', 'float64']
})
))
class TestSolveFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
b = numpy.random.random(self.shape).astype(self.dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(3, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestSolveDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
b = numpy.random.random(self.shape).astype(self.dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3), (6, 6)],
'dtype': ['float32', 'float64']
})
))
class TestInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 3), (3, 2)],
'dtype': ['float32', 'float64']
})
))
class TestInverseFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(3, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestInverseDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'dtype': ['float32', 'float64'],
'full_matrices': [False],
'compute_uv': [True]
}) + chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'dtype': ['float32', 'float64'],
'full_matrices': [True],
'compute_uv': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSVD(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input116 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.svd(a,
full_matrices=self.full_matrices,
compute_uv=self.compute_uv)
# NOTE: cuSOLVER's (CuPy's) and NumPy's outputs of u and v might
# differ in signs, which is not a problem mathematically
if self.compute_uv:
u, s, v = out
return xp.abs(u), s, xp.abs(v)
else:
s = out
return s,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(2, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestSVDDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.svd(a)
u, s, v = out
return xp.abs(u), s, xp.abs(v)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'rcond': [1e-15, 0.3, 0.5, 0.6],
'dtype': ['float32', 'float64']
})
))
class TestPseudoInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
a = a * 10 + numpy.ones(self.shape)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.pinv(a, rcond=self.rcond)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(), ],
'rcond': [1e-15, ],
'dtype': ['float32', 'float64']
})
))
class TestPseudoInverseFailing(NumpyLinalgOpTest):
forward_accept_errors = (numpy.linalg.LinAlgError,
chainerx.ChainerxError,
chainerx.DimensionError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.pinv(a, rcond=self.rcond)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape', [(2, 3)])
@chainer.testing.parameterize_pytest('dtype', ['float16'])
class TestPseudoInverseDtypeFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.pinv(a)
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# backward for 'r', 'raw' modes is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (6, 6)],
'in_dtypes': ['float32', 'float64'],
'mode': ['r', 'raw'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
# backward for non-square `R` is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (2, 3), (3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['complete', 'reduced'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
chainer.testing.product({
'shape': [(1, 1), (6, 6)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced', 'complete']
}) + chainer.testing.product({
'shape': [(3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced']
})
))
class TestQR(NumpyLinalgOpTest):
# For input with shape (N, 0) strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (numpy.lib.NumpyVersion(numpy.__version__) < '1.16.0'
and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.qr(a, mode=self.mode)
if self.mode == 'r':
r = out
return r,
if self.mode == 'raw':
if a.dtype.char == 'f':
return out[0].astype(xp.float64), out[1].astype(xp.float64)
return out
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(1, 1), (2, 3), (3, 2), (6, 6)],
'in_dtypes': ['float16'],
'mode': ['r', 'raw', 'reduced', 'complete']
})
))
class TestQRFailing(NumpyLinalgOpTest):
forward_accept_errors = (TypeError,
chainerx.DtypeError)
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.qr(a, mode=self.mode)
return out
|
py
|
1a5bd42aa268d5c3547b33556d41101444dd009f
|
##parameters=transaction_info
##title=Undo transactions
##
from Products.CMFCore.utils import getUtilityByInterfaceName
from Products.CMFDefault.utils import Message as _
utool = getUtilityByInterfaceName('Products.CMFCore.interfaces.IUndoTool')
utool.undo(context, transaction_info)
context.setStatus(True, _(u'Transaction(s) undone.'))
context.setRedirect(context, 'object/folderContents')
|
py
|
1a5bd4d6a9d39f1632a982a3bbd5bf3a9f748f8f
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ready_shoud_28438.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py
|
1a5bd6b3433aada118ea0221fac0a0b42a7ff7a7
|
import pandas as pd
import numpy as np
from scipy.spatial import cKDTree
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
# from sklearn.preprocessing import OrdinalEncoder
from ..tools.Markov import DiscreteTimeMarkovChain
from ..prediction.fate import _fate
from ..vectorfield import vector_field_function
from ..tools.utils import fetch_states
from ..tools.clustering import neighbors
from .utils import (
remove_redundant_points_trajectory,
arclength_sampling,
integrate_streamline,
)
import anndata
from typing import List, Union
from ..dynamo_logger import LoggerManager, main_info, main_warning
def classify_clone_cell_type(adata, clone, clone_column, cell_type_column, cell_type_to_excluded):
"""find the dominant cell type of all the cells that are from the same clone"""
cell_ids = np.where(adata.obs[clone_column] == clone)[0]
to_check = adata[cell_ids].obs[cell_type_column].value_counts().index.isin(list(cell_type_to_excluded))
cell_type = np.where(to_check)[0]
return cell_type
def prune_transition(
adata: anndata.AnnData,
group: str,
basis: str = "umap",
n_neighbors: int = 30,
neighbor_key: Union[str, None] = None,
graph_mat: np.ndarray = None,
state_graph_method: str = "vf",
):
    """This function prunes a cell group transition graph based on a cell similarity graph (kNN graph).
    The pruning algorithm is as follows: assume the vf-based cell-type transition graph is `g` (cell type x cell
    type matrix) and `M` is the cell to cell-type assignment matrix (row is the cell and column the cell type;
if i-th cell is j-th cell type, the `M_{ij}` is 1). the knn graph between cells based on the umap embedding (or
others) is `n` (number of cells x number of cells matrix). We compute `t(M) n M` to get a cell-type by cell type
connectivity graph M' (basically this propagates the cell type to cell matrix to the cell-cell knn graph and then
lump the transition down to cell-type). Lastly, `g * M'` will give pruned graph, where `g` is the vector field
based cell-type transition graph. As you can see the resultant graph considers both vector field based connection
and the similarity relationship of cells in expression space.
Parameters
----------
adata:
AnnData object.
group:
Cell graph that will be used to build transition graph and lineage tree.
basis:
The basis that will be used to build the k-nearest neighbor graph when neighbor_key is not set.
n_neighbors:
The number of neighbors that will be used to build the k-nn graph, passed to `dyn.tl.neighbors` function. Not
used when neighbor_key provided.
neighbor_key:
The nearest neighbor graph key in `adata.obsp`. This nearest neighbor graph will be used to build a
gene-expression space based cell-type level connectivity graph.
state_graph_method:
Method that will be used to build the initial state graph.
Returns
-------
M:
The pruned cell state transition graph.
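    Example
    -------
    A minimal dense-NumPy sketch of the lumping step described above (shapes
    and values are illustrative only, not taken from real data):
        import numpy as np
        M = np.array([[1, 0], [1, 0], [0, 1]])           # 3 cells x 2 cell types
        n = (np.random.rand(3, 3) > 0.5).astype(float)   # cell-cell kNN graph
        g = np.array([[0.0, 1.0], [0.0, 0.0]])           # vf-based type transitions
        M_prime = M.T @ n @ M                            # cell-type connectivity
        pruned = ((g * (M_prime > 0)) > 0).astype(float)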
"""
logger = LoggerManager.gen_logger("dynamo-prune_transition")
logger.log_time()
from patsy import dmatrix
if group not in adata.obs.columns:
raise Exception(f"group has to be in adata.obs.columns, but you have {group}. ")
data = adata.obs
groups = data[group]
uniq_grps, data[group] = groups.unique(), list(groups)
sorted_grps = np.sort(uniq_grps)
if graph_mat is not None:
if graph_mat.shape != (len(uniq_grps), len(uniq_grps)):
raise Exception(f"the input graph_mat has to have the same shape as ({len(uniq_grps), len(uniq_grps)})")
group_graph = graph_mat
else:
if group + "_graph" not in adata.uns_keys():
main_info(f"build state graph `g` via {state_graph_method}")
state_graph(adata, group=group, basis=basis, method=state_graph_method) # the markov method
group_graph = adata.uns[group + "_graph"]["group_graph"]
if neighbor_key is None:
main_info(f"build knn graph with {n_neighbors} neighbors in {basis} basis.")
neighbors(adata, basis=basis, result_prefix=basis + "_knn", n_neighbors=n_neighbors)
transition_matrix = adata.obsp[basis + "_knn_distances"]
else:
main_info(f"retrieve knn graph via {neighbor_key} ley.")
transition_matrix = adata.obsp[neighbor_key]
main_info("build cell to cell graph assignment matrix via `dmatrix` from `pasty`")
cell_membership = csr_matrix(dmatrix(f"~{group}+0", data=data))
main_info("build lumped cell group to cell group connectivity matrix via `t(M) n M`.")
membership_matrix = cell_membership.T.dot(transition_matrix).dot(cell_membership)
main_info("prune vf based cell graph transition graph via g' = `M' g")
# note that dmatrix will first sort the unique group names and then construct the design matrix, so this is needed.
membership_df = pd.DataFrame(membership_matrix.A > 0, index=sorted_grps, columns=sorted_grps)
M = (group_graph * (membership_df.loc[uniq_grps, uniq_grps].values > 0) > 0).astype(float)
logger.finish_progress(progress_name="prune_transition")
return M
def state_graph(
adata,
group,
method="vf",
transition_mat_key="pearson_transition_matrix",
approx=False,
eignum=5,
basis="umap",
layer=None,
arc_sample=False,
sample_num=100,
prune_graph=False,
**kwargs,
):
"""Estimate the transition probability between cell types using method of vector field integrations or Markov chain
lumping.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that will be used to calculate a cell type (group) transition graph.
group: `str`
The attribute used to group cells (a column name in adata.obs).
method: `str` (default: 'vf')
The method that will be used to construct lumped cell state graph. Must be one of {`vf` or `markov`}
transition_mat_key: `str` (default: 'pearson_transition_matrix')
The key that corresponds to the transition graph used in the KernelMarkovChain class for lumping.
approx: `bool` (default: False)
Whether to use streamplot to get the integration lines from each cell.
eignum: `int` (default: 5)
The number of eigen-vectors when performing the eigen-decomposition to obtain the stationary
distribution. 5 should be sufficient as the stationary distribution will be the first eigenvector. This also
accelerates the calculation.
basis: `str` or None (default: `umap`)
The embedding data to use for predicting cell fate. If `basis` is either `umap` or `pca`, the reconstructed
trajectory will be projected back to high dimensional space via the `inverse_transform` function.
layer: `str` or None (default: `None`)
Which layer of the data will be used for predicting cell fate with the reconstructed vector field function.
The layer once provided, will override the `basis` argument and then predicting cell fate in high
dimensional space.
sample_num: `int` (default: 100)
The number of cells to sample in each group that will be used for calculating the transition graph between
cell groups. This is used to speed up the calculation.
prune_graph: `bool` (default: `False`)
Whether to prune the transition graph based on cell similarities in the `basis` space.
kwargs:
Additional parameters that will be passed to `prune_transition` function.
Returns
-------
An updated adata object that is added with the `group + '_graph'` key, including the transition graph
and the average transition time.
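Examples
--------
A minimal call (sketch; assumes `adata` has been processed and a vector field learned in the `umap` basis, as in
the `tree_model` example below):
>>> dyn.pd.state_graph(adata, group='clusters', basis='umap')
>>> adata.uns['clusters_graph']['group_graph']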
"""
logger = LoggerManager.get_main_logger()
timer_logger = LoggerManager.get_temp_timer_logger()
timer_logger.log_time()
logger.info("Estimating the transition probability between cell types...")
groups, uniq_grp = adata.obs[group], list(adata.obs[group].unique())
if method.lower() in ["naive", "markov"]:
logger.info("Applying kernel Markov chain")
T = adata.obsp[transition_mat_key]
if np.isclose(T.sum(1), 1).sum() > np.isclose(T.sum(0), 1).sum():
logger.info("KernelMarkovChain assuming column sum to be 1. Transposing transition matrix")
T = T.T
if sp.issparse(T):
T = T.A
dtmc = DiscreteTimeMarkovChain(P=T, eignum=eignum, check_norm=False)
# ord_enc = OrdinalEncoder()
# labels = ord_enc.fit_transform(adata.obs[[group]])
# labels = labels.flatten().astype(int)
labels = np.zeros(len(groups), dtype=int)
for i, grp in enumerate(uniq_grp):
labels[groups == grp] = i
grp_graph = dtmc.lump(labels).T if method == "markov" else dtmc.naive_lump(T.A, labels).T
label_len, grp_avg_time = len(np.unique(labels)), None
grp_graph = grp_graph[:label_len, :label_len]
elif method == "vf":
logger.info("Applying vector field")
grp_graph = np.zeros((len(uniq_grp), len(uniq_grp)))
grp_avg_time = np.zeros((len(uniq_grp), len(uniq_grp)))
all_X, VecFld, t_end, _ = fetch_states(
adata,
init_states=None,
init_cells=adata.obs_names,
basis=basis,
layer=layer,
average=False,
t_end=None,
)
logger.report_progress(percent=0, progress_name="KDTree parameter preparation computation")
logger.log_time()
kdt = cKDTree(all_X, leafsize=30)
logger.finish_progress(progress_name="KDTree computation")
vf_dict = adata.uns["VecFld_" + basis]
for i, cur_grp in enumerate(LoggerManager.progress_logger(uniq_grp, progress_name="iterate groups")):
init_cells = adata.obs_names[groups == cur_grp]
if sample_num is not None:
cell_num = np.min((sample_num, len(init_cells)))
ind = np.random.choice(len(init_cells), cell_num, replace=False)
init_cells = init_cells[ind]
init_states, _, _, _ = fetch_states(
adata,
init_states=None,
init_cells=init_cells,
basis=basis,
layer=layer,
average=False,
t_end=None,
)
if approx and basis != "pca" and layer is None:
X_grid, V_grid = (
vf_dict["grid"],
vf_dict["grid_V"],
)
N = int(np.sqrt(V_grid.shape[0]))
X_grid, V_grid = (
np.array([np.unique(X_grid[:, 0]), np.unique(X_grid[:, 1])]),
np.array(
[
V_grid[:, 0].reshape((N, N)),
V_grid[:, 1].reshape((N, N)),
]
),
)
t, X = integrate_streamline(
X_grid[0],
X_grid[1],
V_grid[0],
V_grid[1],
integration_direction="forward",
init_states=init_states,
interpolation_num=250,
average=False,
)
else:
t, X = _fate(
lambda x: vector_field_function(x=x, vf_dict=vf_dict),
init_states,
t_end=t_end,
step_size=None,
direction="forward",
interpolation_num=250,
average=False,
)
# t, X = np.hstack(t), np.hstack(X).T
len_per_cell = None if type(t) == list else len(t)
cell_num = len(t) if type(X) == list else int(X.shape[0] / len(t))
knn_dist_, knn_ind_ = kdt.query(init_states, k=2)
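# knn_dist_[:, 1] is each sampled cell's distance to its nearest other cell; dist_min below is
# used as the tolerance for collapsing redundant trajectory points, while dist_threshold decides
# when an integrated trajectory point counts as "passing through" a cell's neighborhood.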
dist_min, dist_threshold = (
np.max([knn_dist_[:, 1].min(), 1e-3]),
np.mean(knn_dist_[:, 1]),
)
for j in np.arange(cell_num):
if len_per_cell is not None:
cur_ind = np.arange(j * len_per_cell, (j + 1) * len_per_cell)
Y, arclength, T_bool = remove_redundant_points_trajectory(
X[cur_ind], tol=dist_min, output_discard=True
)
if arc_sample:
Y, arclength, T = arclength_sampling(Y, arclength / 1000, t=t[~T_bool])
else:
T = t[~T_bool]
else:
Y, T = X[j].T, t[j] if type(t[j]) == np.ndarray else np.array(t[j])
knn_dist, knn_ind = kdt.query(Y, k=1)
# set up a dataframe with group and time
pass_t = np.where(knn_dist < dist_threshold)[0]
pass_df = pd.DataFrame({"group": adata[knn_ind[pass_t]].obs[group], "t": T[pass_t]})
# only consider trajectory that pass at least 10 cells in group as confident pass
pass_group_counter = pass_df.group.value_counts()
pass_groups, confident_pass_check = (
pass_group_counter.index.tolist(),
np.where(pass_group_counter > 10)[0],
)
# assign the transition matrix and average transition time
if len(confident_pass_check) > 0:
ind_other_cell_type = [uniq_grp.index(k) for k in np.array(pass_groups)[confident_pass_check]]
grp_graph[i, ind_other_cell_type] += 1
grp_avg_time[i, ind_other_cell_type] += (
pass_df.groupby("group")["t"].mean()[confident_pass_check].values
)
# average across cells
grp_avg_time[i, :] /= grp_graph[i, :]
grp_graph[i, :] /= cell_num
else:
raise NotImplementedError("Only vector field (vf) or Markov chain (markov) based lumping are supported.")
if prune_graph:
grp_graph = prune_transition(
adata,
group,
basis,
graph_mat=grp_graph,
**kwargs,
)
adata.uns[group + "_graph"] = {"group_graph": grp_graph, "group_avg_time": grp_avg_time, "group_names": uniq_grp}
timer_logger.finish_progress(progress_name="State graph estimation")
return adata
def tree_model(
adata: anndata.AnnData,
group: str,
progenitor: str,
terminators: List[str],
basis: str = "umap",
n_neighbors: int = 30,
neighbor_key: Union[str, None] = None,
graph_mat: np.ndarray = None,
state_graph_method: str = "vf",
prune_graph: bool = True,
row_norm: bool = True,
) -> pd.DataFrame:
"""This function learns a tree model of cell states (types).
It is based on the shortest path from the source to target cells of the pruned vector field based cell-type
transition graph. The pruning was done by restricting cell state transition that are only between cell states that
are nearby in gene expression space (often low gene expression space).
Parameters
----------
adata:
AnnData object.
group:
The cell group (annotation) column in `adata.obs` that will be used to build the transition graph and lineage
tree.
progenitor:
The source cell type name of the lineage tree.
terminators:
The terminal cell type names of the lineage tree.
basis:
The basis that will be used to build the k-nearest neighbor graph when neighbor_key is not set.
n_neighbors:
The number of neighbors that will be used to build the k-nn graph, passed to the `dyn.tl.neighbors` function. Not
used when `neighbor_key` is provided.
neighbor_key:
The nearest neighbor graph key in `adata.obsp`. This nearest neighbor graph will be used to build a
gene-expression space based cell-type level connectivity graph.
state_graph_method:
Method that will be used to build the initial state graph.
prune_graph: `bool` (default: `True`)
Whether to prune the transition graph based on cell similarities in the `basis` space before learning the tree
model.
row_norm: `bool` (default: `True`)
Whether to normalize each row so that it sums to 1. Note that rows and columns of the transition matrix
correspond to sources and targets, respectively, in dynamo by default.
Returns
-------
res:
The final tree model of cell groups. See the following example on how to visualize the tree via dynamo.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.pancreatic_endocrinogenesis()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata)
>>> dyn.vf.VectorField(adata, basis='umap', pot_curl_div=False)
>>> dyn.pd.state_graph(adata, group='clusters', basis='umap')
>>> res = dyn.pd.tree_model(adata, group='clusters', basis='umap')
>>> # in the following we first copy the state_graph result to a new key and then replace the `group_graph` key of
>>> # the state_graph result and visualize tree model via dynamo.
>>> adata.obs['clusters2'] = adata.obs['clusters'].copy()
>>> adata.uns['clusters2_graph'] = adata.uns['clusters_graph'].copy()
>>> adata.uns['clusters2_graph']['group_graph'] = res
>>> dyn.pl.state_graph(adata, group='clusters2', keep_only_one_direction=False, transition_threshold=None,
>>> color='clusters2', basis='umap', show_legend='on data')
"""
logger = LoggerManager.gen_logger("dynamo-tree_model")
logger.log_time()
data = adata.obs
groups = data[group]
uniq_grps, data[group] = groups.unique(), list(groups)
progenitor = progenitor[0] if type(progenitor) is not str else progenitor
if progenitor not in uniq_grps:
raise Exception(f"progenitor has to be in adata.obs[{group}], but you have {progenitor}. ")
else:
progenitor = list(uniq_grps).index(progenitor)
if not set(terminators) <= set(uniq_grps):
raise Exception(f"all terminators have to be in adata.obs[{group}], but you have {terminators}.")
else:
terminators = [list(uniq_grps).index(i) for i in terminators]
if prune_graph:
M = prune_transition(
adata,
group,
basis,
n_neighbors,
neighbor_key,
graph_mat,
state_graph_method,
)
else:
M = graph_mat
if np.any(M < 0):
main_warning("the transition graph have negative values.")
M[M < 0] = 0
M += 1e-5 - 1e-5  # adding 0.0 normalizes any -0.0 entries to 0.0
if row_norm:
M /= M.sum(1)
M[M > 0] = 1 - M[M > 0]  # the shortest-path search expects distances, so larger transition values must map to smaller edge weights
D, Pr = shortest_path(np.copy(M, order="c"), directed=False, method="FW", return_predecessors=True)
res = np.zeros(M.shape)
# this builds the tree based on each shortest path connecting the source to each target cell type
main_info("builds the tree model based on each shortest path connecting the source to each target cell type in g'.")
for j in terminators:
p = j
while Pr[progenitor, p] != -9999:
res[Pr[progenitor, p], p] = 1
p = Pr[progenitor, p]
res = pd.DataFrame(res, index=uniq_grps, columns=uniq_grps)
logger.finish_progress(progress_name="tree_model building")
return res
|
py
|
1a5bd6d0d10d37bfb6544d40aefb838e367b90e0
|
"""
Create / compile projects for .NET version of rhino3dm
"""
import os
import sys
import fileinput
import shutil
def system(cmd):
# copied from setup.py
rv = os.system(cmd)
rc = rv if os.name == 'nt' else os.WEXITSTATUS(rv)
if (rc != 0):
raise RuntimeError('The command "{}" exited with {}'.format(cmd, rc))
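# For example, system('cmake --build .') raises RuntimeError if cmake exits with a non-zero
# status, so any build failure below aborts the script immediately.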
def methodgen(dotnetcore):
# set up args to pass to methodgen application
dir_cpp = os.getcwd() + '/librhino3dm_native'
dir_cs = os.getcwd() + '/dotnet'
path_replace = '../lib/opennurbs'
args = ' "{0}" "{1}" "{2}"'.format(dir_cpp, dir_cs, path_replace)
if dotnetcore:
# staging and compilation occurs in the build directory
build_dir = "build/methodgen"
if not os.path.exists(build_dir):
if(not os.path.exists("build")):
os.mkdir("build")
os.mkdir(build_dir)
src_files = os.listdir('./methodgen')
for file_name in src_files:
if file_name.endswith('.cs'):
full_path = os.path.join('./methodgen', file_name)
if os.path.isfile(full_path):
shutil.copy(full_path, build_dir)
if file_name.endswith('.core'):
full_path = os.path.join('./methodgen', file_name)
if os.path.isfile(full_path):
shutil.copy(full_path, build_dir + '/methodgen.csproj')
# compile methodgen
system('dotnet build ' + './' + build_dir)
# execute methodgen
system('dotnet run --project ' + build_dir + '/methodgen.csproj ' + args)
else:
# compile methodgen
# system('msbuild ./methodgen')
# execute methodgen for Rhino3dm
app = os.getcwd() + '/methodgen/bin/Debug/methodgen.exe'
if os.name == 'nt': # windows build
system(app + args)
else:
system('mono ' + app + args)
def create_cpp_project(bitness, compile):
# staging and compilation occurs in the build directory
build_dir = "build/librhino3dm_native_{0}".format(bitness)
if not os.path.exists(build_dir):
if(not os.path.exists("build")):
os.mkdir("build")
os.mkdir(build_dir)
os.chdir(build_dir)
if os.name == 'nt': # windows build
arch = ""
if bitness == 64:
arch = "-A x64"
else:
arch = "-A Win32"
# args = '-G "Visual Studio 16 2019" -A -A Win64'.format(arch)
args = '-G "Visual Studio 16 2019" {0}'.format(arch)
system('cmake ' + args + ' ../../librhino3dm_native')
if bitness == 64:
for line in fileinput.input("librhino3dm_native.vcxproj", inplace=1):
print(line.replace("WIN32;", "WIN64;"))
if compile:
system("cmake --build . --config Release --target librhino3dm_native")
else:
system("cmake ../../librhino3dm_native")
if compile:
system("make")
os.chdir("../..")
def compilerhino3dm(dotnetcore):
if dotnetcore:
conf = '/p:Configuration=Release;OutDir="../build/dotnet"'
system('dotnet build ./dotnet/Rhino3dm.core.csproj {}'.format(conf))
else:
conf = '/p:Configuration=Release;OutDir="../build/dotnet"'
system('msbuild ./dotnet/Rhino3dm.csproj {}'.format(conf))
if __name__ == '__main__':
dotnetcore = False
if len(sys.argv) > 1 and sys.argv[1] == '--core':
dotnetcore = True
if sys.platform.startswith('linux'):
dotnetcore = True
# make the script always execute from its directory
scriptpath = os.path.realpath(__file__)
os.chdir(os.path.dirname(scriptpath))
# always compile and run methodgen first to make sure the pinvoke
# definitions are in place
methodgen(dotnetcore)
# only create 32 bit compile on windows
# if os.name == 'nt':
# create_cpp_project(32, True)
create_cpp_project(64, True)
# compile Rhino3dm .NET project
compilerhino3dm(dotnetcore)
|
py
|
1a5bd79f4d0b5a1e67b27a35ea2183ad872c3d7e
|
"""
Helpers for managing Docker network settings.
"""
from typing import Callable, Optional, Union
import click
import docker
from docker.models.networks import Network
from ._common import docker_client
def _validate_docker_network(
ctx: click.core.Context,
param: Union[click.core.Option, click.core.Parameter],
value: Optional[Union[int, bool, str]],
) -> Network:
"""
Validate that a given network name is an existing Docker network name.
"""
# We "use" variables to satisfy linting tools.
for _ in (ctx, param):
pass
client = docker_client()
try:
return client.networks.get(network_id=value)
except docker.errors.NotFound:
message = (
'No such Docker network with the name "{value}".\n'
'Docker networks are:\n{networks}'
).format(
value=value,
networks='\n'.join(
[network.name for network in client.networks.list()],
),
)
raise click.BadParameter(message=message)
def docker_network_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for choosing a Docker network.
"""
click_option_function = click.option(
'--network',
type=str,
default='bridge',
help=(
'The Docker network containers will be connected to. '
'It may not be possible to SSH to containers on a custom network '
'on macOS. '
),
callback=_validate_docker_network,
) # type: Callable[[Callable[..., None]], Callable[..., None]]
function = click_option_function(command) # type: Callable[..., None]
return function
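# Example usage (illustrative sketch; `create` is a hypothetical command name):
#
#   @click.command()
#   @docker_network_option
#   def create(network: Network) -> None:
#       """Create containers attached to the chosen Docker network."""
#       click.echo('Using Docker network: {name}'.format(name=network.name))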
|
py
|
1a5bd93aebccadb6d2ad178a1166da7625c666da
|
# coding: utf-8
import os
import pytest
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable, TuneError
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.result import TRAINING_ITERATION
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, Checkpoint
from ray.tune.resources import Resources
from ray.cluster_utils import Cluster
from ray.tune.utils.placement_groups import PlacementGroupFactory
class TrialExecutorInsufficientResourcesTest(unittest.TestCase):
def setUp(self):
os.environ["TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S"] = "1"
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 4,
"num_gpus": 2,
})
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
# No autoscaler case: the requested resources are not sufficient, so an error should be raised.
def testRaiseErrorNoAutoscaler(self):
def train(config):
pass
with pytest.raises(TuneError) as cm:
tune.run(
train,
resources_per_trial={
"cpu": 5, # more than what the cluster can offer.
"gpu": 3,
})
msg = ("You asked for 5.0 cpu and 3.0 gpu per trial, "
"but the cluster only has 4.0 cpu and 2.0 gpu. "
"Stop the tuning job and "
"adjust the resources requested per trial "
"(possibly via `resources_per_trial` "
"or via `num_workers` for rllib) "
"and/or add more resources to your Ray runtime.")
assert str(cm._excinfo[1]) == msg
class RayTrialExecutorTest(unittest.TestCase):
def setUp(self):
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
os.environ["TUNE_TRIAL_RESULT_WAIT_TIME_S"] = "99999"
self.trial_executor = RayTrialExecutor(queue_trials=False)
ray.init(num_cpus=2, ignore_reinit_error=True)
_register_all() # Needed for flaky tests
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testStartStop(self):
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
running = self.trial_executor.get_running_trials()
self.assertEqual(1, len(running))
self.trial_executor.stop_trial(trial)
def testAsyncSave(self):
"""Tests that saved checkpoint value not immediately set."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.assertEqual(checkpoint, trial.saving_to)
self.assertEqual(trial.checkpoint.value, None)
self.process_trial_save(trial)
self.assertEqual(checkpoint, trial.checkpoint)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testSaveRestore(self):
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.process_trial_save(trial)
self.trial_executor.restore(trial)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testPauseResume(self):
"""Tests that pausing works for trials in flight."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testSavePauseResumeErrorRestore(self):
"""Tests that pause checkpoint does not replace restore checkpoint."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
# Save
checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
self.assertEqual(Trial.RUNNING, trial.status)
self.assertEqual(checkpoint.storage, Checkpoint.PERSISTENT)
# Process save result (simulates trial runner)
self.process_trial_save(trial)
# Train
self.trial_executor.continue_training(trial)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
# Pause
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.assertEqual(trial.checkpoint.storage, Checkpoint.MEMORY)
# Resume
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
# Error
trial.set_status(Trial.ERROR)
# Restore
self.trial_executor.restore(trial)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testStartFailure(self):
_global_registry.register(TRAINABLE_CLASS, "asdf", None)
trial = Trial("asdf", resources=Resources(1, 0))
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.ERROR, trial.status)
def testPauseResume2(self):
"""Tests that pausing works for trials being processed."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.fetch_result(trial)
checkpoint = self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.start_trial(trial, checkpoint)
self.assertEqual(Trial.RUNNING, trial.status)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def _testPauseUnpause(self, result_buffer_length):
"""Tests that unpausing works for trials being processed."""
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = f"{result_buffer_length}"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "1"
# Need a new trial executor so the ENV vars are parsed again
self.trial_executor = RayTrialExecutor(queue_trials=False)
base = max(result_buffer_length, 1)
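# With result buffering enabled the trainable reports results in batches, so each
# fetch_result() below advances training by `base` iterations at a time (`base`
# falls back to 1 when buffering is disabled).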
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.assertEqual(trial.last_result.get(TRAINING_ITERATION), base)
self.trial_executor.pause_trial(trial)
self.assertEqual(Trial.PAUSED, trial.status)
self.trial_executor.unpause_trial(trial)
self.assertEqual(Trial.PENDING, trial.status)
self.trial_executor.start_trial(trial)
self.assertEqual(Trial.RUNNING, trial.status)
trial.last_result = self.trial_executor.fetch_result(trial)[-1]
self.assertEqual(trial.last_result.get(TRAINING_ITERATION), base * 2)
self.trial_executor.stop_trial(trial)
self.assertEqual(Trial.TERMINATED, trial.status)
def testPauseUnpauseNoBuffer(self):
self._testPauseUnpause(0)
def testPauseUnpauseTrivialBuffer(self):
self._testPauseUnpause(1)
def testPauseUnpauseActualBuffer(self):
self._testPauseUnpause(8)
def testNoResetTrial(self):
"""Tests that reset handles NotImplemented properly."""
trial = Trial("__fake")
self.trial_executor.start_trial(trial)
exists = self.trial_executor.reset_trial(trial, {}, "modified_mock")
self.assertEqual(exists, False)
self.assertEqual(Trial.RUNNING, trial.status)
def testResetTrial(self):
"""Tests that reset works as expected."""
class B(Trainable):
def step(self):
return dict(timesteps_this_iter=1, done=True)
def reset_config(self, config):
self.config = config
return True
trials = self.generate_trials({
"run": B,
"config": {
"foo": 0
},
}, "grid_search")
trial = trials[0]
self.trial_executor.start_trial(trial)
exists = self.trial_executor.reset_trial(trial, {"hi": 1},
"modified_mock")
self.assertEqual(exists, True)
self.assertEqual(trial.config.get("hi"), 1)
self.assertEqual(trial.experiment_tag, "modified_mock")
self.assertEqual(Trial.RUNNING, trial.status)
@staticmethod
def generate_trials(spec, name):
suggester = BasicVariantGenerator()
suggester.add_configurations({name: spec})
trials = []
while not suggester.is_finished():
trial = suggester.next_trial()
if trial:
trials.append(trial)
else:
break
return trials
def process_trial_save(self, trial):
"""Simulates trial runner save."""
checkpoint = trial.saving_to
checkpoint_value = self.trial_executor.fetch_result(trial)[-1]
checkpoint.value = checkpoint_value
trial.on_checkpoint(checkpoint)
class RayExecutorQueueTest(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": 1,
"_system_config": {
"num_heartbeats_timeout": 10
}
})
self.trial_executor = RayTrialExecutor(
queue_trials=True, refresh_period=0)
# Pytest doesn't play nicely with imports
_register_all()
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
_register_all() # re-register the evicted objects
def testQueueTrial(self):
"""Tests that reset handles NotImplemented properly."""
def create_trial(cpu, gpu=0):
return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
cpu_only = create_trial(1, 0)
self.assertTrue(self.trial_executor.has_resources_for_trial(cpu_only))
self.trial_executor.start_trial(cpu_only)
gpu_only = create_trial(0, 1)
self.assertTrue(self.trial_executor.has_resources_for_trial(gpu_only))
def testHeadBlocking(self):
# Once resource requests are deprecated, remove this test
os.environ["TUNE_PLACEMENT_GROUP_AUTO_DISABLED"] = "1"
def create_trial(cpu, gpu=0):
return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
gpu_trial = create_trial(1, 1)
self.assertTrue(self.trial_executor.has_resources_for_trial(gpu_trial))
self.trial_executor.start_trial(gpu_trial)
# TODO(rliaw): This behavior is probably undesirable, but right now
# trials with different resource requirements is not often used.
cpu_only_trial = create_trial(1, 0)
self.assertFalse(
self.trial_executor.has_resources_for_trial(cpu_only_trial))
self.cluster.add_node(num_cpus=1, num_gpus=1)
self.cluster.wait_for_nodes()
self.assertTrue(
self.trial_executor.has_resources_for_trial(cpu_only_trial))
self.trial_executor.start_trial(cpu_only_trial)
cpu_only_trial2 = create_trial(1, 0)
self.assertTrue(
self.trial_executor.has_resources_for_trial(cpu_only_trial2))
self.trial_executor.start_trial(cpu_only_trial2)
cpu_only_trial3 = create_trial(1, 0)
self.assertFalse(
self.trial_executor.has_resources_for_trial(cpu_only_trial3))
class RayExecutorPlacementGroupTest(unittest.TestCase):
def setUp(self):
self.head_cpus = 8
self.head_gpus = 4
self.head_custom = 16
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"num_cpus": self.head_cpus,
"num_gpus": self.head_gpus,
"resources": {
"custom": self.head_custom
},
"_system_config": {
"num_heartbeats_timeout": 10
}
})
# Pytest doesn't play nicely with imports
_register_all()
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
_register_all() # re-register the evicted objects
def testResourcesAvailableNoPlacementGroup(self):
def train(config):
tune.report(metric=0, resources=ray.available_resources())
out = tune.run(
train,
resources_per_trial={
"cpu": 1,
"gpu": 1,
"custom_resources": {
"custom": 3
},
"extra_cpu": 3,
"extra_gpu": 1,
"extra_custom_resources": {
"custom": 4
},
})
# Only `cpu`, `gpu`, and `custom_resources` will be "really" reserved,
# the extra_* will just be internally reserved by Tune.
self.assertDictEqual({
key: val
for key, val in out.trials[0].last_result["resources"].items()
if key in ["CPU", "GPU", "custom"]
}, {
"CPU": self.head_cpus - 1.0,
"GPU": self.head_gpus - 1.0,
"custom": self.head_custom - 3.0
})
def testResourcesAvailableWithPlacementGroup(self):
def train(config):
tune.report(metric=0, resources=ray.available_resources())
head_bundle = {"CPU": 1, "GPU": 0, "custom": 4}
child_bundle = {"CPU": 2, "GPU": 1, "custom": 3}
placement_group_factory = PlacementGroupFactory(
[head_bundle, child_bundle, child_bundle])
out = tune.run(train, resources_per_trial=placement_group_factory)
available = {
key: val
for key, val in out.trials[0].last_result["resources"].items()
if key in ["CPU", "GPU", "custom"]
}
if not available:
self.skipTest("Warning: Ray reported no available resources, "
"but this is an error on the Ray core side. "
"Skipping this test for now.")
self.assertDictEqual(
available, {
"CPU": self.head_cpus - 5.0,
"GPU": self.head_gpus - 2.0,
"custom": self.head_custom - 10.0
})
def testPlacementGroupFactoryEquality(self):
"""
Test that two different placement group factory objects are considered
equal and evaluate to the same hash.
"""
from collections import Counter
pgf_1 = PlacementGroupFactory([{
"CPU": 2,
"GPU": 4,
"custom": 7
}, {
"GPU": 2,
"custom": 1,
"CPU": 3
}], "PACK", "no_name", None)
pgf_2 = PlacementGroupFactory(
[{
"custom": 7,
"GPU": 4,
"CPU": 2,
}, {
"custom": 1,
"GPU": 2,
"CPU": 3
}],
strategy="PACK",
name="no_name",
lifetime=None)
self.assertEqual(pgf_1, pgf_2)
# Hash testing
counter = Counter()
counter[pgf_1] += 1
counter[pgf_2] += 1
self.assertEqual(counter[pgf_1], 2)
self.assertEqual(counter[pgf_2], 2)
class LocalModeExecutorTest(RayTrialExecutorTest):
def setUp(self):
ray.init(local_mode=True)
self.trial_executor = RayTrialExecutor(queue_trials=False)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
py
|
1a5bd93c1fe1ca469dd589108705d002311329d5
|
from collections import namedtuple
from contextlib import contextmanager
import datetime
import re
from py._code.code import TerminalRepr, ReprFileLocation
import pytest
from pytestqt.qt_compat import qt_api
from pytestqt.utils import get_marker
class QtLoggingPlugin:
"""
Plugin responsible for installing a QtMessageHandler before each
test and augmenting the report with the captured messages if the test failed.
"""
LOG_FAIL_OPTIONS = ["NO", "CRITICAL", "WARNING", "DEBUG", "INFO"]
def __init__(self, config):
self.config = config
def pytest_runtest_setup(self, item):
if get_marker(item, "no_qt_log"):
return
m = get_marker(item, "qt_log_ignore")
if m:
if not set(m.kwargs).issubset({"extend"}):
raise ValueError(
"Invalid keyword arguments in {!r} for "
"qt_log_ignore mark.".format(m.kwargs)
)
if m.kwargs.get("extend", True):
config_regexes = self.config.getini("qt_log_ignore")
ignore_regexes = config_regexes + list(m.args)
else:
ignore_regexes = m.args
else:
ignore_regexes = self.config.getini("qt_log_ignore")
item.qt_log_capture = _QtMessageCapture(ignore_regexes)
item.qt_log_capture._start()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""Add captured Qt messages to test item report if the call failed."""
outcome = yield
if not hasattr(item, "qt_log_capture"):
return
if call.when == "call":
report = outcome.get_result()
m = get_marker(item, "qt_log_level_fail")
if m:
log_fail_level = m.args[0]
else:
log_fail_level = self.config.getini("qt_log_level_fail")
assert log_fail_level in QtLoggingPlugin.LOG_FAIL_OPTIONS
# make test fail if any records were captured which match
# log_fail_level
if report.outcome != "failed":
for rec in item.qt_log_capture.records:
is_modeltest_error = (
rec.context is not None
and rec.context.category == "qt.modeltest"
and rec.matches_level("WARNING")
)
if (
rec.matches_level(log_fail_level) and not rec.ignored
) or is_modeltest_error:
report.outcome = "failed"
if report.longrepr is None:
report.longrepr = _QtLogLevelErrorRepr(
item, log_fail_level, is_modeltest_error
)
break
# if test has failed, add recorded messages to its terminal
# representation
if not report.passed:
long_repr = getattr(report, "longrepr", None)
if hasattr(long_repr, "addsection"): # pragma: no cover
log_format = self.config.getoption("qt_log_format")
context_format = None
if log_format is None:
context_format = "{rec.context.file}:{rec.context.function}:{rec.context.line}:\n"
log_format = " {rec.type_name}: {rec.message}"
lines = []
for rec in item.qt_log_capture.records:
suffix = " (IGNORED)" if rec.ignored else ""
if (
rec.context is not None
and (
rec.context.file is not None
or rec.context.function is not None
or rec.context.line != 0
)
and context_format is not None
):
context_line = context_format.format(rec=rec)
lines.append(context_line)
else:
log_format = log_format.lstrip()
line = log_format.format(rec=rec) + suffix
lines.append(line)
if lines:
long_repr.addsection("Captured Qt messages", "\n".join(lines))
item.qt_log_capture._stop()
del item.qt_log_capture
class _QtMessageCapture:
"""
Captures Qt messages when its `_handle_with_context` method is installed using
qInstallMessageHandler, and stores them into the `records` attribute.
:attr _records: list of Record instances.
:attr _ignore_regexes: list of regexes (as strings) that define if a record
should be ignored.
"""
def __init__(self, ignore_regexes):
self._records = []
self._ignore_regexes = ignore_regexes or []
self._previous_handler = None
def _start(self):
"""
Start receiving messages from Qt.
"""
previous_handler = qt_api.QtCore.qInstallMessageHandler(
self._handle_with_context
)
self._previous_handler = previous_handler
def _stop(self):
"""
Stop receiving messages from Qt, restoring the previously installed
handler.
"""
qt_api.QtCore.qInstallMessageHandler(self._previous_handler)
@contextmanager
def disabled(self):
"""
Context manager that temporarily disables logging capture while
inside it.
"""
self._stop()
try:
yield
finally:
self._start()
_Context = namedtuple("_Context", "file function line category")
def _append_new_record(self, msg_type, message, context):
"""
Creates a new Record instance and stores it.
:param msg_type: Qt message type
:param message: message string, if bytes it will be converted to str.
:param context: QMessageLogContext object or None
"""
def to_unicode(s):
if isinstance(s, bytes):
s = s.decode("utf-8", "replace")
return s
message = to_unicode(message)
ignored = False
for regex in self._ignore_regexes:
if re.search(regex, message) is not None:
ignored = True
break
if context is not None:
context = self._Context(
to_unicode(context.file),
to_unicode(context.function),
context.line,
to_unicode(context.category),
)
self._records.append(Record(msg_type, message, ignored, context))
def _handle_with_context(self, msg_type, context, message):
"""
Method to be installed using qInstallMessageHandler,
stores each message into the `_records` attribute.
"""
self._append_new_record(msg_type, message, context=context)
@property
def records(self):
"""Access messages captured so far.
:rtype: list of `Record` instances.
"""
return self._records[:]
class Record:
"""Hold information about a message sent by one of Qt log functions.
:ivar str message: message contents.
:ivar Qt.QtMsgType type: enum that identifies message type
:ivar str type_name: ``type`` as string: ``"QtInfoMsg"``, ``"QtDebugMsg"``,
``"QtWarningMsg"`` or ``"QtCriticalMsg"``.
:ivar str log_type_name:
type name similar to the logging package: ``INFO``, ``DEBUG``,
``WARNING`` and ``CRITICAL``.
:ivar datetime.datetime when: when the message was captured
:ivar bool ignored: If this record matches a regex from the "qt_log_ignore"
option.
:ivar context: a namedtuple containing the attributes ``file``,
    ``function``, ``line`` and ``category``. Can be None if no context is
    available for the message.
"""
def __init__(self, msg_type, message, ignored, context):
self._type = msg_type
self._message = message
self._type_name = self._get_msg_type_name(msg_type)
self._log_type_name = self._get_log_type_name(msg_type)
self._when = datetime.datetime.now()
self._ignored = ignored
self._context = context
message = property(lambda self: self._message)
type = property(lambda self: self._type)
type_name = property(lambda self: self._type_name)
log_type_name = property(lambda self: self._log_type_name)
when = property(lambda self: self._when)
ignored = property(lambda self: self._ignored)
context = property(lambda self: self._context)
@classmethod
def _get_msg_type_name(cls, msg_type):
"""
Return a string representation of the given QtMsgType enum
value.
"""
if not getattr(cls, "_type_name_map", None):
cls._type_name_map = {
qt_api.QtCore.QtMsgType.QtDebugMsg: "QtDebugMsg",
qt_api.QtCore.QtMsgType.QtWarningMsg: "QtWarningMsg",
qt_api.QtCore.QtMsgType.QtCriticalMsg: "QtCriticalMsg",
qt_api.QtCore.QtMsgType.QtFatalMsg: "QtFatalMsg",
qt_api.QtCore.QtMsgType.QtInfoMsg: "QtInfoMsg",
}
return cls._type_name_map[msg_type]
@classmethod
def _get_log_type_name(cls, msg_type):
"""
Return a string representation of the given QtMsgType enum
value in the same style used by the builtin logging package.
"""
if not getattr(cls, "_log_type_name_map", None):
cls._log_type_name_map = {
qt_api.QtCore.QtMsgType.QtDebugMsg: "DEBUG",
qt_api.QtCore.QtMsgType.QtWarningMsg: "WARNING",
qt_api.QtCore.QtMsgType.QtCriticalMsg: "CRITICAL",
qt_api.QtCore.QtMsgType.QtFatalMsg: "FATAL",
qt_api.QtCore.QtMsgType.QtInfoMsg: "INFO",
}
return cls._log_type_name_map[msg_type]
def matches_level(self, level):
assert level in QtLoggingPlugin.LOG_FAIL_OPTIONS
if level == "NO":
return False
elif level == "INFO":
return self.log_type_name in ("INFO", "DEBUG", "WARNING", "CRITICAL")
elif level == "DEBUG":
return self.log_type_name in ("DEBUG", "WARNING", "CRITICAL")
elif level == "WARNING":
return self.log_type_name in ("WARNING", "CRITICAL")
elif level == "CRITICAL":
return self.log_type_name in ("CRITICAL",)
else: # pragma: no cover
raise ValueError(f"log_fail_level unknown: {level}")
class _QtLogLevelErrorRepr(TerminalRepr):
"""
TerminalRepr of a test which didn't fail by normal means, but emitted
messages at or above the allowed level.
"""
def __init__(self, item, level, is_modeltest_error):
if is_modeltest_error:
msg = "Qt modeltester errors"
else:
msg = "Failure: Qt messages with level {0} or above emitted"
path, line_index, _ = item.location
self.fileloc = ReprFileLocation(
path, lineno=line_index + 1, message=msg.format(level.upper())
)
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, out):
self.fileloc.toterminal(out)
for name, content, sep in self.sections:
out.sep(sep, name)
out.line(content)
|
py
|
1a5bd9c2520f910f0b37c4def9d55d3a2c203570
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""General utilities for Transport classes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from paramiko import ProxyCommand
from six.moves import range
from aiida.common.extendeddicts import FixedFieldsAttributeDict
class FileAttribute(FixedFieldsAttributeDict):
"""
A class, resembling a dictionary, to describe the attributes of a file,
that is returned by get_attribute().
Possible keys: st_size, st_uid, st_gid, st_mode, st_atime, st_mtime
"""
_valid_fields = (
'st_size',
'st_uid',
'st_gid',
'st_mode',
'st_atime',
'st_mtime',
)
class _DetachedProxyCommand(ProxyCommand):
"""Modifies paramiko's ProxyCommand by launching the process in a separate process group."""
def __init__(self, command_line): # pylint: disable=super-init-not-called
# Note that the super().__init__ MUST NOT be called here, otherwise
# two subprocesses will be created.
import os
from subprocess import Popen, PIPE
from shlex import split as shlsplit
self.cmd = shlsplit(command_line)
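# preexec_fn=os.setsid below starts the proxy in its own session (and hence its own
# process group), so signals sent to the parent process do not kill the proxy prematurely.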
self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0, preexec_fn=os.setsid)
self.timeout = None
def close(self):
try:
self.process.terminate()
# In case the process doesn't exist anymore
except OSError:
pass
for _ in range(10):
if self.process.poll() is not None:
break
time.sleep(0.2)
else:
try:
self.process.kill()
# In case the process doesn't exist anymore
except OSError:
pass
for _ in range(10):
if self.process.poll() is not None:
break
time.sleep(0.2)
def copy_from_remote_to_remote(transportsource, transportdestination, remotesource, remotedestination, **kwargs):
"""
Copy files or folders from a remote computer to another remote computer.
:param transportsource: transport to be used for the source computer
:param transportdestination: transport to be used for the destination computer
:param str remotesource: path to the remote source directory / file
:param str remotedestination: path to the remote destination directory / file
:param kwargs: keyword parameters passed to the final put,
except for 'dereference' that is passed to the initial get
.. note:: It uses the method transportsource.copy_from_remote_to_remote
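Example (sketch; `t_src` and `t_dst` are two already-open transport instances, and the paths are hypothetical):
copy_from_remote_to_remote(t_src, t_dst, '/remote_a/results/out.dat', '/remote_b/archive/out.dat')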
"""
transportsource.copy_from_remote_to_remote(transportdestination, remotesource, remotedestination, **kwargs)
|