from sklearn import decomposition
from numpy import vstack
from .base import VHRMethod
class PCA(VHRMethod):
methodName = 'PCA'
def __init__(self, **kwargs):
super(PCA, self).__init__(**kwargs)
def apply(self, X):
# TODO: preproc
#X = self.__preprocess(X.T)
bvp = decomposition.PCA(n_components=3).fit_transform(X.T).T
return bvp
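# Illustrative usage sketch (not part of the class): `X` is expected to be a
# channels-by-frames array of RGB traces. For a hypothetical array `rgb_signal`
# of shape (3, n_frames), the three BVP components could be obtained with
#
#   bvp = PCA(**method_kwargs).apply(rgb_signal)   # `method_kwargs` stands in for whatever VHRMethod.__init__ expects
#
# Each row of `bvp` is one principal component of the temporal RGB variation.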
def __preprocess(self, X):
R = X[:,0].copy()
G = X[:,1].copy()
B = X[:,2].copy()
# -- BP pre-filtering of RGB channels
minHz = BVPsignal.minHz
maxHz = BVPsignal.maxHz
fs = self.video.frameRate
# -- filter
filteredR = BPfilter(R, minHz, maxHz, fs)
filteredG = BPfilter(G, minHz, maxHz, fs)
filteredB = BPfilter(B, minHz, maxHz, fs)
X = vstack([filteredR, filteredG, filteredB])
return X
|
msg1 = 'Enter an integer: '
n = int(input(msg1))
dobro = 2*n
triplo = 3*n
raiz = n**(1/2)
msg2 = 'The double of {} is {}. \n'.format(n, dobro)
msg3 = 'The triple of {} is {}. \n'.format(n, triplo)
msg4 = 'The square root of {} is {:.2f}.'.format(n, raiz)
msg5 = msg2 + msg3 + msg4
print(msg5)
|
import setuptools
with open('README.md', 'r') as f:
readme = f.read()
setuptools.setup(
name='pdflu',
version='0.1.0',
description='Command line tool to find BibTeX for academic papers using '
'Crossref and arXiv.',
long_description=readme,
author='Steven Dahdah',
url='https://github.com/sdahdah/pdflu',
packages=setuptools.find_packages(),
entry_points={
'console_scripts': ['pdflu=pdflu.pdflu:main'],
},
install_requires=['habanero', 'pdfminer.six', 'termcolor', 'arxiv'],
)
|
import logging
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
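# Note / usage sketch: this only sets the root logger's level to INFO; records still
# need a handler attached somewhere to actually be emitted. A minimal, assumed setup
# in client code could be:
#
#   logging.basicConfig()                      # attach a default StreamHandler to the root logger
#   logging.getLogger(__name__).info("hello")  # now passes the INFO threshold and is printed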
|
'''
Created on May 2014
@author: klc
Base class for importing analysis result files
'''
from __future__ import division
import os
import logging
import re
import copy
from datetime import datetime
from prettytable import PrettyTable
from uuid import uuid4
import elasticsearchloader.file_utils as fx
from elasticsearchloader.es_utils import ElasticSearchTools
from elasticsearchloader.es_settings import HEADER_FIELDS, REFERENCE_INDEX
from elasticsearchloader.es_settings import YAML_INDEX
from elasticsearch.exceptions import NotFoundError
class AnalysisLoader(object):
'''
Abstract class, needs to be subclassed for
each specific data type.
'''
LOAD_FACTOR = 4000
es_tools = {}
record_attributes = []
__load_id__ = ''
__paired_record_attributes__ = None
#
# ABSTRACT METHODS TO IMPLEMENT
#
def parse_line(self, line):
'''
ABSTRACT METHOD
parses 1 data line.
returns dictionary of parsed information
'''
raise NotImplementedError(
"Please implement this method when inheriting from this class")
def parse_header(self, infile_handle, custom_header=None):
'''
parses entire header.
returns dictionary of parsed header information
'''
header_values = {}
if isinstance(custom_header, dict):
header_values = copy.deepcopy(custom_header)
header_fields = copy.deepcopy(HEADER_FIELDS['common'])
# add subclass specific header fields, if any
if type(self).__name__ in HEADER_FIELDS.keys():
header_fields = dict(
header_fields.items() +
HEADER_FIELDS[type(self).__name__].items()
)
for line in infile_handle:
line = line.strip()
# Stop parsing when the first non-blank, non-comment line is reached
if line and not line.startswith("#"):
break
# Skip blank lines and comment lines that don't start with two '#'s
if not line or not re.match(r'^#{2}\w[^#]', line):
continue
[key, value] = (re.sub(r'^#+', '', line)).split('=', 1)
if key in header_fields.keys():
field = header_fields[key]['field']
header_values[field] = value
if header_fields[key]['transform'] == 'lower':
header_values[field] = value.lower()
elif header_fields[key]['transform'] == 'time':
try:
header_values[field] = datetime.strptime(
value, '%Y-%m-%dT%H:%M:%S'
)
except ValueError:
# Check whether the date is in the format returned
# by 'date' command, i.e. 'Fri Nov 21 11:35:33 2014'
try:
header_values[field] = datetime.strptime(
value, '%a %b %d %I:%M:%S %Y'
)
except ValueError:
header_values[field] = datetime.strptime(
value, '%a %b %d %H:%M:%S %Y'
)
elif header_fields[key]['transform'] == 'number':
header_values[field] = self.convert_to_number(value)
elif header_fields[key]['transform'] == 'remove':
del header_values[field]
else:
header_values[key] = value
query_values = {}
for field in ['sample_id',
'normal_sample_id',
'library_id',
'normal_library_id',
'run_id']:
if field in header_values.keys():
query_values[field] = header_values[field]
yaml_data = {}
project_data = {}
if 'run_id' in query_values.keys():
yaml_records = self.get_reference_data(YAML_INDEX, query_values)
if not yaml_records:
logging.error("Sample not found in Yaml index.")
else:
yaml_data = self.get_yaml_record(
yaml_records, infile_handle.name
)
del query_values['run_id']
project_records = self.get_reference_data(
REFERENCE_INDEX, query_values
)
if not project_records:
logging.error("Sample not found in Sample index.")
else:
project_data = project_records[0]['_source']
if "tumor_type" in project_data.keys():
project_data["tumor_type"] = project_data["tumor_type"].lower()
header_values = dict(
header_values.items() + project_data.items() + yaml_data.items()
)
header_values['file_fullname'] = os.path.abspath(infile_handle.name)
return header_values
def validate_input_file(self, filename):
'''
checks the given filename and makes sure it's acceptable to load.
Returns True if we should parse the file, False if we should not.
'''
raise NotImplementedError(
"Please implement this method when inheriting from this class")
def validate_file_content(self, file_path):
'''
checks whether the input file contains the expected number of columns by
examining the first 50 non-comment lines
'''
if not self.record_attributes:
return True
num_fields = len(self.record_attributes)
lines_read = 0
file_is_valid = True
file_handle = open(file_path, 'r')
for line in file_handle:
if line.startswith('#'):
continue
lines_read += 1
if len(line.strip().split('\t')) != num_fields:
file_is_valid = False
logging.error(
"%s doesn't match the expected input file format.",
os.path.basename(file_path))
break
if lines_read >= 50:
break
file_handle.close()
return file_is_valid
def __init__(self, es_doc_type=None,
es_index=None, es_host=None,
es_port=None, timeout=None,
use_ssl=False, http_auth=None):
'''
sets up Elastic search connection parameters
'''
self.__load_id__ = str(uuid4())
self.es_tools = ElasticSearchTools(es_doc_type, es_index)
self.es_tools.init_host(
host=es_host,
port=es_port,
timeout=timeout,
use_ssl=use_ssl,
http_auth=http_auth)
def convert_to_number(self, number):
'''
converts strings representing numeric values into the
corresponding data types
'''
try:
value = int(number)
return value
except ValueError:
try:
value = float(number)
return value
except ValueError as ve2:
logging.debug(" %s is not a number. error is: %s", number, ve2)
return None
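# For example: convert_to_number("3") -> 3 (int), convert_to_number("2.5") -> 2.5 (float),
# and convert_to_number("n/a") -> None (the failure is only logged at DEBUG level).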
def parse(
self,
analysis_file=None,
custom_header=None,
analysis_data=None):
'''
parses an analysis file.
Requires a correct header and filename standard.
'''
# initiate variables
analysis_values = {}
header_values = {}
buffered_values = []
index_cmd = self.get_index_cmd()
stats = {'skipped': 0, 'lines_read': 0, 'non_standard_chroms': 0}
self.disable_index_refresh()
if not isinstance(custom_header, dict):
custom_header = {}
try:
file_handle = open(analysis_file, 'r')
header_values = self.parse_header(file_handle, custom_header)
# Add any user provided data to the header
if isinstance(custom_header, dict):
header_values = dict(
header_values.items() + custom_header.items()
)
# Reposition the handle at the beginning of the file
file_handle.seek(0)
# read the records into the intermediate analysis_files
for line in file_handle:
stats["lines_read"] += 1
# Skip lines that begin with a # (ie don't parse comments)
if stats["lines_read"] == 1 or line.startswith(
'#') or line.startswith(' '):
stats["skipped"] += 1
continue
parsed_line = self.parse_line(line)
# line could be empty or line could be an uncommented column
# header (like titan)
if not parsed_line:
stats["skipped"] += 1
continue
if not isinstance(parsed_line, list):
parsed_line = [parsed_line]
if len(parsed_line) == 2:
record_copy = copy.deepcopy(parsed_line[0])
parsed_line[0]['paired_record'] = self.__get_paired_record(
parsed_line[1])
parsed_line[1]["paired_record"] = self.__get_paired_record(
record_copy)
for idx, parsed_line_record in enumerate(parsed_line):
if 'chrom_number' in parsed_line_record.keys():
chrom_number = str(parsed_line_record['chrom_number'])
if re.match(r'^\d{1,2}$', chrom_number):
chrom_number = chrom_number.zfill(2)
else:
chrom_number = chrom_number.upper()
if chrom_number not in ['X', 'Y']:
stats["non_standard_chroms"] += 1
stats["skipped"] += 1
# Remove from the buffer any records that are
# produced by the line being skipped and that
# might have been queued for indexing
if idx:
buffered_values = buffered_values[:-idx*2]
break
parsed_line_record['chrom_number'] = chrom_number
analysis_values = dict(
header_values.items() +
parsed_line_record.items())
analysis_values["source_id"] = self.__load_id__
buffered_values.append(index_cmd)
buffered_values.append(analysis_values)
if len(buffered_values) >= self.LOAD_FACTOR:
self.es_tools.submit_bulk_to_es(buffered_values)
buffered_values = []
# any leftovers....
if len(buffered_values) > 0:
self.es_tools.submit_bulk_to_es(buffered_values)
file_handle.close()
except IOError as error:
logging.warn('IO Error: %s', error)
logging.warn(
"NOTE: Load failed. Check logs to verify" +
" what was actually loaded. ")
file_handle.close()
finally:
self.enable_index_refresh()
stats['inserted'] = self.es_tools.get_id()
stats['error'] = ''
return stats
def disable_index_refresh(self):
''' Temporarily disables index refreshing during bulk indexing '''
return self.es_tools.put_settings({"refresh_interval": "-1"})
def enable_index_refresh(self):
''' Re-enables index refresh '''
if self.es_tools.put_settings({"refresh_interval": "1s"}):
return self.es_tools.forcemerge()
return False
def count_input_lines(self, input_file):
'''
Counts the number of non-comment lines in an input file
'''
num_lines = os.popen(
'grep -v "^#" ' +
input_file +
' | grep -v "^$" | wc -l').read().split('\n')[0]
if re.match(r'^\s*\d+$', num_lines):
return int(num_lines)
return 0
def validate_import(self, input_file=None, input_data=None, stats=None):
'''
verifies that the expected number of records has been created
'''
validated = False
import_stats = {}
self.es_tools.refresh_index()
if not input_file:
grouping_clause = {'source_id': self.__load_id__}
num_input_lines = len(input_data)
else:
import_stats['file_fullname'] = os.path.abspath(input_file)
grouping_clause = {'file_fullname': import_stats["file_fullname"]}
num_input_lines = self.count_input_lines(input_file)
results = self.es_tools.search({'match': grouping_clause})
import_stats['index'] = self.es_tools.get_index()
import_stats['doc_type'] = self.es_tools.get_doc_type()
import_stats['timestamp'] = datetime.now()
import_stats['record_count'] = num_input_lines
# in case import stats have been provided, adjust the count
# by taking out of consideration any skipped non-standard chromosomes
if stats:
import_stats['record_count'] -= stats["non_standard_chroms"]
import_stats['imported_record_count'] = results['hits']['total']
import_stats['log'] = ''
if self.validate_record_number(
import_stats['record_count'],
import_stats['imported_record_count']):
import_stats['log'] = 'All records have been imported.'
validated = True
else:
import_stats['log'] = 'Some records from' +\
' this input file have not been imported.'
# Verify that all imported records do have a sample_id or
# normal_sample_id field
filter_query = {
'filtered': {
'query': {
'match': grouping_clause
},
'filter': {
'bool': {
'must_not': [
{
'exists': {
'field': 'sample_id'
}
},
{
'exists': {
'field': 'normal_sample_id'
}
}
]
}
}
}
}
results = self.es_tools.search(filter_query)
if results['hits']['total'] != 0:
import_stats['log'] += ' ' + str(results['hits']['total']) + \
' records do not have sample_id/normal_sample_id attribute.'
validated = False
# Verify that there are no records with sample_id or normal_sample_id
# with value an empty string
filter_query = {
'filtered': {
'query': {
'match': grouping_clause
},
'filter': {
'bool': {
'should': [
{
'terms': {
'sample_id': ['']
}
},
{
'terms': {
'normal_sample_id': ['']
}
}
]
}
}
}
}
results = self.es_tools.search(filter_query)
if results['hits']['total'] != 0:
import_stats['log'] += ' ' + str(results['hits']['total']) + \
' records have an empty string as a ' +\
'sample_id/normal_sample_id attribute value.'
validated = False
# Verify that all imported records have the following fields -
# chrom number, start, end, tumor_type, expt_type, project
for field in ['chrom_number', 'start', 'end', 'caller',
'tumor_type', 'expt_type', 'project']:
filter_query = {
'filtered': {
'query': {
'match': grouping_clause
},
'filter': {
'bool': {
'must': [
{
'exists': {
'field': field
}
}
]
}
}
}
}
results = self.es_tools.search(filter_query)
total_hits = results['hits']['total']
if total_hits != import_stats['imported_record_count']:
error_count = import_stats['imported_record_count'] - \
results['hits']['total']
import_stats['log'] += ' ' + str(error_count) + \
' records are missing field \'' + field + '\'.'
validated = False
import_stats['validated'] = validated
self.record_import_stats(import_stats)
stats = PrettyTable(['Field', 'Value'])
stats.padding_width = 1
stats.max_width['Value'] = 100
for key in import_stats.keys():
stats.add_row([key, import_stats[key]])
stats.align = 'l'
logging.info(
"File import results:\n%s", stats
)
return validated
def record_import_stats(self, data):
'''
records the per-file import statistics into a
separate Elastic search index
'''
stats_index = 'import_stats_index'
if not self.es_tools.exists(stats_index):
from elasticsearch.exceptions import RequestError
try:
self.es_tools.es.indices.create(stats_index)
except RequestError:
logging.info("% exists.", stats_index)
res = self.es_tools.es.index('import_stats_index', 'log_type', data)
return res
def validate_record_number(self, record_count, imported_count):
'''
Some loaders produce several records for each input line;
such loaders should override this method
'''
return record_count == imported_count
def get_default_mappings(self):
'''
returns the default mapping configuration to be used
when creating a new Elastic search index
'''
document_type = self.es_tools.get_doc_type()
mappings = {
'mappings': {
document_type: {
"_size": {"enabled": True},
"dynamic_templates": [
{
"string_values": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "string",
"index": "not_analyzed"
}
}
}
]
}
}
}
return mappings
def create_index(self, mappings=None):
'''
a wrapper method which invokes the corresponding es_utils method.
If the index exists and the document type doesn't, creates the
document type only
'''
if not mappings:
mappings = self.get_default_mappings()
return self.es_tools.create_index(mappings)
def get_reference_data(self, index, sample_data):
''' searches the provided index for sample related data '''
query = []
for field in sample_data.keys():
query.append({'terms': {field: [sample_data[field]]}})
query = {'bool': {'must': query}}
try:
res = self.es_tools.global_search(index, query)
except NotFoundError:
logging.error("Index %s doesn't exist.", index)
return []
return res["hits"]["hits"]
def get_yaml_record(self, results, input_file):
'''
In case of multiple results, determines and returns the correct one;
more specific implementations may be added in child classes
'''
return results[0]['_source']
def get_index_cmd(self):
return {
"index": {
"_index": self.es_tools.get_index(),
"_type": self.es_tools.get_doc_type()
}
}
def __get_paired_record(self, record):
if isinstance(self.__paired_record_attributes__, list):
paired_record = {}
for key in self.__paired_record_attributes__:
paired_record[key] = copy.deepcopy(record[key])
return paired_record
return copy.deepcopy(record)
def get_source_id(self):
'''
Returns the load ID associated with the object
'''
return self.__load_id__
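# --- Minimal subclass sketch (illustrative only, not part of the loader library) ---
# AnalysisLoader is abstract: parse_line() and validate_input_file() must be
# implemented per data type. The class name, the column layout in record_attributes
# and the '.tsv' extension check below are hypothetical.
class TsvExampleLoader(AnalysisLoader):
    record_attributes = ['chrom_number', 'start', 'end', 'value']

    def parse_line(self, line):
        # split a tab-separated data line into a dict keyed by record_attributes
        fields = line.strip().split('\t')
        if len(fields) != len(self.record_attributes):
            return None
        record = dict(zip(self.record_attributes, fields))
        record['start'] = self.convert_to_number(record['start'])
        record['end'] = self.convert_to_number(record['end'])
        return record

    def validate_input_file(self, filename):
        # only accept tab-separated files by extension
        return filename.endswith('.tsv')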
|
import struct
from typing import Any, Dict
import os
import sys
from spacy.tokens import Doc, Token as SpacyToken
from spacy.tokens.graph import Graph
from .graph_token import Token, add_basic_edges, TokenId
JsonObject = Dict[str, Any]
# this is here because it needs to happen only once (per import)
Doc.set_extension("parent_graphs_per_sent", default=[])
Doc.set_extension("added_nodes", default=[])
def get_pybart(doc):
ret = []
for i, (graph, sent) in enumerate(zip(doc._.parent_graphs_per_sent, doc.sents)):
offset = sent[0].i
ret.append([])
for edge in graph.edges:
ret[i].append({
"head": doc[edge.head.i + offset] if edge.head.i < len(sent) else doc._.added_nodes[i][edge.head.i],
"tail": doc[edge.tail.i + offset] if edge.tail.i < len(sent) else doc._.added_nodes[i][edge.tail.i],
"label": edge.label_
})
return ret
Doc.set_extension("get_pybart", method=get_pybart)
def enhance_spike_doc(doc: Doc, spike_doc: JsonObject) -> JsonObject:
converted_graphs = doc._.parent_graphs_per_sent
for idx, sent in enumerate(spike_doc["sentences"]):
sent["graphs"]["universal-enhanced"] = {"edges": [], "roots": []}
for edge in converted_graphs[idx].edges:
if edge.label_.lower().startswith("root"):
sent["graphs"]["universal-enhanced"]["roots"].append(edge.tail.i) # assume we have only one token per graph node
else:
sent["graphs"]["universal-enhanced"]["edges"].append(
{"source": edge.head.i, "destination": edge.tail.i, "relation": edge.label_})
# sort the roots and edges for consistency purposes
sent["graphs"]["universal-enhanced"]["roots"] = sorted(sent["graphs"]["universal-enhanced"]["roots"])
sent["graphs"]["universal-enhanced"]["edges"] = sorted(sent["graphs"]["universal-enhanced"]["edges"],
key=lambda x: (x['source'], x['destination'], x['relation']))
return spike_doc
Doc.set_extension("enhance_spike_doc", method=enhance_spike_doc)
def parse_spacy_sent(sent):
sentence = []
offset = min(tok.i for tok in sent)
for i, tok in enumerate(sent):
# add current token to current sentence
sentence.append(Token(
TokenId(tok.i + 1 - offset), tok.text, tok.lemma_, tok.pos_, tok.tag_, "_",
TokenId((tok.head.i + 1 - offset) if tok.head.i != tok.i else 0), tok.dep_.lower(), "_", "_"))
# add root
sentence.append(Token(TokenId(0), None, None, None, None, None, None, None, None, None))
# after parsing entire sentence, add basic deprel edges,
# and add sentence to output list
add_basic_edges(sentence)
return sentence
def enhance_to_spacy_doc(orig_doc, converted_sentences, remove_enhanced_extra_info, remove_bart_extra_info):
for sent_idx, (orig_span, converted_sentence) in enumerate(zip(orig_doc.sents, converted_sentences)):
added_counter = 0
node_indices_map = dict()
nodes = []
edges = []
labels = []
orig_doc._.added_nodes.append(dict())
converted_sentence = [tok for tok in converted_sentence if tok.get_conllu_field("id") != '0']
for idx, tok in enumerate(converted_sentence):
new_id = tok.get_conllu_field("id")
node_indices_map[new_id.token_str] = idx
_ = nodes.append((idx,))
if new_id.minor != 0:
orig_doc._.added_nodes[sent_idx][idx] = tok.get_conllu_field("form") + (f"_{added_counter}" if tok.get_conllu_field("form") == "STATE" else f"[COPY_NODE_{added_counter}]")
added_counter += 1
for tok in converted_sentence:
new_id = tok.get_conllu_field("id").token_str
for head, rels in tok.get_new_relations():
for rel in rels:
head_id = head.get_conllu_field("id").token_str
edges.append((
node_indices_map[head_id if head_id != '0' else new_id],
node_indices_map[new_id]
))
_ = orig_doc.vocab[rel.to_str(remove_enhanced_extra_info, remove_bart_extra_info)] # this will push the label into the vocab if it's not there
labels.append(rel.to_str(remove_enhanced_extra_info, remove_bart_extra_info))
# Disable printing so that graph creation won't print many lines
sys.stdout = open(os.devnull, 'w')
orig_doc._.parent_graphs_per_sent.append(Graph(orig_doc, name="pybart", nodes=nodes, edges=edges, labels=labels))
# Restore printing possibility
sys.stdout = sys.__stdout__
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
HERE = os.path.dirname(os.path.abspath(__file__))
|
from random import sample, shuffle
import requests
from credential import UNSPLASH, PIXABAY
import argparse
from PIL import Image
from io import BytesIO
import os
from datetime import datetime
ROOT = os.path.abspath(os.path.dirname(__file__))
class PictureSource:
UNSPLASH = 1
PIXABAY = 2
class Picture(object):
def __init__(self):
pass
class Unsplash(Picture):
def __init__(self):
self.access_key = UNSPLASH['API_KEY']
self.access_secret = UNSPLASH['API_SECRET']
self.base_url = "https://api.unsplash.com"
self.exclude_authors = ["mischievous_penguins"]
def get(self, path, params=None):
headers = {
"Authorization": f"Client-ID {self.access_key}",
"Accept-Version": "v1",
}
params = {} if params is None else params
endpoint = f"{self.base_url}{path}"
resp = requests.get(endpoint, params=params, headers=headers)
return resp.json()
def get_pictures(self, query, count, page=1):
# https://unsplash.com/documentation#search-photos
path = "/search/photos"
params = {
"page": page,
"per_page": count,
"query": query,
"orientation": "landscape",
}
pictures = self.get(path, params).get("results", [])
pictures = [
{
"display_url": p["urls"]["small"],
# use the regular because the raw download is too large
"download_url": p["urls"]["regular"],
"thumbnail_url": p["urls"]["thumb"],
} for p in pictures
]
return pictures
class Pixabay(Picture):
def __init__(self):
self.key = PIXABAY['API_KEY']
self.base_url = "https://pixabay.com/api/"
def get(self, params=None):
params = {} if params is None else params
params.update({"key": self.key})
endpoint = self.base_url
resp = requests.get(endpoint, params=params)
# breakpoint()
return resp.json()
def get_pictures(self, query, count, page=1):
# https://pixabay.com/api/docs/#api_rate_limit
params = {
"q": query,
# Accepted values: "all", "horizontal", "vertical" Default: "all"
"orientation": "horizontal",
"page": page,
"per_page": count,
}
data = self.get(params)
pictures = data.get("hits", [])
pictures = [
{
"display_url": p["webformatURL"], # previewURL
"download_url": p["largeImageURL"],
"thumbnail_url": p["previewURL"],
} for p in pictures
]
return pictures
def get_pictures(query, source=PictureSource.UNSPLASH, page=1):
if source == PictureSource.UNSPLASH:
photos_src = Unsplash()
else:
photos_src = Pixabay()
query = "+".join([q.strip() for q in query.split(",")])
pics = photos_src.get_pictures(query, 10, page)
shuffle(pics)
return pics
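# Usage sketch: the query is a comma-separated list of terms, e.g.
#
#   pics = get_pictures("mountain, lake", source=PictureSource.UNSPLASH)
#   # -> shuffled list of dicts with "display_url", "download_url" and "thumbnail_url" keys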
def download_and_process(url, keyword, overwrite=False):
"""download the image and resize
and save to the correct directory
"""
width = 1024 # the width of the image
image = requests.get(url).content
im = Image.open(BytesIO(image))
orig_width, orig_height = im.size
height = width * (orig_height / orig_width)
new_size = (width, int(height))
im.thumbnail(new_size)
orig_filename = keyword.replace(' ', '-')
filename = orig_filename
suffix = 1
while True:
path = os.path.join(ROOT, "images", filename + '.png')
if not os.path.exists(path) or overwrite:
break
filename = orig_filename + '-' + str(suffix)
suffix += 1
im.save(path)
print(f"the image has been saved to {path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Search and download picture')
parser.add_argument('--keyword',
dest='keyword',
type=str,
help='search term')
parser.add_argument('--overwrite',
dest='overwrite',
action="store_true",
default=False,
help='whether to overwrite the image if exists')
args = parser.parse_args()
keyword = args.keyword
overwrite = args.overwrite
if keyword is None:
print("keyword is required")
parser.print_help()
else:
print(f'trying to search and download for keyword: {keyword}')
pictures = get_pictures(keyword, source=PictureSource.PIXABAY)
pic = pictures[0]
url = pic['download_url']
download_and_process(url, keyword, overwrite)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this module you find the base workflow for an impurity DOS calculation and
some helper methods to do so with AiiDA
"""
from __future__ import print_function, absolute_import
from aiida.orm import Code, load_node, CalcJobNode, Float, Int, Str
from aiida.plugins import DataFactory
from aiida.engine import if_, ToContext, WorkChain, calcfunction
from aiida.common import LinkType
from aiida.common.folders import SandboxFolder
from aiida_kkr.workflows.gf_writeout import kkr_flex_wc
from aiida_kkr.workflows.kkr_imp_sub import kkr_imp_sub_wc
from aiida_kkr.workflows.dos import kkr_dos_wc
from aiida_kkr.calculations import KkrimpCalculation
import tarfile
import os
__copyright__ = (u"Copyright (c), 2019, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.5.7"
__contributors__ = (u"Fabian Bertoldo", u"Philipp Ruessmann")
#TODO: improve workflow output node structure
#TODO: generalise search for imp_info and conv_host from startpot
Dict = DataFactory('dict')
RemoteData = DataFactory('remote')
SinglefileData = DataFactory('singlefile')
XyData = DataFactory('array.xy')
class kkr_imp_dos_wc(WorkChain):
"""
Workchain of a DOS calculation for an impurity system starting from a
converged impurity calculation or workflow
:param options: (Dict), computer options
:param wf_parameters: (Dict), specifications for the DOS
:param kkr: (Code), mandatory: KKR code for gf_writeout step
:param kkrimp: (Code), mandatory: KKRimp code for DOS calculation
:param imp_host_pot: (SinglefileData), mandatory: impurity startpotential
:return workflow_info: (Dict), Information on workflow results
:return last_calc_output_parameters: (Dict), output parameters of
the last called calculation
:return last_calc_info: (Dict), information of the last called calculation
"""
_workflowversion = __version__
_wf_label = 'kkr_imp_dos_wc'
_wf_description = 'Workflow for a KKR impurity DOS calculation'
_options_default = {'queue_name' : '', # Queue name to submit jobs to
'resources': {"num_machines": 1}, # resources to allocate for the job
'max_wallclock_seconds' : 60*60, # walltime after which the job gets killed (gets passed to KKR)
'custom_scheduler_commands' : '', # some additional scheduler commands
'use_mpi' : True} # execute KKR with mpi or without
_wf_default = {'ef_shift': 0. , # set custom absolute E_F (in eV)
'clean_impcalc_retrieved': True, # remove output of KKRimp calculation after successful parsing of DOS files
}
# add defaults of dos_params since they are passed onto that workflow
for key, value in kkr_dos_wc.get_wf_defaults(silent=True).items():
if key == 'dos_params':
_wf_default[key] = value
@classmethod
def get_wf_defaults(self):
"""
Print and return the _wf_default dictionary. Can be used to easily create a set of wf_parameters.
returns _wf_default
"""
print('Version of workflow: {}'.format(self._workflowversion))
return self._wf_default
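# Usage sketch (assumes a loaded AiiDA profile; only `ef_shift` is overridden here,
# all other defaults are kept):
#
#   defaults = kkr_imp_dos_wc.get_wf_defaults()
#   defaults['ef_shift'] = 0.1
#   wf_parameters = Dict(dict=defaults)  # can then be passed as the `wf_parameters` input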
@classmethod
def define(cls, spec):
"""
Defines the outline of the workflow
"""
# Take input of the workflow or use defaults defined above
super(kkr_imp_dos_wc, cls).define(spec)
spec.input("kkr", valid_type=Code, required=False,
help="KKRhost code, needed if gf_dos_remote is not given.")
spec.input("kkrimp", valid_type=Code, required=True,
help="KKRimp code, always needed.")
spec.input("options", valid_type=Dict, required=False,
default=Dict(dict=cls._options_default),
help="Computer options (resources, quene name, etc.).")
spec.input("wf_parameters", valid_type=Dict, required=False,
default=Dict(dict=cls._wf_default),
help="DOS workflow parameters (energy range, etc.).")
spec.input("host_remote", valid_type=RemoteData, required=False,
help="RemoteData node of the (converged) host calculation.")
spec.input("gf_dos_remote", valid_type=RemoteData, required=False,
help="RemoteData node of precomputed host GF for DOS energy contour.")
spec.input("kkrimp_remote", valid_type=RemoteData, required=False,
help="RemoteData node of previous (converged) KKRimp calculation.")
spec.input("imp_pot_sfd", valid_type=SinglefileData, required=False,
help="impurity potential single file data. Needs also impurity_info node.")
spec.input("impurity_info", valid_type=Dict, required=False,
help="impurity info node that specifies the relation between imp_pot_sfd to the host system. Mandatory if imp_pot_sfd is given.")
spec.input("params_kkr_overwrite", valid_type=Dict, required=False,
help="Set some input parameters of the KKR calculation.")
# specify the outputs
spec.output('workflow_info', valid_type=Dict)
spec.output('last_calc_output_parameters', valid_type=Dict)
spec.output('last_calc_info', valid_type=Dict)
spec.output('dos_data', valid_type=XyData)
spec.output('dos_data_interpol', valid_type=XyData)
spec.output('gf_dos_remote', valid_type=XyData, required=False,
help="RemoteData node of the computed host GF.")
# Here the structure of the workflow is defined
spec.outline(
cls.start, # start and initialise workflow
if_(cls.validate_input)( # validate the given input
cls.run_gfstep, # run GF step with DOS energy contour
cls.run_imp_dos), # run DOS for the impurity problem
cls.return_results # terminate workflow and return results
)
# Define possible exit codes for the workflow
spec.exit_code(220, "ERROR_UNKNOWN_PROBLEM",
message="Unknown problem detected.")
spec.exit_code(221, "ERROR_NO_PARENT_FOUND",
message="Unable to find the parent remote_data node that led to "
"the input impurity calculation. You need to specify "
"`host_remote` and `impurity_info` nodes.")
spec.exit_code(222, "ERROR_GF_WRITEOUT_UNSUCCESFUL",
message="The gf_writeout workflow was not succesful, cannot continue.")
spec.exit_code(223, "ERROR_IMP_POT_AND_REMOTE",
message="The input nodes `imp_pot_sfd` and `kkrimp_remote` are given but are mutually exclusive")
spec.exit_code(224, "ERROR_KKR_CODE_MISSING",
message="KKRhost code node (`inputs.kkr`) is missing if gf_dos_remote is not given.")
spec.exit_code(225, "ERROR_HOST_REMOTE_MISSING",
message="`host_remote` node is missing if gf_dos_remote is not given.")
spec.exit_code(226, "ERROR_IMP_SUB_WORKFLOW_FAILURE",
message="KKRimp sub-workflow failed.")
def start(self):
"""
Initialise context and some parameters
"""
self.report('INFO: started KKR impurity DOS workflow version {}'
''.format(self._workflowversion))
# input both wf and options parameters
if 'wf_parameters' in self.inputs:
wf_dict = self.inputs.wf_parameters.get_dict()
if wf_dict == {}:
wf_dict = self._wf_default
self.report('INFO: using default wf parameters')
if 'options' in self.inputs:
options_dict = self.inputs.options.get_dict()
if options_dict == {}:
options_dict = self._options_default
self.report('INFO: using default options parameters')
# set values, or defaults
self.ctx.use_mpi = options_dict.get('use_mpi', self._options_default['use_mpi'])
self.ctx.resources = options_dict.get('resources', self._options_default['resources'])
self.ctx.max_wallclock_seconds = options_dict.get('max_wallclock_seconds', self._options_default['max_wallclock_seconds'])
self.ctx.queue = options_dict.get('queue_name', self._options_default['queue_name'])
self.ctx.custom_scheduler_commands = options_dict.get('custom_scheduler_commands', self._options_default['custom_scheduler_commands'])
self.ctx.options_params_dict = Dict(dict={'use_mpi': self.ctx.use_mpi, 'resources': self.ctx.resources,
'max_wallclock_seconds': self.ctx.max_wallclock_seconds, 'queue_name': self.ctx.queue,
'custom_scheduler_commands': self.ctx.custom_scheduler_commands})
# set workflow parameters for the KKR impurity calculations
self.ctx.ef_shift = wf_dict.get('ef_shift', self._wf_default['ef_shift'])
self.ctx.dos_params_dict = wf_dict.get('dos_params', self._wf_default['dos_params'])
self.ctx.cleanup_impcalc_output = wf_dict.get('clean_impcalc_retrieved', self._wf_default['clean_impcalc_retrieved'])
# set workflow parameters for the KKR impurity calculation
self.ctx.nsteps = 1 # always only one step for DOS calculation
self.ctx.kkr_runmax = 1 # no restarts for DOS calculation
# set workflow label and description
self.ctx.description_wf = self.inputs.get('description', self._wf_description)
self.ctx.label_wf = self.inputs.get('label', self._wf_label)
# whether or not to compute the GF writeout step
self.ctx.skip_gfstep = False
self.report('INFO: use the following parameters:\n'
'use_mpi: {}\n'
'Resources: {}\n'
'Walltime (s): {}\n'
'queue name: {}\n'
'scheduler command: {}\n'
'description: {}\n'
'label: {}\n'.format(self.ctx.use_mpi, self.ctx.resources, self.ctx.max_wallclock_seconds,
self.ctx.queue, self.ctx.custom_scheduler_commands,
self.ctx.description_wf, self.ctx.label_wf))
# return para/vars
self.ctx.successful = True
self.ctx.errors = []
self.ctx.formula = ''
def validate_input(self):
"""
Validate input and catch possible errors
"""
inputs = self.inputs
inputs_ok = True
if 'imp_pot_sfd' in inputs:
# check if input potential has incoming return link
if len(inputs.imp_pot_sfd.get_incoming(link_type=LinkType.RETURN).all()) < 1:
self.report("input potential not from kkrimp workflow: take remote_data folder of host system from input")
if 'impurity_info' in inputs and 'host_remote' in inputs:
self.ctx.imp_info = inputs.impurity_info
self.ctx.conv_host_remote = inputs.host_remote
else:
self.report('WARNING: startpot has no parent and can not find '
'a converged host RemoteData node')
if 'impurity_info' not in inputs:
self.report("`impurity_info` optional input node not given but needed in this case.")
if 'host_remote' not in inputs:
self.report("`host_remote` optional input node not given but needed in this case.")
inputs_ok = False
self.ctx.errors.append(1)
else:
# if a return link is found, get the input nodes automatically
self.report('INFO: get converged host RemoteData node and '
'impurity_info node from database')
self.ctx.kkr_imp_wf = inputs.imp_pot_sfd.get_incoming().first().node
self.report('INFO: found underlying kkr impurity workflow '
'(pk: {})'.format(self.ctx.kkr_imp_wf.pk))
self.ctx.imp_info = self.ctx.kkr_imp_wf.inputs.impurity_info
self.report('INFO: found impurity_info node (pk: {})'.format(
self.ctx.imp_info.pk))
if 'remote_data' in self.ctx.kkr_imp_wf.inputs:
remote_data_gf_writeout = self.ctx.kkr_imp_wf.inputs.remote_data
gf_writeout_calc = remote_data_gf_writeout.get_incoming(node_class=CalcJobNode).first().node
self.ctx.conv_host_remote = gf_writeout_calc.inputs.parent_folder
self.report('INFO: imported converged_host_remote (pk: {}) and '
'impurity_info from database'.format(self.ctx.conv_host_remote.pk))
else:
self.ctx.conv_host_remote = self.ctx.kkr_imp_wf.inputs.gf_remote.inputs.remote_folder.inputs.parent_calc_folder.inputs.remote_folder.outputs.remote_folder
self.report('INFO: imported converged_host_remote (pk: {}) and '
'impurity_info from database'.format(self.ctx.conv_host_remote.pk))
if 'gf_dos_remote' in self.inputs:
self.ctx.skip_gfstep = True
else:
if 'kkr' not in self.inputs:
self.report("[ERROR] `kkr` input node needed if `gf_dos_remote` is not given")
inputs_ok = False
self.ctx.errors.append(3) # raises ERROR_KKR_CODE_MISSING
if 'host_remote' not in self.inputs:
self.report("[ERROR] `host_remote` input node needed if `gf_dos_remote` is not given")
inputs_ok = False
self.ctx.errors.append(4) # raises ERROR_HOST_REMOTE_MISSING
else:
self.ctx.conv_host_remote = self.inputs.host_remote
if 'imp_pot_sfd' in self.inputs and 'kkrimp_remote' in self.inputs:
self.report("[ERROR] both `imp_pot_sfd` and `kkrimp_remote` node in inputs")
inputs_ok = False
self.ctx.errors.append(2) # raises ERROR_IMP_POT_AND_REMOTE
elif 'imp_pot_sfd' in self.inputs:
self.report("[INFO] use `imp_pot_sfd` input node")
elif 'kkrimp_remote' in self.inputs:
self.report("[INFO] use `kkrimp_remote` input node")
# extract imp_info node from parent KKRimp calculation
parent_impcalc = self.inputs.kkrimp_remote.get_incoming(node_class=CalcJobNode).first().node
self.ctx.imp_info = parent_impcalc.inputs.impurity_info
else:
self.report("neither `imp_pot_sfd` nor `kkrimp_remote` node in inputs")
inputs_ok = False
self.ctx.errors.append(1) # raises ERROR_NO_PARENT_FOUND
self.report('INFO: validated input successfully: {}'.format(inputs_ok))
return inputs_ok
def run_gfstep(self):
"""
Start GF writeout step with DOS energy contour
"""
if not self.ctx.skip_gfstep:
options = self.ctx.options_params_dict
kkrcode = self.inputs.kkr
converged_host_remote = self.ctx.conv_host_remote
imp_info = self.ctx.imp_info
wf_params_gf = Dict(dict={'ef_shift':self.ctx.ef_shift, 'dos_run':True,
'dos_params':self.ctx.dos_params_dict})
label_gf = 'GF writeout for imp DOS'
description_gf = 'GF writeout step with energy contour for impurity DOS'
builder = kkr_flex_wc.get_builder()
builder.metadata.label = label_gf
builder.metadata.description = description_gf
builder.kkr = kkrcode
builder.options = options
builder.wf_parameters = wf_params_gf
builder.remote_data = converged_host_remote
builder.impurity_info = imp_info
if "params_kkr_overwrite" in self.inputs:
builder.params_kkr_overwrite = self.inputs.params_kkr_overwrite
future = self.submit(builder)
self.report('INFO: running GF writeout (pid: {})'.format(future.pk))
return ToContext(gf_writeout=future)
def run_imp_dos(self):
"""
Use previous GF step to calculate DOS for the impurity problem
"""
if not self.ctx.skip_gfstep:
# use computed gf_writeout
if not self.ctx.gf_writeout.is_finished_ok:
return self.exit_codes.ERROR_GF_WRITEOUT_UNSUCCESFUL
gf_writeout_wf = self.ctx.gf_writeout
gf_writeout_calc = load_node(self.ctx.gf_writeout.outputs.workflow_info.get_dict().get('pk_flexcalc'))
gf_writeout_remote = gf_writeout_wf.outputs.GF_host_remote
self.ctx.pk_flexcalc = self.ctx.gf_writeout.outputs.workflow_info.get_dict().get('pk_flexcalc')
else:
# use gf_writeout from input
gf_writeout_remote = self.inputs.gf_dos_remote
gf_writeout_calc = gf_writeout_remote.get_incoming(node_class=CalcJobNode).first().node
self.ctx.pk_flexcalc = gf_writeout_calc.pk
options = self.ctx.options_params_dict
kkrimpcode = self.inputs.kkrimp
if 'imp_pot_sfd' in self.inputs:
# take impurity potential SinglefileData node
impurity_pot_or_remote = self.inputs.imp_pot_sfd
elif 'kkrimp_remote' in self.inputs:
# or from RemoteData node of previous KKRimp calc
impurity_pot_or_remote = self.inputs.kkrimp_remote
imps = self.ctx.imp_info
nspin = gf_writeout_calc.outputs.output_parameters.get_dict().get('nspin')
self.ctx.nspin = nspin
self.report('nspin: {}'.format(nspin))
self.ctx.kkrimp_params_dict = Dict(dict={'nspin': nspin,
'nsteps': self.ctx.nsteps,
'kkr_runmax': self.ctx.kkr_runmax,
'dos_run': True})
kkrimp_params = self.ctx.kkrimp_params_dict
label_imp = 'KKRimp DOS (GF: {}, imp_pot: {}, Zimp: {}, ilayer_cent: {})'.format(
gf_writeout_calc.pk, impurity_pot_or_remote.pk, imps.get_dict().get('Zimp'), imps.get_dict().get('ilayer_center'))
description_imp = 'KKRimp DOS run (GF: {}, imp_pot: {}, Zimp: {}, ilayer_cent: {}, R_cut: {})'.format(
gf_writeout_calc.pk, impurity_pot_or_remote.pk, imps.get_dict().get('Zimp'), imps.get_dict().get('ilayer_center'),
imps.get_dict().get('Rcut'))
builder = kkr_imp_sub_wc.get_builder()
builder.metadata.label = label_imp
builder.metadata.description = description_imp
builder.kkrimp = kkrimpcode
builder.options = options
builder.wf_parameters = kkrimp_params
builder.remote_data = gf_writeout_remote
if 'imp_pot_sfd' in self.inputs:
builder.host_imp_startpot = impurity_pot_or_remote
else:
builder.kkrimp_remote = impurity_pot_or_remote
builder.impurity_info=imps
future = self.submit(builder)
self.report('INFO: running DOS step for impurity system (pid: {})'.format(future.pk))
return ToContext(kkrimp_dos=future)
def return_results(self):
"""
Return the results and create all of the output nodes
"""
if self.ctx.errors!=[]:
if 1 in self.ctx.errors:
return self.exit_codes.ERROR_NO_PARENT_FOUND
elif 2 in self.ctx.errors:
return self.exit_codes.ERROR_IMP_POT_AND_REMOTE
elif 3 in self.ctx.errors:
return self.exit_codes.ERROR_KKR_CODE_MISSING
elif 4 in self.ctx.errors:
return self.exit_codes.ERROR_HOST_REMOTE_MISSING
else:
return self.exit_codes.ERROR_UNKNOWN_PROBLEM
self.report('INFO: creating output nodes for the KKR imp DOS workflow ...')
if not self.ctx.kkrimp_dos.is_finished_ok:
self.report('ERROR: sub workflow for impurity calculation failed')
return self.exit_codes.ERROR_IMP_SUB_WORKFLOW_FAILURE
else:
last_calc_pk = self.ctx.kkrimp_dos.outputs.workflow_info.get_dict().get('last_calc_nodeinfo')['pk']
last_calc = load_node(last_calc_pk)
last_calc_output_params = last_calc.outputs.output_parameters
last_calc_info = self.ctx.kkrimp_dos.outputs.workflow_info
outputnode_dict = {}
outputnode_dict['impurity_info'] = self.ctx.imp_info.get_dict()
outputnode_dict['workflow_name'] = self.__class__.__name__
outputnode_dict['workflow_version'] = self._workflowversion
if not self.ctx.skip_gfstep:
outputnode_dict['used_subworkflows'] = {'gf_writeout': self.ctx.gf_writeout.pk}
else:
outputnode_dict['used_subworkflows'] = {}
outputnode_dict['used_subworkflows']['impurity_dos'] = self.ctx.kkrimp_dos.pk
outputnode_t = Dict(dict=outputnode_dict)
outputnode_t.label = 'kkr_imp_dos_wc_inform'
outputnode_t.description = 'Contains information for workflow'
outputnode_t.store()
# interpol dos file and store to XyData nodes
dos_extracted, dosXyDatas = self.extract_dos_data(last_calc)
self.report('INFO: extracted DOS data? {}'.format(dos_extracted))
if dos_extracted:
self.out('dos_data', dosXyDatas['dos_data'])
self.out('dos_data_interpol', dosXyDatas['dos_data_interpol'])
# maybe cleanup retrieved folder of DOS calculation
if self.ctx.cleanup_impcalc_output:
self.report('INFO: cleanup after storing of DOS data')
pk_impcalc = self.ctx.kkrimp_dos.outputs.workflow_info['pks_all_calcs'][0]
cleanup_kkrimp_retrieved(pk_impcalc)
self.report('INFO: workflow_info node: {}'.format(outputnode_t.uuid))
self.out('workflow_info', outputnode_t)
self.out('last_calc_output_parameters', last_calc_output_params)
self.out('last_calc_info', last_calc_info)
self.report('INFO: created output nodes for KKR imp DOS workflow.')
self.report('\n'
'|------------------------------------------------------------------------------------------------------------------|\n'
'|-------------------------------------| Done with the KKR imp DOS workflow! |--------------------------------------|\n'
'|------------------------------------------------------------------------------------------------------------------|')
def extract_dos_data(self, last_calc):
"""
Extract DOS data from retrieved folder of KKRimp calculation.
If output is compressed in tarfile take care of extracting this before parsing `out_ldos*` files.
The extraction of the DOS data is done in `self.extract_dos_data_from_folder()` calls.
returns:
:boolean dos_extracted: logical signalling if extraction of DOS files was successful
:dictionary dosXyDatas: dictionary containing dos data and interpolated dos data
"""
# here we look for the dos files or the tarball containing the dos files:
dos_retrieved = last_calc.outputs.retrieved
if KkrimpCalculation._FILENAME_TAR in dos_retrieved.list_object_names():
# deal with packed output files (overwrites dos_retrieved with a sandbox into which the files are extracted);
# this way the unpacked files are deleted after parsing and only the tarball is kept in the retrieved directory
# for this we create a Sandbox folder
with SandboxFolder() as tempfolder:
# get abs paths of tempfolder and tarfile (needed by extract method of tarfile)
# get abs path of tempfolder
with tempfolder.open('.dummy','w') as tmpfile:
tempfolder_path = os.path.dirname(tmpfile.name)
# get path of tarfile
with dos_retrieved.open(KkrimpCalculation._FILENAME_TAR) as tf:
tfpath = tf.name
# extract file from tarfile of retrieved to tempfolder
with tarfile.open(tfpath) as tf:
tar_filenames = [ifile.name for ifile in tf.getmembers()]
for filename in tar_filenames:
if 'dos' in filename: # should extract all out_ldos*, out_lmdos* files
tf.extract(filename, tempfolder_path) # extract to tempfolder
# now files are in tempfolder from where we can extract the dos data
dos_extracted, dosXyDatas = self.extract_dos_data_from_folder(tempfolder, last_calc)
else:
# extract directly from retrieved (no tarball there)
dos_extracted, dosXyDatas = self.extract_dos_data_from_folder(dos_retrieved, last_calc)
return dos_extracted, dosXyDatas
def extract_dos_data_from_folder(self, folder, last_calc):
"""
Get DOS data and parse files.
"""
# initialize in case dos data is not extracted
dosXyDatas = None
# get list of files in directory (needed since SandboxFolder does not have `list_object_names` method)
# also extract absolute path of folder (needed by parse_impdosfiles since calcfunction does not work with SandboxFolder as input)
if isinstance(folder, SandboxFolder):
folder_abspath = folder.abspath
filelist = os.listdir(folder_abspath)
else:
filelist = folder.list_object_names()
with folder.open(filelist[0]) as tmpfile:
folder_abspath = tmpfile.name.replace(filelist[0], '')
# check if out_ldos* files are there and parse dos files
if 'out_ldos.interpol.atom=01_spin1.dat' in filelist:
# extract EF and number of atoms from kkrflex_writeout calculation
kkrflex_writeout = load_node(self.ctx.pk_flexcalc)
parent_calc_kkr_converged = kkrflex_writeout.inputs.parent_folder.get_incoming(node_class=CalcJobNode).first().node
ef = parent_calc_kkr_converged.outputs.output_parameters.get_dict().get('fermi_energy')
last_calc_output_params = last_calc.outputs.output_parameters
natom = last_calc_output_params.get_dict().get('number_of_atoms_in_unit_cell')
# parse dosfiles using nspin, EF and Natom inputs
dosXyDatas = parse_impdosfiles(Str(folder_abspath), Int(natom), Int(self.ctx.nspin), Float(ef))
dos_extracted = True
else:
dos_extracted = False
dosXyDatas = None
return dos_extracted, dosXyDatas
@calcfunction
def parse_impdosfiles(dos_abspath, natom, nspin, ef):
"""
Read `out_ldos*` files and create XyData node with l-resolved DOS (+node for interpolated DOS if files are found)
Inputs:
:param dos_abspath: absolute path to folder where `out_ldos*` files reside (AiiDA Str object)
:param natom: number of atoms (AiiDA Int object)
:param nspin: number of spin channels (AiiDA Int object)
:param ef: Fermi energy in Ry units (AiiDA Float object)
Returns:
output dictionary containing
output = {'dos_data': dosnode, 'dos_data_interpol': dosnode2}
where `dosnode` and `dosnode2` are AiiDA XyData objects
"""
from masci_tools.io.common_functions import get_Ry2eV, get_ef_from_potfile
from numpy import loadtxt, array
# add '/' if missing from path
abspath = dos_abspath.value
if abspath[-1] != '/': abspath += '/'
# read dos files
dos, dos_int = [], []
for iatom in range(1, natom.value+1):
for ispin in range(1, nspin.value+1):
with open(abspath+'out_ldos.atom=%0.2i_spin%i.dat'%(iatom, ispin)) as dosfile:
tmp = loadtxt(dosfile)
dos.append(tmp)
with open(abspath+'out_ldos.interpol.atom=%0.2i_spin%i.dat'%(iatom, ispin)) as dosfile:
tmp = loadtxt(dosfile)
dos_int.append(tmp)
dos, dos_int = array(dos), array(dos_int)
# convert to eV units
eVscale = get_Ry2eV()
dos[:,:,0] = (dos[:,:,0]-ef.value)*eVscale
dos[:,:,1:] = dos[:,:,1:]/eVscale
dos_int[:,:,0] = (dos_int[:,:,0]-ef.value)*eVscale
dos_int[:,:,1:] = dos_int[:,:,1:]/eVscale
# create output nodes
dosnode = XyData()
dosnode.label = 'dos_data'
dosnode.description = 'Array data containing uninterpolated DOS (i.e. dos at finite imaginary part of energy). 3D array with (atoms, energy point, l-channel) dimensions.'
dosnode.set_x(dos[:,:,0], 'E-EF', 'eV')
name = ['tot', 's', 'p', 'd', 'f', 'g']
name = name[:len(dos[0,0,1:])-1]+['ns']
ylists = [[],[],[]]
for l in range(len(name)):
ylists[0].append(dos[:,:,1+l])
ylists[1].append('dos '+name[l])
ylists[2].append('states/eV')
dosnode.set_y(ylists[0], ylists[1], ylists[2])
# node for interpolated DOS
dosnode2 = XyData()
dosnode2.label = 'dos_interpol_data'
dosnode2.description = 'Array data containing interpolated DOS (i.e. dos interpolated onto the real energy axis). 3D array with (atoms, energy point, l-channel) dimensions.'
dosnode2.set_x(dos_int[:,:,0], 'E-EF', 'eV')
ylists = [[],[],[]]
for l in range(len(name)):
ylists[0].append(dos_int[:,:,1+l])
ylists[1].append('interpolated dos '+name[l])
ylists[2].append('states/eV')
dosnode2.set_y(ylists[0], ylists[1], ylists[2])
output = {'dos_data': dosnode, 'dos_data_interpol': dosnode2}
return output
def cleanup_kkrimp_retrieved(pk_impcalc):
"""
remove output_all.tar.gz from retrieved of impurity calculation identified by pk_impcalc
"""
from aiida.orm import load_node
from aiida_kkr.calculations import KkrimpCalculation
# extract retrieved folder
doscalc = load_node(pk_impcalc)
ret = doscalc.outputs.retrieved
# name of tarfile
tfname = KkrimpCalculation._FILENAME_TAR
# remove tarfile from retreived dir
if tfname in ret.list_object_names():
ret.delete_object(tfname, force=True)
|
from typing import List
import numpy as np
from numpy import zeros
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import (
StressObject, StrainObject, OES_Object)
from pyNastran.f06.f06_formatting import write_float_13e
BASIC_TABLES = {
'OESATO1', 'OESCRM1', 'OESPSD1', 'OESRMS1', 'OESNO1',
'OESATO2', 'OESCRM2', 'OESPSD2', 'OESRMS2', 'OESNO2',
'OSTRATO1', 'OSTRCRM1', 'OSTRPSD1', 'OSTRRMS1', 'OSTRNO1',
'OSTRATO2', 'OSTRCRM2', 'OSTRPSD2', 'OSTRRMS2', 'OSTRNO2',
}
VM_TABLES = {'OESXRMS1', 'OESXNO1'}
class RandomPlateArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False) ## why???
self.element_node = None
self.fiber_curvature = None
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
#self.itime = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self) -> bool:
return False
@property
def is_complex(self) -> bool:
return True
@property
def nnodes_per_element(self) -> int:
return get_nnodes(self)
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def build(self):
"""sizes the vectorized attributes of the ComplexPlateArray"""
if not hasattr(self, 'subtitle'):
self.subtitle = self.data_code['subtitle']
nnodes = self.nnodes_per_element
#self.names = []
self.nelements //= nnodes
#print('element_type=%r ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.element_type, self.ntimes, self.nelements, self.ntotal, self.subtitle))
#self.nelements //= self.ntimes
#self.ntotal = self.nelements # * 2
#if self.element_name == 'CTRIA3':
#print('element_type=%r ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.element_type, self.ntimes, self.nelements, self.ntotal, self.subtitle))
#print(self)
if self.is_sort1:
# old
#ntimes = self.ntimes
#nelements = self.nelements
ntimes = len(self._ntotals)
ntotal = self._ntotals[0]
nelements = ntotal // 2
#ntotal = self.ntotal
nx = ntimes
ny = nelements * 2
nlayers = nelements * 2 * nnodes
#ntotal = nelements * 2
#if self.element_name in ['CTRIA3', 'CQUAD8']:
#print(f"SORT1 ntimes={ntimes} nelements={nelements} ntotal={ntotal}")
elif self.is_sort2:
# ntotal=164
# len(_ntotals) = 4580 -> nelements=4580
# nfreqs=82
# flip this to sort1?
#ntimes = self.ntotal
#nnodes = self.ntimes
#ntotal = nnodes
#nelements = self.ntimes
#ntimes = self.nelements # // nelements
#ntotal = self.ntotal
nelements = len(self._ntotals)
ntotal = self._ntotals[0]
ntimes = ntotal // 2 // nnodes
#print(self._ntotals)
ntotal = self._ntotals[0]
#nelements = len(self._ntotals)
#ntimes = ntotal // 2
#ntimes, nelements = nelements_real, ntimes_real
#ntotal = self.ntotal
nlayers = nelements * 2 * nnodes
ny = nlayers # nelements * 2 * nnodes
nx = ntimes
#if self.element_name in ['CTRIA3', 'CQUAD8']:
#if self.element_name in ['CQUAD4']:
#print(f"SORT2 ntimes={ntimes} nelements={nelements} ntotal={ntotal} nnodes={nnodes} nlayers={nlayers}")
else: # pragma: no cover
raise RuntimeError('expected sort1/sort2\n%s' % self.code_information())
#self.ntotal
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self.build_data(ntimes, nelements, nlayers, nnodes, ntotal, nx, ny, self._times_dtype)
def build_data(self, ntimes, nelements, nlayers, nnodes, ntotal, nx, ny, dtype):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
#ntotal = nelements * 2
self.ntotal = ntotal
#_times = zeros(ntimes, dtype=dtype)
#element = zeros(nelements, dtype='int32')
self._times = zeros(ntimes, dtype)
#self.ntotal = self.nelements * nnodes
self.element_node = zeros((nlayers, 2), 'int32')
# the number is messed up because of the offset for the element's properties
#if not self.nelements * 2 == self.ntotal:
#msg = 'ntimes=%s nelements=%s nlayers=%s ntotal=%s' % (
#self.ntimes, self.nelements, self.nelements * 2, self.ntotal)
#raise RuntimeError(msg)
self.fiber_curvature = zeros(nlayers, 'float32')
# [oxx, oyy, txy]
nresults = 3
if self.has_von_mises:
# ovm
nresults += 1
#print(f'ntimes={self.ntimes} nelements={self.nelements} ntotal={self.ntotal}')
self.data = zeros((nx, ny, nresults), 'float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
#print(f'column_names = {column_names} column_values={column_values}')
#print(self.element_node)
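# NOTE: pd.Panel was removed in pandas 1.0, so the call below only works with older
# pandas versions; the element_node-based block after the early `return` is unreachable as written.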
self.data_frame = pd.Panel(self.data, items=column_values,
major_axis=self.element_node, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Item']
return
names = ['ElementID', 'NodeID']
ipos = np.where(self.element_node[:, 0] > 0)
element_node = [
self.element_node[ipos, 0],
self.element_node[ipos, 1],
]
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, element_node, self.data[:, ipos, :], from_tuples=False, from_array=True,
names=names,
)
#print(data_frame)
#self.dataframe = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
assert self.element_node[:, 0].min() > 0, self.element_node
assert table.element_node[:, 0].min() > 0, table.element_node
if not np.array_equal(self.element_node, table.element_node):
assert self.element_node.shape == table.element_node.shape, 'shape=%s element_node.shape=%s' % (
self.element_node.shape, table.element_node.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\nEid, Nid\n' % str(self.code_information())
for (eid1, nid1), (eid2, nid2) in zip(self.element_node, table.element_node):
msg += '(%s, %s), (%s, %s)\n' % (eid1, nid1, eid2, nid2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, (eid, nid) in enumerate(self.element_node):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(oxx1, oyy1, txy1) = t1
(oxx2, oyy2, txy2) = t2
#d = t1 - t2
if not np.allclose(
[oxx1, oyy1, txy1], # atol=0.0001
[oxx2, oyy2, txy2], atol=0.075):
ni = len(str(eid)) + len(str(nid))
#if not np.array_equal(t1, t2):
msg += ('%s (%s, %s, %s)\n'
'%s (%s, %s, %s)\n' % (
eid,
oxx1, oyy1, txy1,
' ' * ni,
oxx2, oyy2, txy2,
))
msg += ('%s (%s, %s, %s)\n'
% (
' ' * ni,
oxx1 - oxx2,
oyy1 - oyy2,
txy1 - txy2,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def finalize(self):
"""Calls any OP2 objects that need to do any post matrix calcs"""
if self.is_sort1:
return
#print('finalize random plate')
self.set_as_sort1()
#print(self.get_stats())
#print(self._times, self._times.dtype)
#print(self.element_node)
#aaa
def add_sort1(self, dt, eid, nid, fd1, oxx1, oyy1, txy1, fd2, oxx2, oyy2, txy2):
assert self.is_sort1, self.sort_method
#assert self.element_node.max() == 0, self.element_node
#if self.element_name in ['CTRIA3', 'CQUAD8']:
#print(f'SORT1 {self.element_name}: itime={self.itime} ielement={self.ielement} itotal={self.itotal} dt={dt} eid={eid} nid={nid} fd={fd1} oxx={oxx1}')
#print(f'SORT1 {self.element_name}: itime={self.itime} ielement={self.ielement} itotal={self.itotal+1} dt={dt} eid={eid} nid={nid} fd={fd2} oxx={oxx2}')
#print('%s: itime=%s itotal=%s dt=%s eid=%s fd=%s oxx=%s' % (self.element_name, self.itime, self.itotal, dt, eid, fd, oxx))
self._times[self.itime] = dt
#print(self.element_types2, element_type, self.element_types2.dtype)
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self.data[self.itime, self.itotal, :] = [oxx1, oyy1, txy1]
self.element_node[self.itotal, :] = [eid, nid] # 0 is center
self.fiber_curvature[self.itotal] = fd1
#self.ielement += 1
self.itotal += 1
self.data[self.itime, self.itotal, :] = [oxx2, oyy2, txy2]
self.element_node[self.itotal, :] = [eid, nid] # 0 is center
self.fiber_curvature[self.itotal] = fd2
self.itotal += 1
def add_sort2(self, dt, eid, nid, fd1, oxx1, oyy1, txy1, fd2, oxx2, oyy2, txy2):
#if self.element_name == 'CTRIA3':
#assert self.element_node.max() == 0, self.element_node
#print(self.element_node, nid)
nnodes = self.nnodes_per_element
itime = self.ielement // nnodes
inid = self.ielement % nnodes
itotal = self.itotal
#if itime >= self.data.shape[0]:# or itotal >= self.element_node.shape[0]:
#print(f'*SORT2 {self.element_name}: itime={itime} ielement={self.itime} inid={inid} itotal={itotal} dt={dt} eid={eid} nid={nid} fd={fd1:.2f} oxx={oxx1:.2f}')
#print(f'*SORT2 {self.element_name}: itime={itime} ielement={self.itime} inid={inid} itotal={itotal+1} dt={dt} eid={eid} nid={nid} fd={fd2:.2f} oxx={oxx2:.2f}')
#print(self.data.shape)
#print(self.element_node.shape)
#else:
ielement = self.itime
#ibase = 2 * ielement # ctria3/cquad4-33
ibase = 2 * (ielement * nnodes + inid)
ie_upper = ibase
ie_lower = ibase + 1
#if self.element_name == 'CTRIAR': # and self.table_name == 'OESATO2':
debug = False
#if self.element_name == 'CTRIA3' and self.table_name in ['OSTRRMS1', 'OSTRRMS2']:
#debug = True
#print(f'SORT2 {self.table_name} {self.element_name}: itime={itime} ie_upper={ie_upper} ielement={self.itime} inid={inid} nid={nid} itotal={itotal} dt={dt} eid={eid} nid={nid} fd={fd1:.2f} oxx={oxx1:.2f}')
#print(f'SORT2 {self.table_name} {self.element_name}: itime={itime} ie_lower={ie_lower} ielement={self.itime} inid={inid} nid={nid} itotal={itotal+1} dt={dt} eid={eid} nid={nid} fd={fd2:.2f} oxx={oxx2:.2f}')
self._times[itime] = dt
#print(self.element_types2, element_type, self.element_types2.dtype)
#itime = self.ielement
#itime = self.itime
#ielement = self.itime
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
if itime == 0:
self.element_node[ie_upper, :] = [eid, nid] # 0 is center
self.element_node[ie_lower, :] = [eid, nid] # 0 is center
self.fiber_curvature[ie_upper] = fd1
self.fiber_curvature[ie_lower] = fd2
#if self.element_name == 'CQUAD4':
#print(self.element_node)
self.data[itime, ie_upper, :] = [oxx1, oyy1, txy1]
self.data[itime, ie_lower, :] = [oxx2, oyy2, txy2]
self.itotal += 2
self.ielement += 1
if debug:
print(self.element_node)
#---------------------------------------------------------------------------
def add_ovm_sort1(self, dt, eid, nid,
fd1, oxx1, oyy1, txy1, ovm1,
fd2, oxx2, oyy2, txy2, ovm2):
"""unvectorized method for adding SORT1 transient data"""
assert self.is_sort1, self.sort_method
self._times[self.itime] = dt
#print(self.element_types2, element_type, self.element_types2.dtype)
#if self.element_name == 'CTRIA3':
#print('%s itotal=%s dt=%s eid=%s nid=%-5s oxx=%s' % (self.element_name, self.itotal, dt, eid, nid, oxx1))
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self.data[self.itime, self.itotal, :] = [oxx1, oyy1, txy1, ovm1]
self.element_node[self.itotal, :] = [eid, nid] # 0 is center
self.fiber_curvature[self.itotal] = fd1
#self.ielement += 1
self.itotal += 1
self.data[self.itime, self.itotal, :] = [oxx2, oyy2, txy2, ovm2]
self.element_node[self.itotal, :] = [eid, nid] # 0 is center
self.fiber_curvature[self.itotal] = fd2
self.itotal += 1
#print(self.element_node)
def add_ovm_sort2(self, dt, eid, nid,
fd1, oxx1, oyy1, txy1, ovm1,
fd2, oxx2, oyy2, txy2, ovm2):
"""unvectorized method for adding SORT2 transient data"""
#self.add_sort2(dt, eid, nid, fd1, oxx1, oyy1, txy1, fd2, oxx2, oyy2, txy2)
self.add_ovm_sort1(dt, eid, nid,
fd1, oxx1, oyy1, txy1, ovm1,
fd2, oxx2, oyy2, txy2, ovm2)
#---------------------------------------------------------------------------
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.element_node.shape[0]
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(f' type={self.class_name} ntimes={ntimes} nelements={nelements:d} nnodes={nnodes:d} table_name={self.table_name}\n')
else:
msg.append(f' type={self.class_name} nelements={nelements:d} nnodes={nnodes:d} {self.table_name}\n')
msg.append(' eType, cid\n')
        headers = self._get_headers()
        msg.append(' data: [ntimes, nnodes, %i] where %i=[%s]\n' % (
            len(headers), len(headers), ', '.join(headers)))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp, unused_nnodes, unused_is_bilinear = _get_plate_msg(self, is_mag_phase, is_sort1)
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write('\n'.join(msg))
if self.element_type == 144: # CQUAD4 bilinear
self._write_f06_quad4_bilinear_transient(f06_file, itime)
elif self.element_type == 33: # CQUAD4 linear
self._write_f06_tri3_transient(f06_file, itime)
elif self.element_type == 74: # CTRIA3
self._write_f06_tri3_transient(f06_file, itime)
elif self.element_type == 64: #CQUAD8
self._write_f06_quad4_bilinear_transient(f06_file, itime)
elif self.element_type == 82: # CQUADR
self._write_f06_quad4_bilinear_transient(f06_file, itime)
elif self.element_type == 70: # CTRIAR
self._write_f06_quad4_bilinear_transient(f06_file, itime)
elif self.element_type == 75: # CTRIA6
self._write_f06_quad4_bilinear_transient(f06_file, itime)
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def _write_f06_tri3_transient(self, f06_file, itime):
"""
CQUAD4 linear
CTRIA3
"""
fds = self.fiber_curvature
oxx = self.data[itime, :, 0]
oyy = self.data[itime, :, 1]
txy = self.data[itime, :, 2]
eids = self.element_node[:, 0]
#nids = self.element_node[:, 1]
ilayer0 = True
        for eid, fd, doxx, doyy, dtxy in zip(eids, fds, oxx, oyy, txy):
            sfd = write_float_13e(fd)
            soxx = write_float_13e(doxx)
            soyy = write_float_13e(doyy)
            stxy = write_float_13e(dtxy)
if ilayer0: # TODO: assuming 2 layers?
f06_file.write('0 %-13s %6s %-13s %-13s %s\n' % (
eid, sfd, soxx, soyy, stxy))
else:
f06_file.write(' %-13s %6s %-13s %-13s %s\n' % (
'', sfd, soxx, soyy, stxy))
ilayer0 = not ilayer0
def _write_f06_quad4_bilinear_transient(self, f06_file, itime):
"""
CQUAD4 bilinear
CQUAD8
CTRIAR
CTRIA6
"""
fds = self.fiber_curvature
oxx = self.data[itime, :, 0]
oyy = self.data[itime, :, 1]
txy = self.data[itime, :, 2]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
ilayer0 = True
for eid, nid, fd, doxx, doyy, dtxy in zip(eids, nids, fds, oxx, oyy, txy):
sfd = write_float_13e(fd)
soxx = write_float_13e(doxx)
soyy = write_float_13e(doyy)
stxy = write_float_13e(dtxy)
if ilayer0: # TODO: assuming 2 layers?
f06_file.write('0 %-13s %6s %-13s %-13s %s\n' % (
eid, sfd, soxx, soyy, stxy))
else:
f06_file.write(' %-13s %6s %-13s %-13s %s\n' % (
'', sfd, soxx, soyy, stxy))
ilayer0 = not ilayer0
@property
def has_von_mises(self):
"""what is the form of the table (NX includes Von Mises)"""
if self.table_name in BASIC_TABLES: # no von mises
has_von_mises = False
elif self.table_name in VM_TABLES:
has_von_mises = True
else:
msg = 'self.table_name=%s self.table_name_str=%s' % (self.table_name, self.table_name_str)
raise NotImplementedError(msg)
return has_von_mises
def _get_plate_msg(self, is_mag_phase=True, is_sort1=True):
#if self.is_von_mises:
#von_mises = 'VON MISES'
#else:
#von_mises = 'MAX SHEAR'
if self.is_stress:
if self.is_fiber_distance:
grid_msg_temp = [' ELEMENT FIBER - STRESSES IN ELEMENT COORDINATE SYSTEM -\n',
' ID GRID-ID DISTANCE NORMAL-X NORMAL-Y SHEAR-XY\n']
fiber_msg_temp = [' ELEMENT FIBRE - STRESSES IN ELEMENT COORDINATE SYSTEM -\n',
' ID. DISTANCE NORMAL-X NORMAL-Y SHEAR-XY\n']
else:
grid_msg_temp = [' ELEMENT FIBRE - STRESSES IN ELEMENT COORDINATE SYSTEM -\n',
' ID GRID-ID CURVATURE NORMAL-X NORMAL-Y SHEAR-XY\n']
fiber_msg_temp = [' ELEMENT FIBRE - STRESSES IN ELEMENT COORDINATE SYSTEM -\n',
' ID. CURVATURE NORMAL-X NORMAL-Y SHEAR-XY\n']
else:
if self.is_fiber_distance:
grid_msg_temp = [' ELEMENT FIBER - STRAINS IN ELEMENT COORDINATE SYSTEM -\n',
' ID GRID-ID DISTANCE NORMAL-X NORMAL-Y SHEAR-XY\n']
fiber_msg_temp = [' ELEMENT FIBRE - STRAINS IN ELEMENT COORDINATE SYSTEM -\n',
' ID. DISTANCE NORMAL-X NORMAL-Y SHEAR-XY\n']
else:
grid_msg_temp = [' ELEMENT FIBRE - STRAINS IN ELEMENT COORDINATE SYSTEM -\n',
' ID GRID-ID CURVATURE NORMAL-X NORMAL-Y SHEAR-XY\n']
fiber_msg_temp = [' ELEMENT FIBRE - STRAINS IN ELEMENT COORDINATE SYSTEM -\n',
' ID. CURVATURE NORMAL-X NORMAL-Y SHEAR-XY\n']
if is_mag_phase:
mag_real = [' (MAGNITUDE/PHASE)\n \n']
else:
mag_real = [' (REAL/IMAGINARY)\n', ' \n']
## TODO: validation on header formatting...
if self.is_stress:
cquad4_bilinear = [' C O M P L E X S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 ) OPTION = BILIN \n \n']
cquad4_linear = [' C O M P L E X S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
cquad8 = [' C O M P L E X S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria3 = [' C O M P L E X S T R E S S E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'] # good
ctria6 = [' C O M P L E X S T R E S S E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X S T R E S S E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
else:
cquad4_bilinear = [' C O M P L E X S T R A I N S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 ) OPTION = BILIN \n \n']
cquad4_linear = [' C O M P L E X S T R A I N S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n']
cquad8 = [' C O M P L E X S T R A I N S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X S T R A I N S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria3 = [' C O M P L E X S T R A I N S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n']
ctria6 = [' C O M P L E X S T R A I N S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X S T R A I N S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
msg = []
is_bilinear = False
        if self.element_type == 144:  # CQUAD4 bilinear
            is_bilinear = True
            msg += cquad4_bilinear + mag_real + grid_msg_temp
        elif self.element_type == 33:  # CQUAD4 linear
            is_bilinear = False
            msg += cquad4_linear + mag_real + fiber_msg_temp
elif self.element_type == 64: #CQUAD8
msg += cquad8 + mag_real + grid_msg_temp
is_bilinear = True
elif self.element_type == 82: # CQUADR
msg += cquadr + mag_real + grid_msg_temp
is_bilinear = True
elif self.element_type == 74: # CTRIA3
msg += ctria3 + mag_real + fiber_msg_temp
elif self.element_type == 75: # CTRIA6
msg += ctria6 + mag_real + grid_msg_temp
is_bilinear = True
elif self.element_type == 70: # CTRIAR
msg += ctriar + mag_real + grid_msg_temp
is_bilinear = True
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
nnodes = get_nnodes(self)
msg = [
' S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
' ( POWER SPECTRAL DENSITY FUNCTION )\n'
' \n'
' FIBER - STRESSES IN ELEMENT COORDINATE SYSTEM -\n'
' FREQUENCY DISTANCE NORMAL-X NORMAL-Y SHEAR-XY\n'
#'0 2.000000E+01 -5.000000E-02 1.925767E-05 1.404795E-04 1.097896E-03'
#' 5.000000E-02 1.925766E-05 1.404794E-04 1.097896E-03'
]
return msg, nnodes, is_bilinear
def get_nnodes(self):
        if self.element_type in [64, 82, 144]:  # CQUAD8, CQUADR, CQUAD4 bilinear
            nnodes = 4 + 1  # centroid
        elif self.element_type in [70, 75]:  # CTRIAR, CTRIA6
            nnodes = 3 + 1  # centroid
        elif self.element_type in [74, 33]:  # CTRIA3, CQUAD4 linear
nnodes = 1
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return nnodes
class RandomPlateStressArray(RandomPlateArray, StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RandomPlateArray.__init__(self, data_code, is_sort1, isubcase, dt)
StressObject.__init__(self, data_code, isubcase)
str(self.has_von_mises)
def _get_headers(self):
headers = ['oxx', 'oyy', 'txy']
if self.has_von_mises:
headers.append('ovm')
return headers
def get_headers(self) -> List[str]:
return self._get_headers()
class RandomPlateStrainArray(RandomPlateArray, StrainObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RandomPlateArray.__init__(self, data_code, is_sort1, isubcase, dt)
StrainObject.__init__(self, data_code, isubcase)
str(self.has_von_mises)
assert self.is_strain, self.stress_bits
def _get_headers(self):
headers = ['exx', 'eyy', 'exy']
if self.has_von_mises:
headers.append('evm')
return headers
def get_headers(self) -> List[str]:
return self._get_headers()
|
import sys, atexit, cv2, time, glob, os, math
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import *
from PyQt5 import uic
running = True
import camera1, camera2, camera3, camera4
frame1 = None
frame2 = None
frame3 = None
frame4 = None
ret = None
rgbImage = None
outputFrame1 = None
outputFrame2 = None
outputFrame3 = None
outputFrame4 = None
class QLabelClickable(QLabel):
clicked = pyqtSignal(str)
def __init__(self, parent=None):
super(QLabelClickable, self).__init__(parent)
def mousePressEvent(self, event):
self.ultimo = "Clic"
def mouseReleaseEvent(self, event):
if self.ultimo == "Clic":
QTimer.singleShot(QApplication.instance().doubleClickInterval(), self.performSingleClickAction)
else:
self.clicked.emit(self.ultimo)
def mouseDoubleClickEvent(self, event):
self.ultimo = "Doble Clic"
def performSingleClickAction(self):
if self.ultimo == "Clic": self.clicked.emit(self.ultimo)
def enterEvent(self, event):
self.setStyleSheet(
"color: black; background-color: black; border-radius: 3px; border-style: none; border: 1px solid lime;")
def leaveEvent(self, event):
self.setStyleSheet(
"color: black; background-color: black; border-radius: 3px; border-style: none; border: 1px solid black;")
class Overlay(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
palette = QPalette(self.palette())
palette.setColor(palette.Background, Qt.transparent)
self.setPalette(palette)
self.setFixedSize(1024, 600)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(QColor(255, 255, 255, 187)))
amount_of_circles = 12
for i in range(amount_of_circles):
painter.setPen(QPen(Qt.NoPen))
if (self.counter) % amount_of_circles == i:
painter.setBrush(QBrush(QColor(56, 165, 59)))
elif (self.counter) % amount_of_circles == i + 4:
painter.setBrush(QBrush(QColor(56, 165, 59)))
elif (self.counter) % amount_of_circles == i + 8:
painter.setBrush(QBrush(QColor(56, 165, 59)))
elif (self.counter) % amount_of_circles == i - 4:
painter.setBrush(QBrush(QColor(56, 165, 59)))
elif (self.counter) % amount_of_circles == i - 8:
painter.setBrush(QBrush(QColor(56, 165, 59)))
else:
painter.setBrush(QBrush(QColor(205 - (i * 10), 56, 59)))
# else: painter.setBrush(QBrush(QColor(127, 127, 127)))
            painter.drawEllipse(
                int(self.width() / 2 + 50 * math.cos(2 * math.pi * i / amount_of_circles) - 20),
                int(self.height() / 2.2 + 50 * math.sin(2 * math.pi * i / amount_of_circles) - 20),
                20, 20)
painter.setPen(QPen(QColor(127, 127, 127), 1))
painter.setFont(QFont(None, 22, 1, False))
if (self.counter) % amount_of_circles == i:
w = "Starting."
elif (self.counter) % amount_of_circles == i + (amount_of_circles / 2 - 1):
w = "Starting.."
elif (self.counter) % amount_of_circles == i + (amount_of_circles - 3):
w = "Starting..."
else:
w = 'Starting'
            painter.drawText(int(self.width() / 2 - 55), int(self.height() / 1.5), 160, 50, Qt.AlignLeft,
                             w)
painter.end()
def showEvent(self, event):
self.timer = self.startTimer(100)
self.counter = 0
def timerEvent(self, event):
self.counter += 1
self.update()
if self.counter == 20:
self.killTimer(self.timer)
self.close()
class mainwindowUI(QMainWindow):
def __init__(self):
super(mainwindowUI, self).__init__()
self.show()
self.setFixedSize(1024, 600)
self.overlay = Overlay(self)
self.overlay.show()
self.timer = self.startTimer(50)
self.counter = 0
self.isCamViewFullScreen = False
self.center()
def center(self):
frameGm = self.frameGeometry()
screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
centerPoint = QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
def timerEvent(self, event):
self.counter += 1
self.update()
if self.counter == 60:
self.overlay.hide()
self.load_UI()
self.start_cameras()
    def start_cameras(self):
        # Keep references to the QThread objects so they are not garbage collected while running
        self.cameraThreads = []
        threadClasses = [Thread1, Thread2, Thread3, Thread4]
        for i in range(len(self.lblCameras)):
            # if self.testDevice(i):
            th = threadClasses[i](self.lblCameras[i], i)
            th.changePixmap.connect(self.setImageCam)
            th.start()
            self.cameraThreads.append(th)
def load_UI(self):
uic.loadUi('main.ui', self)
'---------------------TOOL BAR---------------------'
self.btnBackToMenu = self.findChild(QPushButton, 'btnBackToMenu')
self.btnBackToMenu.clicked.connect(self.backToMenu)
        self.btnBackToCameras = self.findChild(QPushButton, 'btnBackToCameras')
        self.btnBackToCameras.clicked.connect(self.backToCameras)
self.btnBackToMenu.setHidden(True)
self.btnBackToCameras.setHidden(True)
'---------------------CAMERAS---------------------'
self.frameCamera = self.findChild(QFrame, 'frameCameras')
self.cameraGrid = self.findChild(QGridLayout, 'cameraGrid')
self.lblCam1 = QLabelClickable(self)
self.lblCam2 = QLabelClickable(self)
self.lblCam3 = QLabelClickable(self)
self.lblCam4 = QLabelClickable(self)
self.lblCameras = [self.lblCam1, self.lblCam2, self.lblCam3, self.lblCam4]
for index, cam in enumerate(self.lblCameras):
cam.setStyleSheet(
"color: black; background-color: black; border-radius: 3px; border-style: none; border: 1px solid black;")
cam.setAlignment(Qt.AlignCenter)
cam.clicked.connect(partial(self.lblCamClicked, cam, index, True))
colSize = 2
# for i, camera in enumerate(glob.glob("/dev/video?") + 1): self.cameraGrid.addWidget(self.lblCameras[i], int(i / colSize), int(i % colSize))
'''
_______________
| 0,0 | 0,1 |
| 1 | 3 |
|______|______|
| 1,0 | 1,1 |
| 2 | 4 |
|______|______|
'''
self.cameraGrid.addWidget(self.lblCam1, 0, 0)
self.cameraGrid.addWidget(self.lblCam2, 1, 0)
self.cameraGrid.addWidget(self.lblCam3, 0, 1)
self.cameraGrid.addWidget(self.lblCam4, 1, 1)
'---------------------MAIN MENU---------------------'
self.frameMainMenu = self.findChild(QFrame, 'frameMainMenu')
self.btnCameras = self.findChild(QPushButton, 'btnCameras')
self.btnCameras.clicked.connect(self.btnCamerasClicked)
'---------------------MUSIC---------------------'
self.frameMusic = self.findChild(QFrame, 'frameMusic')
self.treeViewMusic = self.findChild(QTreeView, 'treeViewMusic')
self.btnMusic = self.findChild(QPushButton, 'btnMusic')
self.btnMusic.clicked.connect(self.btnMusicClicked)
path = MUSIC_FOLDER
self.dirModel = QFileSystemModel()
self.dirModel.setRootPath(QDir.rootPath())
self.dirModel.setFilter(QDir.NoDotAndDotDot | QDir.AllEntries | QDir.Dirs | QDir.Files)
self.fileModel = QFileSystemModel()
self.fileModel.setFilter(QDir.NoDotAndDotDot | QDir.AllEntries | QDir.Dirs | QDir.Files)
self.treeview = QTreeView()
self.treeViewMusic.setModel(self.dirModel)
self.treeViewMusic.setRootIndex(self.dirModel.index(path))
self.listview = QListView()
self.listview.setModel(self.fileModel)
self.listview.setRootIndex(self.fileModel.index(path))
self.treeViewMusic.clicked.connect(self.treeViewClicked)
'---------------------DEFAULT SETTING---------------------'
self.frameCamera.setHidden(True)
self.frameMusic.setHidden(True)
self.frameMainMenu.setHidden(False)
def backToCameras(self):
for i in range(len(self.lblCameras)): self.lblCameras[i].setHidden(False)
self.btnCamerasClicked()
self.isCamViewFullScreen = False
def lblCamClicked(self, lbl, index, viewFullScreen):
# print(index)
if viewFullScreen:
self.isCamViewFullScreen = True
lbl.clicked.connect(partial(self.lblCamClicked, lbl, index, False))
self.btnBackToMenu.setHidden(True)
self.btnBackToCameras.setHidden(False)
for i in range(len(self.lblCameras)): self.lblCameras[i].setHidden(True)
lbl.setHidden(False)
else:
self.isCamViewFullScreen = False
self.backToCameras()
lbl.clicked.connect(partial(self.lblCamClicked, lbl, index, True))
def btnCamerasClicked(self):
self.frameCamera.setHidden(False)
self.frameMainMenu.setHidden(True)
self.frameMusic.setHidden(True)
self.btnBackToMenu.setHidden(False)
self.btnBackToCameras.setHidden(True)
def btnMusicClicked(self):
self.frameCamera.setHidden(True)
self.frameMainMenu.setHidden(True)
self.frameMusic.setHidden(False)
self.btnBackToMenu.setHidden(False)
self.btnBackToCameras.setHidden(True)
def treeViewClicked(self, index):
path = self.dirModel.fileInfo(index).absoluteFilePath()
print(path)
self.listview.setRootIndex(self.fileModel.setRootPath(path))
def backToMenu(self):
self.frameCamera.setHidden(True)
self.frameMainMenu.setHidden(False)
self.frameMusic.setHidden(True)
self.btnBackToMenu.setHidden(True)
self.btnBackToCameras.setHidden(True)
# @pyqtSlot(QObject, QImage, int)
def setImageCam(self, label, image, index, name=None):
if name == '404.png':
self.cameraGrid.removeWidget(self.lblCameras[index])
self.lblCameras[index].deleteLater()
self.lblCameras[index] = None
self.lblCameras.pop(index)
return
if self.isCamViewFullScreen:
image = image.scaled(640, 480, Qt.KeepAspectRatio, Qt.FastTransformation)
else:
image = image.scaled(320, 240, Qt.KeepAspectRatio, Qt.FastTransformation)
label.setFixedSize(image.width(), image.height())
label.setPixmap(QPixmap.fromImage(image))
class Thread1(QThread):
changePixmap = pyqtSignal(object, QImage, int, str)
def __init__(self, lblCam, port):
QThread.__init__(self)
self.lblCam = lblCam
self.port = port
camera1.start_cam()
@pyqtSlot()
def run(self):
while running:
if running: frame1 = camera1.camRun()
try:
rgbImage = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
self.changePixmap.emit(self.lblCam, convertToQtFormat, self.port, '')
except:
self.lblCam.setFixedSize(1, 1)
image = QImage('404.png')
self.changePixmap.emit(self.lblCam, image, self.port, '404.png')
break
class Thread2(QThread):
changePixmap = pyqtSignal(object, QImage, int, str)
def __init__(self, lblCam, port):
QThread.__init__(self)
self.lblCam = lblCam
self.port = port
camera2.start_cam()
@pyqtSlot()
def run(self):
while running:
if running: frame2 = camera2.camRun()
try:
rgbImage = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
self.changePixmap.emit(self.lblCam, convertToQtFormat, self.port, '')
except:
self.lblCam.setFixedSize(1, 1)
image = QImage('404.png')
self.changePixmap.emit(self.lblCam, image, self.port, '404.png')
break
class Thread3(QThread):
changePixmap = pyqtSignal(object, QImage, int, str)
def __init__(self, lblCam, port):
QThread.__init__(self)
self.lblCam = lblCam
self.port = port
camera3.start_cam()
@pyqtSlot()
def run(self):
while running:
if running: frame3 = camera3.camRun()
try:
rgbImage = cv2.cvtColor(frame3, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
self.changePixmap.emit(self.lblCam, convertToQtFormat, self.port, '')
except:
self.lblCam.setFixedSize(1, 1)
image = QImage('404.png')
self.changePixmap.emit(self.lblCam, image, self.port, '404.png')
break
class Thread4(QThread):
changePixmap = pyqtSignal(object, QImage, int, str)
def __init__(self, lblCam, port):
QThread.__init__(self)
self.lblCam = lblCam
self.port = port
camera4.start_cam()
@pyqtSlot()
def run(self):
while running:
if running: frame4 = camera4.camRun()
try:
rgbImage = cv2.cvtColor(frame4, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
self.changePixmap.emit(self.lblCam, convertToQtFormat, self.port, '')
except:
self.lblCam.setFixedSize(1, 1)
image = QImage('404.png')
self.changePixmap.emit(self.lblCam, image, self.port, '404.png')
break
# def load_config_file(*args):
# global config_json
# for i, j in enumerate(args):
# j.clear()
# with open(config_file) as file:
# config_json = json.load(file)
# for d in config_json[0]['Default']: args[0].append(d)
# for da in config_json[0]['Dark']: args[1].append(da)
# for l in config_json[0]['Light']: args[2].append(l)
# for c in config_json[0]['CSS']: args[3].append(c)
# for g in config_json[0]['Last Genre']: args[4].append(g)
# for al in config_json[0]['Last Algorithm']: args[5].append(al)
# for th in config_json[0]['Last Theme']: args[6].append(th)
# for dexp in config_json[0]['Default Export Path']: args[7].append(dexp)
# for ts in config_json[0]['Toggle Sound On']: args[8].append(ts)
def exit_handler(): sys.exit()
MUSIC_FOLDER = os.path.dirname(os.path.realpath(__file__)) + '/Music/'
if not os.path.exists(MUSIC_FOLDER): os.mkdir(MUSIC_FOLDER)
if __name__ == '__main__':
# load_config_file(DefaultMode, DarkMode, LightMode, CSSOn,
# lastSelectedGenre, lastSelectedAlgorithm,
# lastSelectedTheme, defaultExportPath, toggleSoundOn)
atexit.register(exit_handler)
app = QApplication(sys.argv)
window = mainwindowUI()
app.exec_()
|
import os
import pytest
from conans import load
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, NO_SETTINGS_PACKAGE_ID
@pytest.fixture
def conanfile():
conan_file = str(GenConanfile().with_import("from conans import tools").with_import("import os").
with_require("base/1.0"))
# FIXME: The configure is not valid to change the layout, we need the settings and options
# ready
conan_file += """
no_copy_sources = True
def configure(self):
self.layout.source.folder = "my_sources"
self.layout.build.folder = "my_build"
def source(self):
self.output.warn("Source folder: {}".format(self.source_folder))
tools.save("source.h", "foo")
def build(self):
self.output.warn("Build folder: {}".format(self.build_folder))
tools.save("build.lib", "bar")
def package(self):
self.output.warn("Package folder: {}".format(self.package_folder))
tools.save(os.path.join(self.package_folder, "LICENSE"), "bar")
self.copy("*.h", dst="include")
self.copy("*.lib", dst="lib")
def package_info(self):
# This will be easier when the layout declares also the includedirs etc
self.cpp_info.includedirs = ["include"]
self.cpp_info.libdirs = ["lib"]
"""
return conan_file
def test_cache_in_layout(conanfile):
"""The layout in the cache is used too, always relative to the "base" folders that the cache
    requires. But, by default, the "package" folder layout is not followed.
"""
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . base/1.0@")
client.save({"conanfile.py": conanfile})
ref = ConanFileReference.loads("lib/1.0@")
pref = PackageReference(ref, "58083437fe22ef1faaa0ab4bb21d0a95bf28ae3d")
sf = client.cache.package_layout(ref).source()
bf = client.cache.package_layout(ref).build(pref)
pf = client.cache.package_layout(ref).package(pref)
source_folder = os.path.join(sf, "my_sources")
build_folder = os.path.join(bf, "my_build")
client.run("create . lib/1.0@")
# Check folders match with the declared by the layout
assert "Source folder: {}".format(source_folder) in client.out
assert "Build folder: {}".format(build_folder) in client.out
# Check the source folder
assert os.path.exists(os.path.join(source_folder, "source.h"))
# Check the build folder
assert os.path.exists(os.path.join(build_folder, "build.lib"))
# Check the conaninfo
assert os.path.exists(os.path.join(pf, "conaninfo.txt"))
# Search the package in the cache
client.run("search lib/1.0@")
assert "Package_ID: 58083437fe22ef1faaa0ab4bb21d0a95bf28ae3d" in client.out
# Install the package and check the build info
client.run("install lib/1.0@ -g txt")
binfopath = os.path.join(client.current_folder, "conanbuildinfo.txt")
content = load(binfopath).replace("\r\n", "\n")
assert "[includedirs]\n{}".format(os.path.join(pf, "include")
.replace("\\", "/")) in content
assert "[libdirs]\n{}".format(os.path.join(pf, "lib")
.replace("\\", "/")) in content
def test_same_conanfile_local(conanfile):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . base/1.0@")
client.save({"conanfile.py": conanfile})
source_folder = os.path.join(client.current_folder, "my_sources")
build_folder = os.path.join(client.current_folder, "my_build")
client.run("install . lib/1.0@ -if=install")
client.run("source . -if=install")
assert "Source folder: {}".format(source_folder) in client.out
assert os.path.exists(os.path.join(source_folder, "source.h"))
client.run("build . -if=install")
assert "Build folder: {}".format(build_folder) in client.out
assert os.path.exists(os.path.join(build_folder, "build.lib"))
client.run("package . -if=install")
# By default, the "package" folder is still used (not breaking)
pf = os.path.join(client.current_folder, "package")
assert "Package folder: {}".format(pf) in client.out
assert os.path.exists(os.path.join(pf, "LICENSE"))
|
import subprocess
import sys
import tempfile
from os import path
import yaml
from jock.constant import GIT, COMMAND_PATH, IMPORTS, ADDRESS, REPOSITORIES, GROUPS, DATA
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def load_config():
try:
with open(path.expanduser('~/.jockrc'), 'r') as file:
config = yaml.load(file, Loader=Loader)
validate_config(config)
return config
except IOError:
exit_with_message(1, 'Could not read configuration at ~/.jockrc')
def validate_config(config):
if config is None:
exit_with_message(1, 'Config is empty')
assert_config_has_key(config, REPOSITORIES)
def assert_config_has_key(config, key):
key_merged = merge_config_and_import_key(config, key)
if len(key_merged.keys()) == 0:
exit_with_message(1, 'No ' + key + ' found in config')
def merge_config_and_import_key(config, key):
merged = dict({})
if key in config:
merged = {**merged, **config[key]}
if IMPORTS in config:
imports = config[IMPORTS]
for import_name in imports:
imp = imports[import_name]
if DATA in imp and key in imp[DATA]:
merged = {**merged, **imp[DATA][key]}
return merged
def get_tmp_path():
return path.join(tempfile.gettempdir(), 'jock-imports')
def fetch_remote_rc(import_name, address):
temp_dir = get_tmp_path()
temp_path = path.join(temp_dir, import_name)
subprocess_steps(
success_message='Imported "' + import_name + '"',
error='Import "' + import_name + '" could not be retrieved from "' + address + '"',
steps=[
(GIT, 'clone', '--no-checkout', address, temp_path),
(GIT, COMMAND_PATH, temp_path, 'reset'),
(GIT, COMMAND_PATH, temp_path, 'checkout', '.jockrc')
])
def merge_config_and_imported(config, import_name, imported):
if DATA not in config[IMPORTS][import_name]:
config[IMPORTS][import_name][DATA] = dict({})
config[IMPORTS][import_name][DATA][REPOSITORIES] = imported[REPOSITORIES]
config[IMPORTS][import_name][DATA][GROUPS] = imported[GROUPS]
return config
def fetch_and_merge_remote(config, imports, import_name, temp_dir):
fetch_remote_rc(import_name, imports[import_name][ADDRESS])
with open(path.join(temp_dir, import_name, '.jockrc'), 'r') as imported_file:
imported = yaml.load(imported_file, Loader=Loader)
config = merge_config_and_imported(config, import_name, imported)
with open(path.expanduser('~/.jockrc'), 'w') as config_file:
yaml.dump(config, config_file, sort_keys=False)
def fetch_and_merge_remotes(config, imports, temp_dir):
for import_name in imports:
fetch_and_merge_remote(config, imports, import_name, temp_dir)
def import_config():
config = load_config()
imports = config[IMPORTS]
temp_dir = get_tmp_path()
subprocess.run(('rm', '-rf', temp_dir))
fetch_and_merge_remotes(config, imports, temp_dir)
subprocess.run(('rm', '-rf', temp_dir))
def get_selected_repositories(selected_repositories, selected_groups):
if len(selected_repositories) + len(selected_groups) == 0:
exit_with_message(1, 'No repositories/groups provided')
config = load_config()
repositories = merge_config_and_import_key(config, REPOSITORIES)
groups = merge_config_and_import_key(config, GROUPS)
selected = dict({})
for repo_name in selected_repositories:
if repo_name in repositories:
selected[repo_name] = repositories[repo_name]
else:
exit_with_message(1, 'Repository "' + repo_name + '" not found in config')
    if groups:  # groups may come from imports as well as from the top-level config
for group_name in selected_groups:
if group_name not in groups:
exit_with_message(1, 'Group "' + group_name + '" not found in config')
elif REPOSITORIES not in groups[group_name]:
exit_with_message(1, 'Group "' + group_name + '" has no repository field')
else:
for repo_name in groups[group_name][REPOSITORIES]:
selected[repo_name] = repositories[repo_name]
if len(selected) == 0:
exit_with_message(1, 'No repositories selected')
return selected
def exit_with_message(exit_code, message):
print(message)
sys.exit(exit_code)
def quiet_subprocess(args):
return subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT).returncode
def subprocess_steps(steps, error=None, success_message=None):
for args in steps:
exit_code = quiet_subprocess(args)
if exit_code > 0:
if error is not None:
exit_with_message(1, error)
return
if success_message is not None:
print(success_message)
|
class CycleSetupCommon:
"""
    A concrete realization needs to define self.h_int, self.gloc, self.g0, self.se, self.mu,
    self.global_moves and self.quantum_numbers.
"""
def initialize_cycle(self):
return {'h_int': self.h_int,'g_local': self.gloc, 'weiss_field': self.g0,
'self_energy': self.se, 'mu': self.mu, 'global_moves': self.global_moves,
'quantum_numbers': self.quantum_numbers}
def set_data(self, storage, load_mu = True):
"""
        Loads the data of g_imp_iw, g_weiss_iw, se_imp_iw and mu from storage into the
        corresponding objects.
        The data is copied; storage returns objects that are all BlockGf's and cannot
        initialize a selfconsistency cycle on their own.
"""
g = storage.load('g_imp_iw')
self.gloc << g
try: # TODO backward compatibility
self.g0 << storage.load('g_weiss_iw')
except KeyError:
pass
self.se << storage.load('se_imp_iw')
if load_mu:
self.mu = storage.load('mu')
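# Minimal sketch of a concrete setup (an illustrative addition, not part of the original
# module): it only shows which attributes a realization has to provide so that
# initialize_cycle() and set_data() can work. The constructor arguments are placeholders
# for the caller's own Hamiltonian / Green's function objects.
class ExampleCycleSetup(CycleSetupCommon):
    def __init__(self, h_int, gloc, g0, se, mu=0.0, global_moves=None, quantum_numbers=None):
        self.h_int = h_int                    # interaction Hamiltonian
        self.gloc = gloc                      # local Green's function
        self.g0 = g0                          # Weiss field
        self.se = se                          # self-energy
        self.mu = mu                          # chemical potential
        self.global_moves = global_moves if global_moves is not None else {}
        self.quantum_numbers = quantum_numbers if quantum_numbers is not None else []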
|
import re
import sanic
def pytest_collection_modifyitems(session, config, items):
base_port = sanic.testing.PORT
worker_id = getattr(config, 'slaveinput', {}).get('slaveid', 'master')
m = re.search(r'[0-9]+', worker_id)
if m:
num_id = int(m.group(0)) + 1
else:
num_id = 0
new_port = base_port + num_id
def new_test_client(app, port=new_port):
return sanic.testing.SanicTestClient(app, port)
sanic.Sanic.test_port = new_port
sanic.Sanic.test_client = property(new_test_client)
app = sanic.Sanic()
assert app.test_client.port == new_port
|
from __future__ import print_function
import datetime
import glob
import json
import multiprocessing
import os
import pickle
import sys
import warnings
from collections import Counter, defaultdict
from string import digits
import re
import plotly.plotly as py
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil import parser
from gensim.models import KeyedVectors
from joblib import Parallel, delayed
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import train_test_split
from sklearn import metrics
sys.path.insert(0, os.path.dirname(__file__) + '../2_helpers')
sys.path.insert(0, os.path.dirname(__file__) + '../5_fact_checking_models')
from decoder import decoder
from metrics import ndcg_score
warnings.filterwarnings("ignore", category=DeprecationWarning)
DIR = os.path.dirname(__file__) + '../../3_Data/'
WNL = WordNetLemmatizer()
NLTK_STOPWORDS = set(stopwords.words('english'))
num_cores = multiprocessing.cpu_count()
num_jobs = round(num_cores * 3 / 4)
fact_to_words = {}
# word_vectors = KeyedVectors.load_word2vec_format('model_data/word2vec_twitter_model/word2vec_twitter_model.bin', binary=True, unicode_errors='ignore')
def datetime_converter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def tokenize_text(text, only_retweets=False):
tokenizer = RegexpTokenizer(r'\w+')
if only_retweets:
text = text
# if 'RT' not in text: return None
mentions = []
while True:
if '@' not in text: break
mention = text[text.find('@'):]
if ' ' in mention: mention = mention[:mention.find(' ')]
mentions.append(mention)
text = text.replace(mention, '')
retweeted_to = [rt.replace('@', '').replace(':', '').lower() for rt in mentions if '@' in rt]
return retweeted_to
return [WNL.lemmatize(i.lower()) for i in tokenizer.tokenize(text) if
i.lower() not in NLTK_STOPWORDS]
def get_data():
fact_file = glob.glob(DIR + 'facts.json')[0]
transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
facts = json.load(open(fact_file), object_hook=decoder)
transactions = json.load(open(transactions_file), object_hook=decoder)
return facts, transactions
def get_users():
user_files = glob.glob(DIR + 'user_tweets/' + 'user_*.json')
print('{} users'.format(len(user_files)))
if len(user_files) < 10: print('WRONG DIR?')
users = []
for user_file in user_files:
user = json.loads(open(user_file).readline(), object_hook=decoder)
if int(user.was_correct) != -1:
users.append(user)
print('Kept {} users'.format(len(users)))
return users
def get_relevant_tweets(user):
relevant_tweets = []
user_fact_words = fact_to_words[user.fact]
for tweet in user.tweets:
distance_to_topic = []
tokens = tokenize_text(tweet['text'], only_retweets=False)
for token in tokens:
if token not in word_vectors.vocab: continue
increment = np.average(word_vectors.distances(token, other_words=user_fact_words))
distance_to_topic.append(increment)
if np.average(np.asarray(distance_to_topic)) < 0.8:
relevant_tweets.append(tweet)
return relevant_tweets
def build_fact_topics():
print("Build fact topics")
fact_file = glob.glob(DIR + 'facts_annotated.json')[0]
facts_df = pd.read_json(fact_file)
remove_digits = str.maketrans('', '', digits)
facts_df['text_parsed'] = facts_df['text'].map(lambda t: tokenize_text(t.translate(remove_digits)))
facts_df['entities_parsed'] = facts_df['entities'].map(lambda ents:
[item for sublist in
[e['surfaceForm'].lower().split() for e in ents if
e['similarityScore'] >= 0.6]
for item in sublist])
facts_df['topic'] = facts_df['topic'].map(lambda t: [t])
facts_df['fact_terms'] = facts_df['text_parsed'] + facts_df['entities_parsed'] + facts_df['topic']
return facts_df
def get_user_edges(users):
user_to_links = []
y = []
i = 0
for user in users:
user_links = []
# relevant_tweets = get_relevant_tweets(user)
for tweet in user.tweets:
mentions = tokenize_text(tweet['text'], only_retweets=True)
for rt in mentions:
user_links.append(rt)
if len(user_links) <= 1: continue
user_to_links.append([user.user_id, user_links])
y.append([user.user_id, user.was_correct + 0.01])
i += 1
return user_to_links, np.asarray(y)
def build_graph(user_to_links, user_to_weight):
G = nx.DiGraph()
all_nodes = [u[0] for u in user_to_links] + list(
set([e for sublist in [u[1] for u in user_to_links] for e in sublist]))
print(len(all_nodes))
G.add_nodes_from(all_nodes)
G.add_edges_from([(userlinks[0], v) for userlinks in user_to_links for v in userlinks[1]])
# G.add_weighted_edges_from([(userlinks[0],v,user_to_weight[i]) for i, userlinks in enumerate(user_to_links) for v in userlinks[1]])
obsolete_nodes = [k for k, v in dict(nx.degree(G)).items() if v <= 1]
G.remove_nodes_from(obsolete_nodes)
return G
def get_ranks(user_to_links, G, pageRank, alpha=0.85):
user_to_pr = []
for user, links in user_to_links:
pr_sum = sum([pageRank[l] / G.degree(l) for l in links if l in pageRank])
pr_user = (1 - alpha) / alpha + alpha * pr_sum
user_to_pr.append(pr_user)
return user_to_pr
def graph_plot(G):
print(len(G.nodes()))
obsolete_nodes = [k for k, v in dict(nx.degree(G)).items() if v <= 10]
G.remove_nodes_from(obsolete_nodes)
print(len(G.nodes()))
    pos = nx.kamada_kawai_layout(G)
    nodes = list(G.nodes())
    labels = [str(node) for node in nodes]
    Xv = [pos[node][0] for node in nodes]
    Yv = [pos[node][1] for node in nodes]
    Xed = []
    Yed = []
    for edge in G.edges():
        Xed += [pos[edge[0]][0], pos[edge[1]][0], None]
        Yed += [pos[edge[0]][1], pos[edge[1]][1], None]
    trace3 = Scatter(x=Xed, y=Yed, mode='lines', line=Line(color='rgb(210,210,210)', width=1), hoverinfo='none')
    trace4 = Scatter(x=Xv, y=Yv, mode='markers', name='net',
                     marker=Marker(symbol='dot', size=5, color='#6959CD', line=Line(color='rgb(50,50,50)', width=0.5)),
                     text=labels, hoverinfo='text')
    annot = "This networkx.Graph has the Kamada-Kawai layout<br>Code:" + \
            "<a href='http://nbviewer.ipython.org/gist/empet/07ea33b2e4e0b84193bd'> [2]</a>"
    data1 = Data([trace3, trace4])
    layout1 = Layout(showlegend=False,
                     annotations=[dict(text=annot, showarrow=False, xref='paper', yref='paper', x=0, y=-0.1)])
    fig1 = Figure(data=data1, layout=layout1)
    plot(fig1, filename='Coautorship-network-nx.html')
def rank_users(users):
global fact_to_words
print("Creating nodes")
user_to_links, user_to_weight = get_user_edges(users)
X_train, X_test, y_train, y_test = train_test_split(user_to_links, user_to_weight)
print("Building graph..")
G = build_graph(user_to_links, user_to_weight)
graph_plot(G)
pr = nx.pagerank(G)
pr_cred_users = {u: v for u, v in list(pr.items()) if u in user_to_links}
# print(sorted([(v,y[1]) for u,v in pr_cred_users.items() for y in user_to_weight if u == y[0]], reverse=True, key=lambda x: x[0]))
pred = get_ranks(X_test, G, pr)
print(sorted(np.asarray([e for e in zip(pred, [y[1] for y in y_test])]), reverse=True, key=lambda x: x[0]))
ndgc = ndcg_score([y[1] for y in y_test], pred)
print("NDCG: {}".format(ndgc))
users = get_users()
rank_users(users)
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class GetContactListRequest(Request):
def __init__(self):
super(GetContactListRequest, self).__init__(
'monitor', 'qcloudcliV1', 'GetContactList', 'monitor.api.qcloud.com')
def get_page(self):
return self.get_params().get('page')
def set_page(self, page):
self.add_param('page', page)
def get_pageSize(self):
return self.get_params().get('pageSize')
def set_pageSize(self, pageSize):
self.add_param('pageSize', pageSize)
|
import os
import sys
# import re
# Environment variables to configure the AWS CLI
os.environ["AWS_ACCESS_KEY_ID"] = os.environ['INPUT_ACCESS_KEY']
os.environ["AWS_SECRET_ACCESS_KEY"] = os.environ['INPUT_SECRET_ACCESS_KEY']
os.environ["AWS_DEFAULT_REGION"] = os.environ['INPUT_REGION']
# Source and destination locations
source = os.environ['INPUT_SOURCE']
destination_bucket = os.environ['INPUT_DESTINATION_BUCKET']
destination_prefix = os.environ['INPUT_DESTINATION_PREFIX']
# Flags
# Will keep adding more ...
exclude = os.environ['INPUT_EXCLUDE']
delete = os.environ['INPUT_DELETE']
quiet = os.environ['INPUT_QUIET']
args = ['aws s3 sync']
# Checks
# Check if the source directory is provided and is in the current workspace
if source and source != ".":
if source not in os.listdir(os.environ['GITHUB_WORKSPACE']):
print(f'Source "{source}" does not exist in the workspace. Please check and try again ...')
print("Below are the available files/directories in the current workspace:")
print(os.listdir(os.environ['GITHUB_WORKSPACE']))
sys.exit(1)
else:
args.append(source)
else:
args.append(".")
# Check if a destination prefix was provided:
if destination_prefix:
destination = f's3://{destination_bucket}/{destination_prefix}/'
else:
destination = f's3://{destination_bucket}/'
args.append(destination)
# Check if 'exclude' flag is used:
if exclude:
args.append(f'--exclude "{exclude}"')
# Check if 'delete' flag is used:
if delete.lower() == "true":
args.append("--delete")
# Check if 'quiet' flag is used:
if quiet.lower() == "true":
args.append("--quiet")
cmd = ' '.join(args)
print("Running the AWS Sync command with the following arguments...")
print(cmd)
# os.system returns the command's exit status; a non-zero status means the sync failed
exit_code = os.system(cmd)
if exit_code != 0:
    print("Error executing the sync command !!!")
sys.exit(1) |
import types
from sqlalchemy import Column, String, BigInteger, Integer, DateTime, ForeignKey, Sequence
import datetime
from sqlalchemy.orm import relationship, backref
from models.BaseModel import BaseModel
#from models.EventsRelated.EventGroupModel import EventGroupModel
#from models.EventsRelated.EventUserModel import EventUserModel
#from models.EventsRelated.EventRoomModel import EventRoomModel
class EventModel(BaseModel):
__tablename__ = 'events'
id = Column(BigInteger, Sequence('all_id_seq'), primary_key=True)
start = Column(DateTime)
end = Column(DateTime)
label = Column(String)
externalId = Column(String, index=True)
lastchange = Column(DateTime, default=datetime.datetime.now)
#userlinks = relationship('EventUserModel')
#groupslinks = relationship('EventGroupModel')
#roomlinks = relationship('EventRoomModel')
#users = relationship('UserModel', secondary=EventUserModel)#, back_populates='events') #relationship(lazy='dynamic')
#users = relationship('UserModel',
# primaryjoin="EventModel.id==EventUserModel.event_id",
# secondaryjoin="and_(EventUserModel.event_id==EventModel.id, EventUserModel.id==EventUserModel.id)")
#users = relationship('EventUserModel')
#primaryjoin="events.id==events_users.event_id")
#secondaryjoin="and_(events_users.event_id==events.id, events_users.user_id==users.id)")
#rooms = relationship('RoomModel', secondary=EventGroupModel)#, back_populates='events') #relationship(lazy='dynamic')
#groups = relationship('GroupModel', secondary=EventGroupModel, back_populates='events') #relationship(lazy='dynamic')
#groups = relationship('GroupModel', secondary=EventGroupModel)#, back_populates='events') #relationship(lazy='dynamic')
|
import os
from functools import lru_cache
class DayTen():
def __init__(self):
self.lines = []
self.read_list()
def read_list(self):
self.operations = []
with open('./_data/data_10.txt') as f:
contents = f.read().split(os.linesep)
self.lines = [int(line) for line in contents]
def part_one(self):
self.lines.sort()
data = self.lines
differences_3 = 0
differences_1 = 0
for p, n in zip(data[:-1], data[1:]):
if n - p == 3:
differences_3 += 1
elif n - p == 1:
differences_1 += 1
return (differences_1 + 1) * (differences_3 + 1)
@lru_cache
def calculate_combinations(self, position, target):
if position == target:
return 1
count = 0
for i in range(1, 4):
if position + i not in self.lines:
continue
count += self.calculate_combinations(position + i, target)
return count
def part_two(self):
max_number = max(self.lines)
return self.calculate_combinations(0, max_number)
day_ten = DayTen()
print("What is the first number that does not have this property?")
print(day_ten.part_one())
print("=========================================")
print("What is the total number of distinct ways you \
can arrange the adapters to connect the charging \
outlet to your device?")
print(day_ten.part_two())
|
# A handy helper that reverses the endianness (byte order) of a hex-encoded binary string
input = "020000000001017c037e163f8dfee4632a8cf6c87187d3cb61224e6dae8f4b0ed0fae3a38008570000000017160014c5729e3aaacb6a160fa79949a8d7f1e5cd1fbc51feffffff0288102c040000000017a914ed649576ad657747835d116611981c90113c074387005a62020000000017a914e62a29e7d756eb30c453ae022f315619fe8ddfbb8702483045022100b40db3a574a7254d60f8e64335d9bab60ff986ad7fe1c0ad06dcfc4ba896e16002201bbf15e25b0334817baa34fd02ebe90c94af2d65226c9302a60a96e8357c0da50121034f889691dacb4b7152f42f566095a8c2cec6482d2fc0a16f87f59691e7e37824df000000"
def test():
assert reverse("") == ""
assert reverse("F") == "F"
assert reverse("FF") == "FF"
assert reverse("00FF") == "FF00"
assert reverse("AA00FF") == "FF00AA"
assert reverse("AB01EF") == "EF01AB"
assert reverse("b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f1963") == "63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5"
def reverse(input):
res = "".join(reversed([input[i:i+2] for i in range(0, len(input), 2)]))
return res
if __name__ == "__main__":
test()
print(reverse(input))
|
"""
Design a data structure that follows the constraints of a Least Recently Used (LRU) cache.
Implement the LRUCache class:
- LRUCache(int capacity) Initialize the LRU cache with positive size capacity.
- get(int key) Return the value of the key if the key exists, otherwise return -1.
- put(int key, int value) Update the value of the key if the key exists.
Otherwise, add the key-value pair to the cache.
If the number of keys exceeds the capacity from this operation, evict the least recently used key.
The functions get and put must each run in O(1) average time complexity.
Example 1:
Input
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
Output
[null, null, null, 1, null, -1, null, -1, 3, 4]
Explanation
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // cache is {1=1}
lRUCache.put(2, 2); // cache is {1=1, 2=2}
lRUCache.get(1); // return 1
lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}
lRUCache.get(2); // returns -1 (not found)
lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}
lRUCache.get(1); // return -1 (not found)
lRUCache.get(3); // return 3
lRUCache.get(4); // return 4
"""
"""
We can most easily implement this with Python's OrderedDict which has two methods very useful for this kind of need:
move_to_end, and popitem. All operations are O(1) due to acting on the underlying dict object.
"""
from collections import OrderedDict
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
self.data = OrderedDict()
def get(self, key):
if key in self.data:
self.data.move_to_end(key)
return self.data[key]
return -1
def put(self, key, value):
self.data[key] = value
self.data.move_to_end(key)
if len(self.data) > self.capacity:
self.data.popitem(False)
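# A small self-check mirroring the example in the docstring above. This block is an
# illustrative addition (not part of the original exercise statement); it exercises the
# O(1) get/put behaviour and the least-recently-used eviction order.
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)              # cache is {1: 1}
    cache.put(2, 2)              # cache is {1: 1, 2: 2}
    assert cache.get(1) == 1     # key 1 becomes most recently used
    cache.put(3, 3)              # evicts key 2 (the LRU key)
    assert cache.get(2) == -1    # key 2 was evicted
    cache.put(4, 4)              # evicts key 1
    assert cache.get(1) == -1
    assert cache.get(3) == 3
    assert cache.get(4) == 4
    print("LRUCache example behaves as expected")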
|
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from aries_cloudagent.storage.error import StorageNotFoundError
from .. import routes as test_module
class TestBasicMessageRoutes(AsyncTestCase):
async def test_connections_send_message(self):
mock_request = async_mock.MagicMock()
mock_request.json = async_mock.CoroutineMock()
mock_request.app = {
"outbound_message_router": async_mock.CoroutineMock(),
"request_context": "context",
}
with async_mock.patch.object(
test_module, "ConnectionRecord", autospec=True
) as mock_connection_record, async_mock.patch.object(
test_module, "BasicMessage", autospec=True
) as mock_basic_message, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_connection_record.retrieve_by_id = async_mock.CoroutineMock()
res = await test_module.connections_send_message(mock_request)
mock_response.assert_called_once_with({})
mock_basic_message.assert_called_once()
async def test_connections_send_message_no_conn_record(self):
mock_request = async_mock.MagicMock()
mock_request.json = async_mock.CoroutineMock()
mock_request.app = {
"outbound_message_router": async_mock.CoroutineMock(),
"request_context": "context",
}
with async_mock.patch.object(
test_module, "ConnectionRecord", autospec=True
) as mock_connection_record, async_mock.patch.object(
test_module, "BasicMessage", autospec=True
) as mock_basic_message:
# Emulate storage not found (bad connection id)
mock_connection_record.retrieve_by_id = async_mock.CoroutineMock(
side_effect=StorageNotFoundError
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.connections_send_message(mock_request)
async def test_connections_send_message_not_ready(self):
mock_request = async_mock.MagicMock()
mock_request.json = async_mock.CoroutineMock()
mock_request.app = {
"outbound_message_router": async_mock.CoroutineMock(),
"request_context": "context",
}
with async_mock.patch.object(
test_module, "ConnectionRecord", autospec=True
) as mock_connection_record, async_mock.patch.object(
test_module, "BasicMessage", autospec=True
) as mock_basic_message:
# Emulate connection not ready
mock_connection_record.retrieve_by_id = async_mock.CoroutineMock()
mock_connection_record.retrieve_by_id.return_value.is_ready = False
await test_module.connections_send_message(mock_request)
mock_basic_message.assert_not_called()
|
'''Simple test file to check whether loading caffe features works properly:
selecting percentiles, selecting rows and reporting error messages.
@author: Diede Kemper'''
from IO import Input
features = Input.load_validationset_caffefeatures()
print features.shape
print 'should be: 8061x3983'
features = Input.load_traindata_caffefeatures(userows=range(3000,5500))
print features.shape
print 'should be: 2500x3983'
features = Input.load_validationset_caffefeatures(featureSelectionMethod='chi2', Percentile=100)
print features.shape
print 'should be: 8061x3983'
features = Input.load_validationset_caffefeatures(featureSelectionMethod='hoi', Percentile=90)
print features.shape
print 'should print error message'
features = Input.load_validationset_caffefeatures(featureSelectionMethod='chi2', Percentile=210)
print features.shape
print 'should print error message'
features = Input.load_traindata_caffefeatures(featureSelectionMethod='chi2', Percentile=5)
print features.shape
print 'should be: 22424x200'
features = Input.load_testdata_caffefeatures(featureSelectionMethod='chi2', Percentile=2, userows=range(20200,30200))
print features.shape
print 'should be: 10000x80'
|
# Training loop:
# 1 Sets up the environment and data
# 2 Builds the generator (g) and discriminator (d) networks
# 3 Manages the training process
# 4 Runs periodic evaluations on specified metrics
# 5 Produces sample images over the course of training
# It supports training over an image directory dataset, prepared by prepare_data.py
# Labels can optionally be provided, although they are not essential
# If provided, images will be generated conditioned on a chosen label
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import glob
import dnnlib
from torch_utils import misc as torch_misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from training import visualize
from training import misc
import loader
from metrics import metric_main
from metrics import metric_utils
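# Illustrative sketch only (an addition, not called anywhere in this module): it shows how
# the data and network helpers defined below are typically wired together before the
# optimization loop starts. All arguments (dataset_args, cG, cD, batch_size, rank, num_gpus,
# device, resume_pkl) are assumed to come from the caller's configuration.
def _example_training_setup(dataset_args, cG, cD, batch_size, rank, num_gpus, device, resume_pkl=None):
    log = (rank == 0)  # only the first process prints
    dataset, dataset_iter = load_dataset(dataset_args, batch_size, rank, num_gpus, log)
    G, D, Gs = construct_nets(cG, cD, dataset, device, log)
    if resume_pkl is not None:
        G, D, Gs = load_nets(resume_pkl, (G, D, Gs), device, log)
    ddp_nets = distribute_nets(G, D, Gs, device, num_gpus, log)
    return dataset, dataset_iter, (G, D, Gs), ddp_nets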
# Data processing
# ----------------------------------------------------------------------------
# Load dataset
def load_dataset(dataset_args, batch_size, rank, num_gpus, log):
misc.log("Loading training set...", "white", log)
    dataset = dnnlib.util.construct_class_by_name(**dataset_args) # subclass of training.dataset.Dataset
dataset_sampler = torch_misc.InfiniteSampler(dataset = dataset, rank = rank, num_replicas = num_gpus)
dataset_iter = iter(torch.utils.data.DataLoader(dataset = dataset, sampler = dataset_sampler,
batch_size = batch_size//num_gpus, **dataset_args.loader_args))
misc.log(f"Num images: {misc.bcolored(len(dataset), 'blue')}", log = log)
misc.log(f"Image shape: {misc.bcolored(dataset.image_shape, 'blue')}", log = log)
misc.log(f"Label shape: {misc.bcolored(dataset.label_shape, 'blue')}", log = log)
return dataset, dataset_iter
# Fetch real images and their corresponding labels, and sample latents/labels
def fetch_data(dataset, dataset_iter, input_shape, drange_net, device, batches_num, batch_size, batch_gpu):
with torch.autograd.profiler.record_function("data_fetch"):
real_img, real_c = next(dataset_iter)
real_img = real_img.to(device).to(torch.float32)
real_img = misc.adjust_range(real_img, [0, 255], drange_net).split(batch_gpu)
real_c = real_c.to(device).split(batch_gpu)
gen_zs = torch.randn([batches_num * batch_size, *input_shape[1:]], device = device)
        gen_zs = [gen_z.split(batch_gpu) for gen_z in gen_zs.split(batch_size)]
gen_cs = [dataset.get_label(np.random.randint(len(dataset))) for _ in range(batches_num * batch_size)]
gen_cs = torch.from_numpy(np.stack(gen_cs)).pin_memory().to(device)
gen_cs = [gen_c.split(batch_gpu) for gen_c in gen_cs.split(batch_size)]
return real_img, real_c, gen_zs, gen_cs
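# Shape sketch for fetch_data (illustrative numbers, not defaults taken from this file):
# with batches_num = 4 (one batch per training stage), batch_size = 32 and batch_gpu = 8,
# gen_zs is a list of 4 per-stage batches, each split into 32 // 8 = 4 chunks of 8 latents;
# real_img and real_c are likewise split into chunks of batch_gpu for gradient accumulation.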
# Networks (construction/distribution, loading/saving, and printing)
# ----------------------------------------------------------------------------
# Construct networks
def construct_nets(cG, cD, dataset, device, log):
misc.log("Constructing networks...", "white", log)
common_kwargs = dict(c_dim = dataset.label_dim, img_resolution = dataset.resolution, img_channels = dataset.num_channels)
    G = dnnlib.util.construct_class_by_name(**cG, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    D = dnnlib.util.construct_class_by_name(**cD, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
Gs = copy.deepcopy(G).eval()
return G, D, Gs
# Distribute models onto the GPUs
def distribute_nets(G, D, Gs, device, num_gpus, log):
misc.log(f"Distributing across {num_gpus} GPUs...", "white", log)
networks = {}
for name, net in [("G", G), ("D", D), (None, Gs)]: # ("G_mapping", G.mapping), ("G_synthesis", G.synthesis)
if (num_gpus > 1) and (net is not None) and len(list(net.parameters())) != 0:
net.requires_grad_(True)
net = torch.nn.parallel.DistributedDataParallel(net, device_ids = [device], broadcast_buffers = False,
find_unused_parameters = True)
net.requires_grad_(False)
if name is not None:
networks[name] = net
return networks
# Resume from existing pickle
def load_nets(load_pkl, nets, device, log):
if (load_pkl is not None) and log:
misc.log(f"Resuming from {load_pkl}", "white", log)
resume_data = loader.load_network(load_pkl)
if nets is not None:
G, D, Gs = nets
for name, net in [("G", G), ("D", D), ("Gs", Gs)]:
torch_misc.copy_params_and_buffers(resume_data[name], net, require_all = False)
else:
for net in ["G", "D", "Gs"]:
resume_data[net] = copy.deepcopy(resume_data[net]).eval().requires_grad_(False).to(device)
nets = (resume_data["G"], resume_data["D"], resume_data["Gs"])
return nets
def save_nets(G, D, Gs, cur_nimg, dataset_args, run_dir, distributed, last_snapshots, log):
snapshot_data = dict(dataset_args = dict(dataset_args))
for name, net in [("G", G), ("D", D), ("Gs", Gs)]:
if net is not None:
if distributed:
torch_misc.assert_ddp_consistency(net, ignore_regex = r".*\.w_avg")
net = copy.deepcopy(net).eval().requires_grad_(False).cpu()
snapshot_data[name] = net
del net
snapshot_pkl = os.path.join(run_dir, f"network-snapshot-{cur_nimg//1000:06d}.pkl")
if log:
with open(snapshot_pkl, "wb") as f:
pickle.dump(snapshot_data, f)
if last_snapshots > 0:
misc.rm(sorted(glob.glob(os.path.join(run_dir, "network*.pkl")))[:-last_snapshots])
return snapshot_data, snapshot_pkl
# Print network summary tables
def print_nets(G, D, batch_gpu, device, log):
if not log:
return
z = torch.empty([batch_gpu, *G.input_shape[1:]], device = device)
c = torch.empty([batch_gpu, *G.cond_shape[1:]], device = device)
img = torch_misc.print_module_summary(G, [z, c])[0]
torch_misc.print_module_summary(D, [img, c])
# Training and optimization
# ----------------------------------------------------------------------------
# Initialize cuda according to command line arguments
def init_cuda(rank, cudnn_benchmark, allow_tf32):
device = torch.device("cuda", rank)
torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed
torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for matmul
torch.backends.cudnn.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for convolutions
conv2d_gradfix.enabled = True # Improves training speed
return device
# Setup training stages (alternating optimization of G and D, and lazy regularization)
def setup_training_stages(loss_args, G, cG, D, cD, ddp_nets, device, log):
misc.log("Setting up training stages...", "white", log)
loss = dnnlib.util.construct_class_by_name(device = device, **ddp_nets, **loss_args) # subclass of training.loss.Loss
stages = []
for name, net, config in [("G", G, cG), ("D", D, cD)]:
if config.reg_interval is None:
            opt = dnnlib.util.construct_class_by_name(params = net.parameters(), **config.opt_args) # subclass of torch.optim.Optimizer
stages.append(dnnlib.EasyDict(name = f"{name}_both", net = net, opt = opt, interval = 1))
# Lazy regularization
else:
mb_ratio = config.reg_interval / (config.reg_interval + 1)
opt_args = dnnlib.EasyDict(config.opt_args)
opt_args.lr = opt_args.lr * mb_ratio
opt_args.betas = [beta ** mb_ratio for beta in opt_args.betas]
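            # Worked example (assuming a typical reg_interval = 4 and Adam betas = [0, 0.99]):
            # mb_ratio = 4 / 5 = 0.8, so lr is scaled by 0.8 and beta2 becomes 0.99 ** 0.8 ~= 0.992,
            # compensating for the regularization pass only running every 4th minibatch.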
            opt = dnnlib.util.construct_class_by_name(net.parameters(), **opt_args) # subclass of torch.optim.Optimizer
stages.append(dnnlib.EasyDict(name = f"{name}_main", net = net, opt = opt, interval = 1))
stages.append(dnnlib.EasyDict(name = f"{name}_reg", net = net, opt = opt, interval = config.reg_interval))
for stage in stages:
stage.start_event = None
stage.end_event = None
if log:
stage.start_event = torch.cuda.Event(enable_timing = True)
stage.end_event = torch.cuda.Event(enable_timing = True)
return loss, stages
# Compute gradients and update the network weights for the current training stage
def run_training_stage(loss, stage, device, real_img, real_c, gen_z, gen_c, batch_size, batch_gpu, num_gpus):
# Initialize gradient accumulation
if stage.start_event is not None:
stage.start_event.record(torch.cuda.current_stream(device))
stage.opt.zero_grad(set_to_none = True)
stage.net.requires_grad_(True)
# Accumulate gradients over multiple rounds
for round_idx, (x, cx, z, cz) in enumerate(zip(real_img, real_c, gen_z, gen_c)):
sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)
loss.accumulate_gradients(stage = stage.name, real_img = x, real_c = cx,
gen_z = z, gen_c = cz, sync = sync, gain = stage.interval)
# Update weights
stage.net.requires_grad_(False)
with torch.autograd.profiler.record_function(stage.name + "_opt"):
for param in stage.net.parameters():
if param.grad is not None:
torch_misc.nan_to_num(param.grad, nan = 0, posinf = 1e5, neginf=-1e5, out = param.grad)
stage.opt.step()
if stage.end_event is not None:
stage.end_event.record(torch.cuda.current_stream(device))
# Update Gs -- the exponential moving average weights copy of G
def update_ema_network(G, Gs, batch_size, cur_nimg, ema_kimg, ema_rampup):
with torch.autograd.profiler.record_function("Gs"):
ema_nimg = ema_kimg * 1000
if ema_rampup is not None:
ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
for p_ema, p in zip(Gs.parameters(), G.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for b_ema, b in zip(Gs.buffers(), G.buffers()):
b_ema.copy_(b)
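# Numeric sketch of the EMA decay above (illustrative values, not defaults from this file):
# with ema_kimg = 10 and batch_size = 32, ema_nimg = 10000 and
# ema_beta = 0.5 ** (32 / 10000) ~= 0.9978, i.e. Gs tracks G with a half-life of ~10k images.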
# Evaluate a model over a list of metrics and report the results
def evaluate(Gs, snapshot_pkl, metrics, eval_images_num, dataset_args, num_gpus, rank, device, log,
logger = None, run_dir = None, print_progress = True):
for metric in metrics:
result_dict = metric_main.compute_metric(metric = metric, max_items = eval_images_num,
G = Gs, dataset_args = dataset_args, num_gpus = num_gpus, rank = rank, device = device,
progress = metric_utils.ProgressMonitor(verbose = log))
if log:
metric_main.report_metric(result_dict, run_dir = run_dir, snapshot_pkl = snapshot_pkl)
if logger is not None:
logger.metrics.update(result_dict.results)
# Snapshots and logging
# ----------------------------------------------------------------------------
# Initialize image grid, of both real and generated samples
def init_img_grid(dataset, input_shape, device, run_dir, log):
if not log:
return None, None, None
grid_size, images, labels = misc.setup_snapshot_img_grid(dataset)
misc.save_img_grid(images, os.path.join(run_dir, "reals.png"), drange = [0, 255], grid_size = grid_size)
grid_z = torch.randn([labels.shape[0], *input_shape[1:]], device = device)
grid_c = torch.from_numpy(labels).to(device)
return grid_size, grid_z, grid_c
# Save a snapshot of the sampled grid for the given latents/labels
def snapshot_img_grid(Gs, drange_net, grid_z, grid_c, grid_size, batch_gpu, truncation_psi, run_dir = ".", suffix = "init"):
    images = torch.cat([Gs(z, c, truncation_psi, noise_mode = "const").cpu() for z, c in zip(grid_z.split(batch_gpu), grid_c.split(batch_gpu))]).numpy()
    misc.save_img_grid(images, os.path.join(run_dir, f"fakes_{suffix}.png"), drange = drange_net, grid_size = grid_size)
# Initialize logs (tracking metrics, json log file, tfevent files, etc.)
def init_logger(run_dir, log):
logger = dnnlib.EasyDict({
"collector": training_stats.Collector(regex = ".*"),
"metrics": {},
"json": None,
"tfevents": None
})
if log:
logger.json = open(os.path.join(run_dir, "stats.jsonl"), "wt")
try:
import torch.utils.tensorboard as tensorboard
logger.tfevents = tensorboard.SummaryWriter(run_dir)
except ImportError as err:
print("Skipping tfevents export:", err)
return logger
# Collect statistics from each training stage across the processes/GPUs
def collect_stats(logger, stages):
for stage in stages:
value = []
if (stage.start_event is not None) and (stage.end_event is not None):
stage.end_event.synchronize()
value = stage.start_event.elapsed_time(stage.end_event)
training_stats.report0("Timing/" + stage.name, value)
logger.collector.update()
stats = logger.collector.as_dict()
return stats
# Update the logs (json and tfevents files) with the new info in stats
def update_logger(logger, stats, cur_nimg, start_time):
timestamp = time.time()
if logger.json is not None:
fields = dict(stats, timestamp = timestamp)
logger.json.write(json.dumps(fields) + "\n")
logger.json.flush()
if logger.tfevents is not None:
global_step = int(cur_nimg / 1e3)
walltime = timestamp - start_time
for name, value in stats.items():
logger.tfevents.add_scalar(name, value.mean, global_step = global_step, walltime = walltime)
for name, value in logger.metrics.items():
logger.tfevents.add_scalar(f"Metrics/{name}", value, global_step = global_step, walltime = walltime)
logger.tfevents.flush()
# Training Loop
# ----------------------------------------------------------------------------
# 1. Sets up the environment and data
# 2. Builds the generator (g) and discriminator (d) networks
# 3. Manages the training process
# 4. Runs periodic evaluations on specified metrics
# 5. Produces sample images over the course of training
def training_loop(
# General configuration
train = False, # Training mode
eval = False, # Evaluation mode
vis = False, # Visualization mode
run_dir = ".", # Output directory
num_gpus = 1, # Number of GPUs participating in the training
rank = 0, # Rank of the current process in [0, num_gpus]
cG = {}, # Options for generator network
cD = {}, # Options for discriminator network
# Data
dataset_args = {}, # Options for training set
drange_net = [-1,1], # Dynamic range used when feeding image data to the networks
# Optimization
loss_args = {}, # Options for loss function
total_kimg = 25000, # Total length of the training, measured in thousands of real images
batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus
batch_gpu = 4, # Number of samples processed at a time by one GPU
ema_kimg = 10.0, # Half-life of the exponential moving average (EMA) of generator weights
ema_rampup = None, # EMA ramp-up coefficient
    cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
    allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
# Logging
resume_pkl = None, # Network pickle to resume training from
resume_kimg = 0.0, # Assumed training progress at the beginning
# Affects reporting and training schedule
kimg_per_tick = 8, # Progress snapshot interval
img_snapshot_ticks = 3, # How often to save image snapshots? None = disable
network_snapshot_ticks = 3, # How often to save network snapshots? None = disable
last_snapshots = 10, # Maximal number of prior snapshots to save
printname = "", # Experiment name for logging
# Evaluation
vis_args = {}, # Options for vis.vis
metrics = [], # Metrics to evaluate during training
eval_images_num = 50000, # Sample size for the metrics
truncation_psi = 0.7 # Style strength multiplier for the truncation trick (used for visualizations only)
):
# Initialize
start_time = time.time()
device = init_cuda(rank, cudnn_benchmark, allow_tf32)
log = (rank == 0)
dataset, dataset_iter = load_dataset(dataset_args, batch_size, rank, num_gpus, log) # Load training set
nets = construct_nets(cG, cD, dataset, device, log) if train else None # Construct networks
G, D, Gs = load_nets(resume_pkl, nets, device, log) # Resume from existing pickle
print_nets(G, D, batch_gpu, device, log) # Print network summary tables
if eval:
misc.log("Run evaluation...", log = log)
evaluate(Gs, resume_pkl, metrics, eval_images_num, dataset_args, num_gpus, rank, device, log)
if vis and log:
misc.log("Produce visualizations...")
visualize.vis(Gs, dataset, device, batch_gpu, drange_net = drange_net, ratio = dataset.ratio,
truncation_psi = truncation_psi, **vis_args)
if not train:
exit()
nets = distribute_nets(G, D, Gs, device, num_gpus, log) # Distribute networks across GPUs
loss, stages = setup_training_stages(loss_args, G, cG, D, cD, nets, device, log) # Setup training stages (losses and optimizers)
grid_size, grid_z, grid_c = init_img_grid(dataset, G.input_shape, device, run_dir, log) # Initialize an image grid
logger = init_logger(run_dir, log) # Initialize logs
# Train
misc.log(f"Training for {total_kimg} kimg...", "white", log)
cur_nimg, cur_tick, batch_idx = int(resume_kimg * 1000), 0, 0
tick_start_nimg, tick_start_time = cur_nimg, time.time()
stats = None
while True:
# Fetch training data
real_img, real_c, gen_zs, gen_cs = fetch_data(dataset, dataset_iter, G.input_shape, drange_net,
device, len(stages), batch_size, batch_gpu)
# Execute training stages
for stage, gen_z, gen_c in zip(stages, gen_zs, gen_cs):
if batch_idx % stage.interval != 0:
continue
run_training_stage(loss, stage, device, real_img, real_c, gen_z, gen_c, batch_size, batch_gpu, num_gpus)
# Update Gs
update_ema_network(G, Gs, batch_size, cur_nimg, ema_kimg, ema_rampup)
# Update state
cur_nimg += batch_size
batch_idx += 1
# Perform maintenance tasks once per tick
done = (cur_nimg >= total_kimg * 1000)
if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
continue
# Print status line and accumulate the info in logger.collector
tick_end_time = time.time()
if stats is not None:
default = dnnlib.EasyDict({'mean': -1})
fields = []
fields.append("tick " + misc.bold(f"{training_stats.report0('Progress/tick', cur_tick):<5d}"))
fields.append("kimg " + misc.bcolored(f"{training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}", "red"))
fields.append("")
fields.append("loss/reg: G (" + misc.bcolored(f"{stats.get('Loss/G/loss', default).mean:>6.3f}", "blue"))
fields.append(misc.bold(f"{stats.get('Loss/G/reg', default).mean:>6.3f}") + ")")
fields.append("D "+ misc.bcolored(f"({stats.get('Loss/D/loss', default).mean:>6.3f}", "blue"))
fields.append(misc.bold(f"{stats.get('Loss/D/reg', default).mean:>6.3f}") + ")")
fields.append("")
fields.append("time " + misc.bold(f"{dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"))
fields.append(f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}")
fields.append(f"mem: GPU {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}")
fields.append(f"CPU {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}")
fields.append(misc.bold(printname))
torch.cuda.reset_peak_memory_stats()
training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
misc.log(" ".join(fields), log = log)
# Save image snapshot
if log and (img_snapshot_ticks is not None) and (done or cur_tick % img_snapshot_ticks == 0):
visualize.vis(Gs, dataset, device, batch_gpu, training = True,
step = cur_nimg // 1000, grid_size = grid_size, latents = grid_z,
labels = grid_c, drange_net = drange_net, ratio = dataset.ratio, **vis_args)
# Save network snapshot
if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
snapshot_data, snapshot_pkl = save_nets(G, D, Gs, cur_nimg, dataset_args, run_dir, num_gpus > 1, last_snapshots, log)
# Evaluate metrics
evaluate(snapshot_data["Gs"], snapshot_pkl, metrics, eval_images_num,
dataset_args, num_gpus, rank, device, log, logger, run_dir)
del snapshot_data
# Collect stats and update logs
stats = collect_stats(logger, stages)
update_logger(logger, stats, cur_nimg, start_time)
cur_tick += 1
tick_start_nimg, tick_start_time = cur_nimg, time.time()
maintenance_time = tick_start_time - tick_end_time
if done:
break
# Done
misc.log("Done!", "blue")
|
__author__ = 'ftp-user'
|
# local
import ivy
# global
from typing import Callable, Type, List, Iterable, Optional
from types import ModuleType
def _wrap_function(function_name: str) -> Callable:
"""Wraps the function called `function_name`.
Parameters
----------
function_name
the name of the function e.g. "abs", "mean" etc.
Returns
-------
new_function
the wrapped function.
Examples
--------
>>> ivy.set_backend("torch")
>>> from ivy.array.wrapping import _wrap_function
>>> absolute = _wrap_function("abs")
>>> x = ivy.array([-1])
>>> print(absolute(x))
ivy.array([1])
"""
def new_function(self, *args, **kwargs):
"""Add the data of the current array from which the instance function is invoked
as the first arg parameter or kwarg parameter. Return the new function with
the name function_name and the new args variable or kwargs as the new inputs.
"""
function = ivy.__dict__[function_name]
# gives us the position and name of the array argument
data_idx = function.array_spec[0]
if len(args) > data_idx[0][0]:
args = ivy.copy_nest(args, to_mutable=True)
data_idx = [data_idx[0][0]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(args, data_idx, self._data)
else:
kwargs = ivy.copy_nest(kwargs, to_mutable=True)
data_idx = [data_idx[0][1]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(kwargs, data_idx, self._data)
return function(*args, **kwargs)
return new_function
def add_ivy_array_instance_methods(
cls: Type[ivy.Array], modules: List[ModuleType], to_ignore: Optional[Iterable] = ()
):
"""Loop over all ivy modules such as activations, general, etc. and add
the module functions to ivy arrays as instance methods using _wrap_function.
Parameters
----------
cls
the class we want to add the instance methods to.
modules
the modules to loop over: activations, general etc.
to_ignore
any items we don't want to add an instance method for.
Examples
--------
As shown, `add_ivy_array_instance_methods` adds all the appropriate functions from
the activations module as instance methods to our toy `ArrayExample` class:
>>> from ivy.functional.ivy import activations
>>> class ArrayExample: \
pass
>>> ivy.add_ivy_array_instance_methods(ArrayExample, [activations])
>>> print(hasattr(ArrayExample, "relu"), hasattr(ArrayExample, "softmax"))
True True
"""
for module in modules:
for key, value in module.__dict__.items():
# we skip the cases where the function is protected, the instance
# method has already been added manually and a few other cases
if (
key.startswith("_")
or key[0].isupper()
or not callable(value)
or key in cls.__dict__
or hasattr(cls, key)
or key in to_ignore
or key not in ivy.__dict__
):
continue
try:
setattr(cls, key, _wrap_function(key))
except AttributeError:
pass
|
"""Interactions with Labber's HDF5 data files."""
from .labber_io import LabberData
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from libs.math import statistics
from waterfall.test.wf_testcase import WaterfallTestCase
class StatisticsTest(WaterfallTestCase):
def testGetZFromAlpha(self):
self.assertAlmostEqual(1.2815515655446004, statistics._GetZFromAlpha(0.1))
self.assertAlmostEqual(1.2815515655446004, statistics._GetZFromAlpha(.12))
def testWilsonScoreConfidenceInterval(self):
interval = statistics.WilsonScoreConfidenceInterval(1.0, 100, 0.001)
self.assertAlmostEqual(0.9128290627200445, interval.lower)
self.assertAlmostEqual(1.0, interval.upper)
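    # Cross-check (a hand computation, not part of the original test): the Wilson score
    # interval for p_hat successes out of n trials with critical value z has bounds
    #   (p_hat + z^2/(2n) +/- z * sqrt(p_hat*(1 - p_hat)/n + z^2/(4n^2))) / (1 + z^2/n).
    # With p_hat = 1.0, n = 100 and one-sided alpha = 0.001 (z ~= 3.0902) this gives
    # lower = 1 / (1 + z^2/100) ~= 0.9128 and upper = 1.0, matching the asserts above.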
|
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
from teams_and_flags import team_data
import os
basedir = os.path.abspath(os.path.dirname(__file__))
Base = declarative_base()
engine = create_engine('sqlite:///db/mian.db')
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
flag = Column(String)
def __init__(self, id, username, password, flag):
self.id = id
self.username = username
self.password = password
self.flag = flag
class Task(Base):
__tablename__ = "tasks"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'))
answer = Column(String)
stage = Column(Integer)
def __init__(self, user_id):
self.user_id = user_id
self.stage = 1
def init_db():
Session = sessionmaker(bind=engine)
s = Session()
Base.metadata.create_all(engine)
for id, (username, password, flag) in enumerate(team_data):
user = User(id, username, password, flag)
s.add(user)
task = Task(id)
s.add(task)
s.commit()
s.close()
def check_user(username, password):
Session = sessionmaker(bind=engine)
s = Session()
return s.query(User).filter_by(username=username, password=password).first()
def get_user(id):
Session = sessionmaker(bind=engine)
s = Session()
return s.query(User).filter_by(id=id).first()
def get_task(user_id):
Session = sessionmaker(bind=engine)
s = Session()
return s.query(Task).filter_by(user_id=user_id).first()
def update_answer(user_id, answer):
Session = sessionmaker(bind=engine)
s = Session()
task = s.query(Task).filter_by(user_id=user_id).first()
task.answer = answer
s.commit()
s.close()
def update_stage(user_id, stage):
Session = sessionmaker(bind=engine)
s = Session()
task = s.query(Task).filter_by(user_id=user_id).first()
task.stage = stage
s.commit()
s.close() |
from serial import Serial
import os
# I basically never use my right pinky (10) for alpha characters
# and I never use my right thumb (6)
alphabet_to_finger = {
'a': 5,
'b': 2,
'c': 3,
'd': 3,
'e': 3,
'f': 2,
'g': 2,
'h': 7,
'i': 8,
'j': 7,
'k': 8,
'l': 9,
'm': 7,
'n': 7,
'o': 9,
'p': 9,
'q': 5,
'r': 2,
's': 4,
't': 2,
'u': 7,
'v': 2,
'w': 4,
'x': 4,
'y': 7,
'z': 5
}
words = [
'hello',
'world'
# actually wordlist is filled from /usr/share/dict/words
]
keystroke_command = 'osascript -e \'tell application "System Events" to keystroke "%s"\''
def send_keystroke(c):
os.system(keystroke_command % c)
def send_keys(word):
for c in word:
send_keystroke(c)
send_keystroke(' ')
print(word)
def pick_word(options):
# TODO: use a word frequency dictionary
# this is the most hacky part of the code.
if 'device' in options:
return 'device'
if 'design' in options:
return 'design'
if 'world' in options:
return 'world'
return options[0]
def build_mappings(words):
fingers_to_words = {}
for word in words:
word = word.lower()
if not word.isalpha():
# skip weird word
continue
finger_key = ''
for c in word:
finger_key += str(alphabet_to_finger[c])
if finger_key in fingers_to_words:
fingers_to_words[finger_key].append(word)
else:
fingers_to_words[finger_key] = [word]
return fingers_to_words
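# For example, with the finger map above, build_mappings(['hello', 'world']) yields
# {'73999': ['hello'], '49293': ['world']}: two words collide only when every letter
# falls on the same finger in the same order, which is what pick_word() disambiguates.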
def build_wordlist():
words = []
with open('/usr/share/dict/words') as wordf:
for line in wordf:
words.append(line.strip())
return words
def main():
usbser = Serial('/dev/cu.usbmodem1422')
fingers_to_words = build_mappings(build_wordlist())
print('Ready to parse your input.')
keysBuffer = []
while True:
line = usbser.readline().strip()
if line[-1] == 'u':
print(line[0])
# depressed
if line[0] == '1' or line[0] == '6':
# word boundary (space)
options = fingers_to_words[''.join(keysBuffer)]
if len(options) > 1:
print('more than one option, picking...')
send_keys(pick_word(options))
else:
send_keys(pick_word(options))
keysBuffer = []
else:
keysBuffer.append(line[0])
if __name__ == '__main__':
main()
|
#!python
from set import Set
import unittest
class SetTest(unittest.TestCase):
def test_init_empty(self):
s = Set()
assert s.size() == 0
def test_init_with_elements(self):
s = Set(['one', 'two', 'three'])
assert s.size() == 3
assert s.contains('one') is True
assert s.contains('two') is True
assert s.contains('three') is True
def test_size(self):
s = Set()
assert s.size() == 0
s.add('one')
assert s.size() == 1
s.add('two')
assert s.size() == 2
s.remove('two')
assert s.size() == 1
def test_contains(self):
s = Set(['waffle', 'potato', 'three'])
assert s.contains('three') is True
assert s.contains('potato') is True
assert s.contains('waffle') is True
s.add('chicken')
assert s.contains('chicken') is True
s.add('chicken')
assert s.contains('chicken') is True
s.remove('waffle')
assert s.contains('waffle') is False
assert s.contains('four') is False
def test_add(self):
s = Set()
assert s.size() == 0
s.add('hello')
assert s.contains('hello') is True
assert s.size() == 1
def test_remove(self):
s = Set(['one', 'two', 'three'])
assert s.size() == 3
s.remove('one')
assert s.contains('one') is False
assert s.size() == 2
s.remove('two')
assert s.contains('two') is False
assert s.size() == 1
s.remove('three')
assert s.contains('three') is False
assert s.size() == 0
with self.assertRaises(KeyError):
s.remove('four')
def test_union(self):
s1 = Set(['one', 'two', 'three'])
s2 = Set(['three', 'four', 'five'])
s1_union = s1.union(s2)
assert s1_union.size() == 5
assert s1_union.contains('one') is True
assert s1_union.contains('two') is True
assert s1_union.contains('three') is True
assert s1_union.contains('four') is True
assert s1_union.contains('five') is True
s2_union = s2.union(s1)
assert s2_union.size() == 5
assert s2_union.contains('one') is True
assert s2_union.contains('two') is True
assert s2_union.contains('three') is True
assert s2_union.contains('four') is True
assert s2_union.contains('five') is True
def test_intersection(self):
s1 = Set(['one', 'two', 'three', 'four'])
s2 = Set(['three', 'four', 'five'])
s1_intersection = s1.intersection(s2)
assert s1_intersection.size() == 2
assert s1_intersection.contains('one') is False
assert s1_intersection.contains('three') is True
assert s1_intersection.contains('four') is True
s2_intersection = s2.intersection(s1)
assert s2_intersection.size() == 2
assert s2_intersection.contains('one') is False
assert s2_intersection.contains('three') is True
assert s2_intersection.contains('four') is True
def test_difference(self):
s1 = Set(['one', 'two', 'three'])
s2 = Set(['three', 'four', 'five'])
s1_difference = s1.difference(s2)
assert s1_difference.size() == 2
assert s1_difference.contains('one') is True
assert s1_difference.contains('two') is True
assert s1_difference.contains('three') is False
assert s1_difference.contains('four') is False
s2_difference = s2.difference(s1)
assert s2_difference.size() == 2
assert s2_difference.contains('one') is False
assert s2_difference.contains('two') is False
assert s2_difference.contains('three') is False
assert s2_difference.contains('four') is True
assert s2_difference.contains('five') is True
def test_is_subset(self):
s1 = Set(['one', 'two', 'three', 'four'])
s2 = Set(['two', 'four'])
assert s1.is_subset(s2) is True
assert s2.is_subset(s1) is False
if __name__ == '__main__':
unittest.main()
|
"""
data.py
PURPOSE: This file defines the data for training from genius
as well as code for collecting the initial data
"""
|
import ssl
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import PoolManager
class MyAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
ssl_version=ssl.PROTOCOL_SSLv3)
def session():
s = requests.Session()
    s.mount('https://', MyAdapter())  # all HTTPS connections use ssl.PROTOCOL_SSLv3
return s
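# A minimal usage sketch (hypothetical URL; note that ssl.PROTOCOL_SSLv3 is only
# available when the underlying OpenSSL build still supports SSLv3, so constructing
# MyAdapter's pool may fail on modern Python installations):
# s = session()
# r = s.get('https://legacy.example.com/')
# print(r.status_code)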
|
#!/usr/bin/python3
from app import db, Students, Classes, Enrolments
db.create_all()
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cProfile as profiler
import gc
import sys
import time
try:
import pstats
except Exception:
# Don't want to force pstats into the venv if it's not always used
pass
from neutron.api.v2 import attributes
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def filter_body(context, body, admin_only=None, always_filter=None):
if not context.is_admin and admin_only:
for attr in admin_only:
pop_param(body, attr)
if always_filter:
for attr in always_filter:
pop_param(body, attr)
def attr_specified(param):
return param is not attributes.ATTR_NOT_SPECIFIED
def timed(fn):
def _wrapped(*args, **kwargs):
began = time.time()
res = fn(*args, **kwargs)
elapsed = time.time() - began
LOG.info("Time for %s = %s" % (fn, elapsed))
return res
return _wrapped
def profile(output):
def _inner(fn):
def _wrapped(*args, **kw):
result = _profile(output, fn, *args, **kw)
# uncomment this to see who's calling what
# stats.print_callers()
return result
return _wrapped
return _inner
def live_profile(fn):
def _wrapped(*args, **kw):
elapsed, stat_loader, result = _live_profile(fn, *args, **kw)
stats = stat_loader()
stats.sort_stats('cumulative')
stats.print_stats()
# uncomment this to see who's calling what
# stats.print_callers()
return result
return _wrapped
def _profile(filename, fn, *args, **kw):
gc.collect()
profiler.runctx('result = fn(*args, **kw)', globals(), locals(),
filename=filename)
return locals()['result']
def _live_profile(fn, *args, **kw):
load_stats = lambda: pstats.Stats()
gc.collect()
began = time.time()
profiler.runctx('result = fn(*args, **kw)', globals(), locals())
ended = time.time()
return ended - began, load_stats, locals()['result']
def pop_param(attrs, param, default=None):
val = attrs.pop(param, default)
if attr_specified(val):
return val
return default
class Command(object):
def __init__(self, func):
self.func = func
self.result = None
self.called = False
def __call__(self, *args, **kwargs):
self.called = True
self.result = self.func(*args, **kwargs)
return self.result
class CommandManager(object):
def __init__(self):
self.do_commands = []
self.undo_commands = []
@contextlib.contextmanager
def execute(self, exc=None):
try:
yield self
except Exception:
exc_info = sys.exc_info()
LOG.exception("Exception in transaction", exc_info=exc_info)
self.rollback()
raise exc_info[1]
def do(self, func):
cmd = Command(func)
self.do_commands.append(cmd)
return cmd
def undo(self, func):
cmd = Command(func)
self.undo_commands.append(cmd)
return cmd
def rollback(self):
do_commands = reversed(self.do_commands)
for cmd in reversed(self.undo_commands):
do = do_commands.next()
if not do.called:
continue
try:
cmd(do.result)
except Exception:
LOG.exception("Rollback failed and wasn't caught!")
|
import unittest
import __init__ as lib
import jinja2
from ansible.parsing.dataloader import DataLoader
class TaskBashTestCase(unittest.TestCase):
file = 'cmf/all/scripts/tasks/bash.yml'
tests = [
# Covers:
# - run: "/path/to/script.sh"
{
'result': {
'name': 'Running a Bash command',
'shell': '/path/to/script.sh',
},
'args': {
'run': '/path/to/script.sh',
},
},
# Covers:
# - name: "Doing something"
# run: "echo 12"
{
'result': {
'name': 'Doing something',
'shell': 'echo 12',
},
'args': {
'name': 'Doing something',
'run': 'echo 12',
},
},
]
def test(self):
loader = DataLoader()
tasks = loader.load_from_file(lib.cikit.dirs['self'] + '/' + self.file)
self.assertEqual(len(tasks), 1)
self.assertTrue('shell' in tasks[0])
self.assertTrue('name' in tasks[0])
self.assertTrue('when' in tasks[0])
self.assertTrue('args' in tasks[0])
for test in self.tests:
for item in ['name', 'shell']:
self.assertEqual(
jinja2.Template(tasks[0][item]).render({'item': test['args']}),
test['result'][item]
)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms, models
from torch import nn, optim, tensor
import torch.nn.functional as F
from collections import OrderedDict
from PIL import Image
from torch.autograd import Variable
import helperr
import json
import os
import copy
import time
import argparse
ap = argparse.ArgumentParser(description='Train.py')
ap.add_argument('data_dir', action="store", default="./flowers/")
ap.add_argument('--gpu', dest="gpu", action="store", default="gpu")
ap.add_argument('--save_dir', dest="save_dir", action="store", default="./checkpoint.pth")
ap.add_argument('--learning_rate', dest="learning_rate", action="store", default=0.001)
ap.add_argument('--dropout', dest = "dropout", action = "store", default = 0.5)
ap.add_argument('--epochs', dest="epochs", action="store", type=int, default=3)
ap.add_argument('--model', dest="model", action="store", default="densenet121", type = str)
ap.add_argument('--hidden_layer1', type=int, dest="hidden_layer1", action="store", default=4096)
pa = ap.parse_args()
root = pa.data_dir
path = pa.save_dir
lr = pa.learning_rate
model = pa.model
dropout = pa.dropout
hidden_layer1 = pa.hidden_layer1
device = pa.gpu
epochs = pa.epochs
def main():
    trainloader, validloader, testloader = helperr.load_data(root)
    # The original script referenced class_to_idx, structure and hidden_layers
    # without defining them; map them onto the closest values available here
    # (this assumes the train loader wraps a torchvision ImageFolder-style dataset).
    class_to_idx = getattr(trainloader.dataset, 'class_to_idx', None)
    structure = model
    hidden_layers = hidden_layer1
    net, optimizer, criterion = helperr.build_model(hidden_layer1, class_to_idx)
    helperr.train(net, epochs, lr, criterion, optimizer, trainloader, validloader)
    helperr.save_checkpoint(net, path, structure, hidden_layers, dropout, lr)
    print("Done Training!")
if __name__== "__main__":
main()
|
# Read 10 integers and store them in a vector. Then
# write out the elements that are prime and their respective
# positions in the vector.
import random
vetorA=[]
primo={}
for c in range(0,10):
n1=random.randint(1,50)
vetorA.append(n1)
p=0
for u in range(1,n1+1):
if(n1%u==0):
p+=1
    if(p==2):  # exactly two divisors (1 and n1) => prime; this also excludes 1
        primo[n1]=c
print(vetorA)
print(primo)
|
"""Functions related to taxa formatting and hierarchy."""
def set_taxon(taxon, subtax, db):
"""Return a database specific key-val pair for taxon."""
import requests
    # Parse incoming taxon name string for errors and reassemble
taxon_list = taxon.split(',')
taxon_list = [x.strip() for x in taxon_list]
taxon_list = [x.capitalize() for x in taxon_list]
clean_list = list()
for item in taxon_list:
if len(item.split()) > 3 or len(item.split()) == 0:
msg = 'Unsupported taxon name length: {0:s}'.format(item)
raise ValueError(400, msg)
if '^' in item:
parts = [x.strip() for x in item.split('^')]
if len(parts) != 2:
msg = 'Incorrect usage of "not" caret: {0:s}'.format(item)
raise ValueError(400, msg)
# NEW RESOURCE: Add to list below if DB supports not (^) notation
if db in ['pbdb']:
parts = [x.capitalize() for x in parts]
clean_list.append('^'.join(parts))
else:
# DB does not support caret so remove the "not" portion
clean_list.append(parts[0])
else:
clean_list.append(item)
taxon = ','.join(clean_list)
# Format for specific database API parameter payloads
if db == 'neotoma':
if subtax:
return {'taxonname': taxon,
'lower': 'true'}
else:
return {'taxonname': taxon}
elif db == 'pbdb':
if subtax:
return {'base_name': taxon}
else:
return {'taxon_name': taxon}
elif db == 'sead':
        # Currently SEAD does not support general taxa searching.
# An external service must be used to resolve the taxon rank of
# the first name in a list of taxa prior to parameterizing the query
single_taxon = taxon.split(',')[0]
if len(single_taxon.split()) == 2:
# Consider this to be a 'Genus Species' name
query = 'ilike.*{0:s}'.format(single_taxon)
return {'taxon': query}
else:
url = 'https://paleobiodb.org/data1.2/taxa/single.json'
payload = {'taxon_name': single_taxon}
rank = requests.get(url, payload).json()['records'][0]['rnk']
if rank == 9:
# Rank of Family
query = 'ilike.{0:s}'.format(single_taxon)
return {'family_name': query}
elif rank == 5:
# Rank of Genus
query = 'ilike.{0:s}'.format(single_taxon)
return {'genus_name': query}
# NEW RESOURCE: Add another databse specific taxon name mapping here
else:
return {}
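# A few illustrative calls (returns follow directly from the branches above;
# the taxon names are arbitrary examples):
# set_taxon('Canis', True, 'pbdb') -> {'base_name': 'Canis'}
# set_taxon('Canis', False, 'pbdb') -> {'taxon_name': 'Canis'}
# set_taxon('canis', True, 'neotoma') -> {'taxonname': 'Canis', 'lower': 'true'}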
def get_subtaxa(taxon, inc_syn=True):
"""
Query PBDB for all lower order relatives of a specified taxa.
:arg taxon: Taxonmic name to query
:type taxon: str
:arg inc_syn: Include recognized synonyms in the return
:type inc_syn: bool
"""
import requests
from ..elc import config
subtaxa = set()
url = ''.join([config.get('resource_api', 'pbdb'), 'taxa/list.json'])
payload = {'rel': 'all_children', 'name': taxon}
try:
r = requests.get(url=url,
params=payload,
timeout=config.get('default', 'timeout'))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
msg = r.json().get('warnings')
raise ValueError(r.status_code, msg)
data = r.json()
for rec in data['records']:
if rec.get('tdf') and not inc_syn:
subtaxa.add(rec.get('acn'))
else:
subtaxa.add(rec.get('nam'))
return list(subtaxa)
def get_parents(taxon):
"""
Query PBDB for parent taxonomic groups.
:arg taxon: Taxonomic name to query
:type taxon: str
"""
import requests
from collections import OrderedDict
from ..elc import config
parents = dict()
base_url = config.get('resource_api', 'pbdb') + 'taxa/list.json'
tax_sys = ['kingdom', 'phylum', 'class', 'order',
'family', 'genus', 'species']
payload = dict()
payload.update(vocab='pbdb', rel='all_parents',
order='hierarchy', name=taxon)
resp = requests.get(base_url, params=payload, timeout=None)
if resp.status_code == 200:
resp_json = resp.json()
if 'warnings' in resp_json:
raise ValueError(400, 'Bad Request',
str(resp_json['warnings'][0]))
else:
for rec in resp_json['records']:
for rank in tax_sys:
if rec.get('taxon_rank') == rank:
parents.update({rank: rec.get('taxon_name')})
return OrderedDict(parents)
else:
raise ValueError(resp.status_code, resp.reason,
'Server error or bad URL')
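# Illustrative return shape (keys are limited to the ranks in tax_sys; the actual
# values depend on the live PBDB response):
# get_parents('Canis') -> OrderedDict([('kingdom', ...), ('phylum', ...), ..., ('family', ...)])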
|
"""
Django settings for website project.
Generated by 'django-admin startproject' using Django 2.2.20.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading website/settings/.env file
environ.Env.read_env()
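# The .env file loaded above is expected to provide at least the variables read in
# this module (values below are placeholders, not real settings):
#   SECRET_KEY=change-me
#   DEBUG=True
#   DATABASE_URL=sqlite:///db.sqlite3   # consumed by env.db() below
#   DJANGO_ADMIN_PATH=hidden-admin/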
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG")
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
EXTERNAL_APPS = [
"admin_honeypot",
"axes",
]
LOCAL_APPS = [
"website.common.apps.CommonConfig",
]
INSTALLED_APPS += EXTERNAL_APPS + LOCAL_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"csp.middleware.CSPMiddleware",
"axes.middleware.AxesMiddleware",
]
ROOT_URLCONF = "website.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"website.common.context_processors.global_settings",
],
},
},
]
WSGI_APPLICATION = "website.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": env.db(),
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
SITE_TITLE = "django-base"
DJANGO_ADMIN_PATH = env("DJANGO_ADMIN_PATH")
AUTHENTICATION_BACKENDS = [
"axes.backends.AxesBackend",
"django.contrib.auth.backends.ModelBackend",
]
# default-src 'none'; img-src 'self'; script-src 'self'; style-src 'self'
CSP_SCRIPT_SRC = ("'self'", "code.jquery.com", "cdn.jsdelivr.net")
CSP_IMG_SRC = ("'self'", "data:")
CSP_STYLE_SRC = ("'self'", "cdn.jsdelivr.net")
|
#!/usr/bin/env python
from datetime import datetime, timedelta
from app import db
from flask import current_app
from app.models import User, Challenge, Task, challengers
from config import Config
def tearDown(db):
db.session.remove()
db.drop_all()
db.session.commit()
|
"""Create multiple metrics
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'src', 'data_utils')))
import numpy as np
from sklearn.metrics import confusion_matrix
from scipy import signal
import pickle
import argparse
import matplotlib.pyplot as plt
import itertools
from lea_metrics import accuracy, edit_score, overlap_f1, mid_mAP
def conf_mat(y_pred, y_true, normalize=False):
"""Find confusion matrix for frame-wise accuracy
Args:
y_pred: list of prediction vectors
y_true: list of groundtruth vectors
Returns:
frame-wise confusion matrix
"""
y_pred_all = np.concatenate(y_pred)
y_true_all = np.concatenate(y_true)
cm = confusion_matrix(y_true_all, y_pred_all)
if normalize:
cmsum = cm.sum(axis=1)[:, np.newaxis] + 1e-10
cm = cm.astype('float') / cmsum
return cm
def auto_make(y_score_in, y_true_in, downsampling=1, median_size=5):
"""Automatically make result dictionary
Args:
y_score_in: input score data, list of list of list following the order
of: videos, samples, classes
        y_true_in: input groundtruth, list of list following the order of:
videos, samples
downsampling: downsampling rate, default=1 means no downsampling
median_size: size of median filter. This is just for reference
Returns:
results_dict: dictionary of results
"""
print('Computing metrics...')
n_vids = len(y_true_in)
n_classes = len(y_score_in[0][0])
bg_class = 0
# downsample if needed
y_score = []
y_true = []
y_pred = []
y_pred_median = []
for i in range(n_vids):
a_score = np.copy(y_score_in[i])[::downsampling, :]
y_score.append(np.array(a_score))
y_pred.append(np.argmax(a_score, axis=1))
y_true.append(np.copy(y_true_in[i])[::downsampling])
# post-processing
y_pred_median = []
for i in range(n_vids):
filtered = signal.medfilt(np.copy(y_pred[i]), median_size)
        y_pred_median.append(filtered.astype(int))  # np.int is deprecated in newer numpy
# compute all metrics------------------------------------------------------
results_dict = {}
results_dict['y_score_in'] = y_score_in
results_dict['y_true_in'] = y_true_in
results_dict['downsampling'] = downsampling
results_dict['y_pred'] = y_pred
# results_dict['y_pred_median'] = y_pred_median
results_dict['median_size'] = median_size
results_dict['conf_mat'] = conf_mat(y_pred, y_true)
acc = accuracy(y_pred, y_true)
edit = edit_score(y_pred, y_true, True, bg_class)
f1 = overlap_f1(y_pred, y_true, n_classes, bg_class)
precisions, mAP = mid_mAP(y_pred, y_true, y_score, bg_class)
results_dict['frame_accuracy'] = acc
results_dict['edit'] = edit
results_dict['f1'] = f1
results_dict['precisions'] = precisions
results_dict['mAP'] = mAP
# print results------------------------------------------------------------
print('>' * 80)
print('Frame-wise accuracy: {:.02f}'.format(acc))
print('Edit: {:.02f}'.format(edit))
print('Overlap F1: {:.02f}'.format(f1))
# print('Midpoint-hit criterion metrics')
# print(' precisions: ', results_dict['precisions'])
print('mAP: {:.02f}'.format(mAP))
print('<' * 80)
return results_dict
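# A tiny synthetic usage sketch (random scores, shapes only -- real inputs come from
# a temporal detector's per-frame class scores):
# y_score_demo = [np.random.rand(50, 3), np.random.rand(40, 3)]
# y_true_demo = [np.random.randint(0, 3, 50), np.random.randint(0, 3, 40)]
# results = auto_make(y_score_demo, y_true_demo, downsampling=2, median_size=5)
# print(results['frame_accuracy'], results['edit'], results['f1'], results['mAP'])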
def _viz_pred(y_pred, y_true):
"""Visualize prediction results
Args:
y_pred: list of prediction vectors
y_true: list of groundtruth vectors
"""
y_pred = np.array(y_pred)
y_true = np.array(y_true)
if y_pred.ndim == 1:
y_pred = np.expand_dims(y_pred, 0)
if y_true.ndim == 1:
y_true = np.expand_dims(y_true, 0)
plt.figure()
# detection results
plt.subplot(211)
plt.imshow(y_pred, aspect='auto', interpolation='nearest')
plt.yticks([])
plt.ylabel('detection')
plt.tight_layout()
plt.colorbar()
# groundtruth
plt.subplot(212)
plt.imshow(y_true, aspect='auto', interpolation='nearest')
plt.yticks([])
plt.ylabel('groundtruth')
plt.tight_layout()
plt.colorbar()
pass
def _viz_confmat(cm, label_dict, normalize=True):
"""Visualize confusion matrix
Args:
cm: confusion matrix
label_dict: list of labels (with background). If None, will use numbers
"""
if normalize:
cmsum = cm.sum(axis=1)[:, np.newaxis] + 1e-10
cm = cm.astype('float') / cmsum
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
if label_dict is not None:
tick_marks = np.arange(len(label_dict))
plt.xticks(tick_marks, label_dict, rotation=45)
plt.yticks(tick_marks, label_dict)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
pass
def reload_and_visualize(results_dict, label_dict, video_index,
new_downsampling, new_median_size,
visualize):
"""Load and visualize saved results with new_downsampling and
new_median_size if needed
Args:
results_dict: dictionary of results
label_dict: list of all labels (with background)
video_index: index of video to visualize detection results
new_downsampling: new downsampling rate
new_median_size: new median size
visualize: visualize the results
"""
# downsampling again if necessary
results_dict = auto_make(results_dict['y_score_in'],
results_dict['y_true_in'],
new_downsampling, new_median_size)
if visualize:
# visualize one prediction results
_viz_pred(results_dict['y_pred'][video_index],
results_dict['y_true_in'][video_index])
# visualize confusion matrix
_viz_confmat(results_dict['conf_mat'], label_dict)
plt.show()
pass
if __name__ == '__main__':
"""Interface to call from command line
"""
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--results_dict', type=str,
help='path to results_dict')
parser.add_argument('-l', '--label_dict', type=str,
help='path to label_dict. If ignored, it will use '
'numbers instead')
parser.add_argument('-i', '--video_index', type=int, default=0,
help='index of video to visualize detection')
parser.add_argument('-d', '--new_downsampling', type=int, default=1,
help='new downsampling. If ignored, there will be no '
'downsampling')
parser.add_argument('-m', '--new_median_size', type=int, default=5,
help='new size for median filter (only for reference '
'purpose)')
parser.add_argument('-v', '--visualize', type=int, default=0,
help='1 if visualize, 0 otherwise')
args = parser.parse_args()
assert os.path.exists(args.results_dict)
# load data
results_dict = pickle.load(open(args.results_dict, 'rb'))
if args.label_dict is not None and os.path.exists(args.label_dict):
label_dict = open(args.label_dict).read().splitlines()
label_dict = ['background'] + label_dict
else:
label_dict = None
# reload and visualize
reload_and_visualize(results_dict, label_dict, args.video_index,
args.new_downsampling, args.new_median_size,
args.visualize)
|
"""
Web named colors generated from generate_colors.py
You can install the Color Highlight vscode extension to see colors in editor.
"""
from vmath import Vector3
# ---------- CamelCase ----------
# Pink colors
Pink = Vector3(1.0000, 0.7529, 0.7961) #ffc0cb
LightPink = Vector3(1.0000, 0.7137, 0.7569) #ffb6c1
HotPink = Vector3(1.0000, 0.4118, 0.7059) #ff69b4
DeepPink = Vector3(1.0000, 0.0784, 0.5765) #ff1493
PaleVioletRed = Vector3(0.8588, 0.4392, 0.5765) #db7093
MediumVioletRed = Vector3(0.7804, 0.0824, 0.5216) #c71585
# Red colors
LightSalmon = Vector3(1.0000, 0.6275, 0.4784) #ffa07a
Salmon = Vector3(0.9804, 0.5020, 0.4471) #fa8072
DarkSalmon = Vector3(0.9137, 0.5882, 0.4784) #e9967a
LightCoral = Vector3(0.9412, 0.5020, 0.5020) #f08080
IndianRed = Vector3(0.8039, 0.3608, 0.3608) #cd5c5c
Crimson = Vector3(0.8627, 0.0784, 0.2353) #dc143c
Firebrick = Vector3(0.6980, 0.1333, 0.1333) #b22222
DarkRed = Vector3(0.5451, 0.0000, 0.0000) #8b0000
Red = Vector3(1.0000, 0.0000, 0.0000) #ff0000
# Orange colors
OrangeRed = Vector3(1.0000, 0.2706, 0.0000) #ff4500
Tomato = Vector3(1.0000, 0.3882, 0.2784) #ff6347
Coral = Vector3(1.0000, 0.4980, 0.3137) #ff7f50
DarkOrange = Vector3(1.0000, 0.5490, 0.0000) #ff8c00
Orange = Vector3(1.0000, 0.6471, 0.0000) #ffa500
# Yellow colors
Yellow = Vector3(1.0000, 1.0000, 0.0000) #ffff00
LightYellow = Vector3(1.0000, 1.0000, 0.8784) #ffffe0
LemonChiffon = Vector3(1.0000, 0.9804, 0.8039) #fffacd
LightGoldenrodYellow = Vector3(0.9804, 0.9804, 0.8235) #fafad2
PapayaWhip = Vector3(1.0000, 0.9373, 0.8353) #ffefd5
Moccasin = Vector3(1.0000, 0.8941, 0.7098) #ffe4b5
PeachPuff = Vector3(1.0000, 0.8549, 0.7255) #ffdab9
PaleGoldenrod = Vector3(0.9333, 0.9098, 0.6667) #eee8aa
Khaki = Vector3(0.9412, 0.9020, 0.5490) #f0e68c
DarkKhaki = Vector3(0.7412, 0.7176, 0.4196) #bdb76b
Gold = Vector3(1.0000, 0.8431, 0.0000) #ffd700
# Brown colors
Cornsilk = Vector3(1.0000, 0.9725, 0.8627) #fff8dc
BlanchedAlmond = Vector3(1.0000, 0.9216, 0.8039) #ffebcd
Bisque = Vector3(1.0000, 0.8941, 0.7686) #ffe4c4
NavajoWhite = Vector3(1.0000, 0.8706, 0.6784) #ffdead
Wheat = Vector3(0.9608, 0.8706, 0.7020) #f5deb3
Burlywood = Vector3(0.8706, 0.7216, 0.5294) #deb887
Tan = Vector3(0.8235, 0.7059, 0.5490) #d2b48c
RosyBrown = Vector3(0.7373, 0.5608, 0.5608) #bc8f8f
SandyBrown = Vector3(0.9569, 0.6431, 0.3765) #f4a460
Goldenrod = Vector3(0.8549, 0.6471, 0.1255) #daa520
DarkGoldenrod = Vector3(0.7216, 0.5255, 0.0431) #b8860b
Peru = Vector3(0.8039, 0.5216, 0.2471) #cd853f
Chocolate = Vector3(0.8235, 0.4118, 0.1176) #d2691e
SaddleBrown = Vector3(0.5451, 0.2706, 0.0745) #8b4513
Sienna = Vector3(0.6275, 0.3216, 0.1765) #a0522d
Brown = Vector3(0.6471, 0.1647, 0.1647) #a52a2a
Maroon = Vector3(0.5020, 0.0000, 0.0000) #800000
# Green colors
DarkOliveGreen = Vector3(0.3333, 0.4196, 0.1843) #556b2f
Olive = Vector3(0.5020, 0.5020, 0.0000) #808000
OliveDrab = Vector3(0.4196, 0.5569, 0.1373) #6b8e23
YellowGreen = Vector3(0.6039, 0.8039, 0.1961) #9acd32
LimeGreen = Vector3(0.1961, 0.8039, 0.1961) #32cd32
Lime = Vector3(0.0000, 1.0000, 0.0000) #00ff00
LawnGreen = Vector3(0.4863, 0.9882, 0.0000) #7cfc00
Chartreuse = Vector3(0.4980, 1.0000, 0.0000) #7fff00
GreenYellow = Vector3(0.6784, 1.0000, 0.1843) #adff2f
SpringGreen = Vector3(0.0000, 1.0000, 0.4980) #00ff7f
MediumSpringGreen = Vector3(0.0000, 0.9804, 0.6039) #00fa9a
LightGreen = Vector3(0.5647, 0.9333, 0.5647) #90ee90
PaleGreen = Vector3(0.5961, 0.9843, 0.5961) #98fb98
DarkSeaGreen = Vector3(0.5608, 0.7373, 0.5608) #8fbc8f
MediumAquamarine = Vector3(0.4000, 0.8039, 0.6667) #66cdaa
MediumSeaGreen = Vector3(0.2353, 0.7020, 0.4431) #3cb371
SeaGreen = Vector3(0.1804, 0.5451, 0.3412) #2e8b57
ForestGreen = Vector3(0.1333, 0.5451, 0.1333) #228b22
Green = Vector3(0.0000, 0.5020, 0.0000) #008000
DarkGreen = Vector3(0.0000, 0.3922, 0.0000) #006400
# Cyan colors
Aqua = Vector3(0.0000, 1.0000, 1.0000) #00ffff
Cyan = Vector3(0.0000, 1.0000, 1.0000) #00ffff
LightCyan = Vector3(0.8784, 1.0000, 1.0000) #e0ffff
PaleTurquoise = Vector3(0.6863, 0.9333, 0.9333) #afeeee
Aquamarine = Vector3(0.4980, 1.0000, 0.8314) #7fffd4
Turquoise = Vector3(0.2510, 0.8784, 0.8157) #40e0d0
MediumTurquoise = Vector3(0.2824, 0.8196, 0.8000) #48d1cc
DarkTurquoise = Vector3(0.0000, 0.8078, 0.8196) #00ced1
LightSeaGreen = Vector3(0.1255, 0.6980, 0.6667) #20b2aa
CadetBlue = Vector3(0.3725, 0.6196, 0.6275) #5f9ea0
DarkCyan = Vector3(0.0000, 0.5451, 0.5451) #008b8b
Teal = Vector3(0.0000, 0.5020, 0.5020) #008080
# Blue colors
LightSteelBlue = Vector3(0.6902, 0.7686, 0.8706) #b0c4de
PowderBlue = Vector3(0.6902, 0.8784, 0.9020) #b0e0e6
LightBlue = Vector3(0.6784, 0.8471, 0.9020) #add8e6
SkyBlue = Vector3(0.5294, 0.8078, 0.9216) #87ceeb
LightSkyBlue = Vector3(0.5294, 0.8078, 0.9804) #87cefa
DeepSkyBlue = Vector3(0.0000, 0.7490, 1.0000) #00bfff
DodgerBlue = Vector3(0.1176, 0.5647, 1.0000) #1e90ff
CornflowerBlue = Vector3(0.3922, 0.5843, 0.9294) #6495ed
SteelBlue = Vector3(0.2745, 0.5098, 0.7059) #4682b4
RoyalBlue = Vector3(0.2549, 0.4118, 0.8824) #4169e1
Blue = Vector3(0.0000, 0.0000, 1.0000) #0000ff
MediumBlue = Vector3(0.0000, 0.0000, 0.8039) #0000cd
DarkBlue = Vector3(0.0000, 0.0000, 0.5451) #00008b
Navy = Vector3(0.0000, 0.0000, 0.5020) #000080
MidnightBlue = Vector3(0.0980, 0.0980, 0.4392) #191970
# Violet colors
Lavender = Vector3(0.9020, 0.9020, 0.9804) #e6e6fa
Thistle = Vector3(0.8471, 0.7490, 0.8471) #d8bfd8
Plum = Vector3(0.8667, 0.6275, 0.8667) #dda0dd
Violet = Vector3(0.9333, 0.5098, 0.9333) #ee82ee
Orchid = Vector3(0.8549, 0.4392, 0.8392) #da70d6
Fuchsia = Vector3(1.0000, 0.0000, 1.0000) #ff00ff
Magenta = Vector3(1.0000, 0.0000, 1.0000) #ff00ff
MediumOrchid = Vector3(0.7294, 0.3333, 0.8275) #ba55d3
MediumPurple = Vector3(0.5765, 0.4392, 0.8588) #9370db
BlueViolet = Vector3(0.5412, 0.1686, 0.8863) #8a2be2
DarkViolet = Vector3(0.5804, 0.0000, 0.8275) #9400d3
DarkOrchid = Vector3(0.6000, 0.1961, 0.8000) #9932cc
DarkMagenta = Vector3(0.5451, 0.0000, 0.5451) #8b008b
Purple = Vector3(0.5020, 0.0000, 0.5020) #800080
Indigo = Vector3(0.2941, 0.0000, 0.5098) #4b0082
DarkSlateBlue = Vector3(0.2824, 0.2392, 0.5451) #483d8b
SlateBlue = Vector3(0.4157, 0.3529, 0.8039) #6a5acd
MediumSlateBlue = Vector3(0.4824, 0.4078, 0.9333) #7b68ee
RebeccaPurple = Vector3(0.4000, 0.2000, 0.6000) #663399
# White colors
White = Vector3(1.0000, 1.0000, 1.0000) #ffffff
Snow = Vector3(1.0000, 0.9804, 0.9804) #fffafa
Honeydew = Vector3(0.9412, 1.0000, 0.9412) #f0fff0
MintCream = Vector3(0.9608, 1.0000, 0.9804) #f5fffa
Azure = Vector3(0.9412, 1.0000, 1.0000) #f0ffff
AliceBlue = Vector3(0.9412, 0.9725, 1.0000) #f0f8ff
GhostWhite = Vector3(0.9725, 0.9725, 1.0000) #f8f8ff
WhiteSmoke = Vector3(0.9608, 0.9608, 0.9608) #f5f5f5
Seashell = Vector3(1.0000, 0.9608, 0.9333) #fff5ee
Beige = Vector3(0.9608, 0.9608, 0.8627) #f5f5dc
OldLace = Vector3(0.9922, 0.9608, 0.9020) #fdf5e6
FloralWhite = Vector3(1.0000, 0.9804, 0.9412) #fffaf0
Ivory = Vector3(1.0000, 1.0000, 0.9412) #fffff0
AntiqueWhite = Vector3(0.9804, 0.9216, 0.8431) #faebd7
Linen = Vector3(0.9804, 0.9412, 0.9020) #faf0e6
LavenderBlush = Vector3(1.0000, 0.9412, 0.9608) #fff0f5
MistyRose = Vector3(1.0000, 0.8941, 0.8824) #ffe4e1
# Gray colors
Gainsboro = Vector3(0.8627, 0.8627, 0.8627) #dcdcdc
LightGray = Vector3(0.8275, 0.8275, 0.8275) #d3d3d3
LightGrey = Vector3(0.8275, 0.8275, 0.8275) #d3d3d3
Silver = Vector3(0.7529, 0.7529, 0.7529) #c0c0c0
DarkGray = Vector3(0.6627, 0.6627, 0.6627) #a9a9a9
DarkGrey = Vector3(0.6627, 0.6627, 0.6627) #a9a9a9
Gray = Vector3(0.5020, 0.5020, 0.5020) #808080
Grey = Vector3(0.5020, 0.5020, 0.5020) #808080
DimGray = Vector3(0.4118, 0.4118, 0.4118) #696969
DimGrey = Vector3(0.4118, 0.4118, 0.4118) #696969
LightSlateGray = Vector3(0.4667, 0.5333, 0.6000) #778899
LightSlateGrey = Vector3(0.4667, 0.5333, 0.6000) #778899
SlateGray = Vector3(0.4392, 0.5020, 0.5647) #708090
SlateGrey = Vector3(0.4392, 0.5020, 0.5647) #708090
DarkSlateGray = Vector3(0.1843, 0.3098, 0.3098) #2f4f4f
DarkSlateGrey = Vector3(0.1843, 0.3098, 0.3098) #2f4f4f
Black = Vector3(0.0000, 0.0000, 0.0000) #000000
# ---------- lowercase ----------
# Pink colors
pink = Vector3(1.0000, 0.7529, 0.7961) #ffc0cb
lightpink = Vector3(1.0000, 0.7137, 0.7569) #ffb6c1
hotpink = Vector3(1.0000, 0.4118, 0.7059) #ff69b4
deeppink = Vector3(1.0000, 0.0784, 0.5765) #ff1493
palevioletred = Vector3(0.8588, 0.4392, 0.5765) #db7093
mediumvioletred = Vector3(0.7804, 0.0824, 0.5216) #c71585
# Red colors
lightsalmon = Vector3(1.0000, 0.6275, 0.4784) #ffa07a
salmon = Vector3(0.9804, 0.5020, 0.4471) #fa8072
darksalmon = Vector3(0.9137, 0.5882, 0.4784) #e9967a
lightcoral = Vector3(0.9412, 0.5020, 0.5020) #f08080
indianred = Vector3(0.8039, 0.3608, 0.3608) #cd5c5c
crimson = Vector3(0.8627, 0.0784, 0.2353) #dc143c
firebrick = Vector3(0.6980, 0.1333, 0.1333) #b22222
darkred = Vector3(0.5451, 0.0000, 0.0000) #8b0000
red = Vector3(1.0000, 0.0000, 0.0000) #ff0000
# Orange colors
orangered = Vector3(1.0000, 0.2706, 0.0000) #ff4500
tomato = Vector3(1.0000, 0.3882, 0.2784) #ff6347
coral = Vector3(1.0000, 0.4980, 0.3137) #ff7f50
darkorange = Vector3(1.0000, 0.5490, 0.0000) #ff8c00
orange = Vector3(1.0000, 0.6471, 0.0000) #ffa500
# Yellow colors
yellow = Vector3(1.0000, 1.0000, 0.0000) #ffff00
lightyellow = Vector3(1.0000, 1.0000, 0.8784) #ffffe0
lemonchiffon = Vector3(1.0000, 0.9804, 0.8039) #fffacd
lightgoldenrodyellow = Vector3(0.9804, 0.9804, 0.8235) #fafad2
papayawhip = Vector3(1.0000, 0.9373, 0.8353) #ffefd5
moccasin = Vector3(1.0000, 0.8941, 0.7098) #ffe4b5
peachpuff = Vector3(1.0000, 0.8549, 0.7255) #ffdab9
palegoldenrod = Vector3(0.9333, 0.9098, 0.6667) #eee8aa
khaki = Vector3(0.9412, 0.9020, 0.5490) #f0e68c
darkkhaki = Vector3(0.7412, 0.7176, 0.4196) #bdb76b
gold = Vector3(1.0000, 0.8431, 0.0000) #ffd700
# Brown colors
cornsilk = Vector3(1.0000, 0.9725, 0.8627) #fff8dc
blanchedalmond = Vector3(1.0000, 0.9216, 0.8039) #ffebcd
bisque = Vector3(1.0000, 0.8941, 0.7686) #ffe4c4
navajowhite = Vector3(1.0000, 0.8706, 0.6784) #ffdead
wheat = Vector3(0.9608, 0.8706, 0.7020) #f5deb3
burlywood = Vector3(0.8706, 0.7216, 0.5294) #deb887
tan = Vector3(0.8235, 0.7059, 0.5490) #d2b48c
rosybrown = Vector3(0.7373, 0.5608, 0.5608) #bc8f8f
sandybrown = Vector3(0.9569, 0.6431, 0.3765) #f4a460
goldenrod = Vector3(0.8549, 0.6471, 0.1255) #daa520
darkgoldenrod = Vector3(0.7216, 0.5255, 0.0431) #b8860b
peru = Vector3(0.8039, 0.5216, 0.2471) #cd853f
chocolate = Vector3(0.8235, 0.4118, 0.1176) #d2691e
saddlebrown = Vector3(0.5451, 0.2706, 0.0745) #8b4513
sienna = Vector3(0.6275, 0.3216, 0.1765) #a0522d
brown = Vector3(0.6471, 0.1647, 0.1647) #a52a2a
maroon = Vector3(0.5020, 0.0000, 0.0000) #800000
# Green colors
darkolivegreen = Vector3(0.3333, 0.4196, 0.1843) #556b2f
olive = Vector3(0.5020, 0.5020, 0.0000) #808000
olivedrab = Vector3(0.4196, 0.5569, 0.1373) #6b8e23
yellowgreen = Vector3(0.6039, 0.8039, 0.1961) #9acd32
limegreen = Vector3(0.1961, 0.8039, 0.1961) #32cd32
lime = Vector3(0.0000, 1.0000, 0.0000) #00ff00
lawngreen = Vector3(0.4863, 0.9882, 0.0000) #7cfc00
chartreuse = Vector3(0.4980, 1.0000, 0.0000) #7fff00
greenyellow = Vector3(0.6784, 1.0000, 0.1843) #adff2f
springgreen = Vector3(0.0000, 1.0000, 0.4980) #00ff7f
mediumspringgreen = Vector3(0.0000, 0.9804, 0.6039) #00fa9a
lightgreen = Vector3(0.5647, 0.9333, 0.5647) #90ee90
palegreen = Vector3(0.5961, 0.9843, 0.5961) #98fb98
darkseagreen = Vector3(0.5608, 0.7373, 0.5608) #8fbc8f
mediumaquamarine = Vector3(0.4000, 0.8039, 0.6667) #66cdaa
mediumseagreen = Vector3(0.2353, 0.7020, 0.4431) #3cb371
seagreen = Vector3(0.1804, 0.5451, 0.3412) #2e8b57
forestgreen = Vector3(0.1333, 0.5451, 0.1333) #228b22
green = Vector3(0.0000, 0.5020, 0.0000) #008000
darkgreen = Vector3(0.0000, 0.3922, 0.0000) #006400
# Cyan colors
aqua = Vector3(0.0000, 1.0000, 1.0000) #00ffff
cyan = Vector3(0.0000, 1.0000, 1.0000) #00ffff
lightcyan = Vector3(0.8784, 1.0000, 1.0000) #e0ffff
paleturquoise = Vector3(0.6863, 0.9333, 0.9333) #afeeee
aquamarine = Vector3(0.4980, 1.0000, 0.8314) #7fffd4
turquoise = Vector3(0.2510, 0.8784, 0.8157) #40e0d0
mediumturquoise = Vector3(0.2824, 0.8196, 0.8000) #48d1cc
darkturquoise = Vector3(0.0000, 0.8078, 0.8196) #00ced1
lightseagreen = Vector3(0.1255, 0.6980, 0.6667) #20b2aa
cadetblue = Vector3(0.3725, 0.6196, 0.6275) #5f9ea0
darkcyan = Vector3(0.0000, 0.5451, 0.5451) #008b8b
teal = Vector3(0.0000, 0.5020, 0.5020) #008080
# Blue colors
lightsteelblue = Vector3(0.6902, 0.7686, 0.8706) #b0c4de
powderblue = Vector3(0.6902, 0.8784, 0.9020) #b0e0e6
lightblue = Vector3(0.6784, 0.8471, 0.9020) #add8e6
skyblue = Vector3(0.5294, 0.8078, 0.9216) #87ceeb
lightskyblue = Vector3(0.5294, 0.8078, 0.9804) #87cefa
deepskyblue = Vector3(0.0000, 0.7490, 1.0000) #00bfff
dodgerblue = Vector3(0.1176, 0.5647, 1.0000) #1e90ff
cornflowerblue = Vector3(0.3922, 0.5843, 0.9294) #6495ed
steelblue = Vector3(0.2745, 0.5098, 0.7059) #4682b4
royalblue = Vector3(0.2549, 0.4118, 0.8824) #4169e1
blue = Vector3(0.0000, 0.0000, 1.0000) #0000ff
mediumblue = Vector3(0.0000, 0.0000, 0.8039) #0000cd
darkblue = Vector3(0.0000, 0.0000, 0.5451) #00008b
navy = Vector3(0.0000, 0.0000, 0.5020) #000080
midnightblue = Vector3(0.0980, 0.0980, 0.4392) #191970
# Violet colors
lavender = Vector3(0.9020, 0.9020, 0.9804) #e6e6fa
thistle = Vector3(0.8471, 0.7490, 0.8471) #d8bfd8
plum = Vector3(0.8667, 0.6275, 0.8667) #dda0dd
violet = Vector3(0.9333, 0.5098, 0.9333) #ee82ee
orchid = Vector3(0.8549, 0.4392, 0.8392) #da70d6
fuchsia = Vector3(1.0000, 0.0000, 1.0000) #ff00ff
magenta = Vector3(1.0000, 0.0000, 1.0000) #ff00ff
mediumorchid = Vector3(0.7294, 0.3333, 0.8275) #ba55d3
mediumpurple = Vector3(0.5765, 0.4392, 0.8588) #9370db
blueviolet = Vector3(0.5412, 0.1686, 0.8863) #8a2be2
darkviolet = Vector3(0.5804, 0.0000, 0.8275) #9400d3
darkorchid = Vector3(0.6000, 0.1961, 0.8000) #9932cc
darkmagenta = Vector3(0.5451, 0.0000, 0.5451) #8b008b
purple = Vector3(0.5020, 0.0000, 0.5020) #800080
indigo = Vector3(0.2941, 0.0000, 0.5098) #4b0082
darkslateblue = Vector3(0.2824, 0.2392, 0.5451) #483d8b
slateblue = Vector3(0.4157, 0.3529, 0.8039) #6a5acd
mediumslateblue = Vector3(0.4824, 0.4078, 0.9333) #7b68ee
rebeccapurple = Vector3(0.4000, 0.2000, 0.6000) #663399
# White colors
white = Vector3(1.0000, 1.0000, 1.0000) #ffffff
snow = Vector3(1.0000, 0.9804, 0.9804) #fffafa
honeydew = Vector3(0.9412, 1.0000, 0.9412) #f0fff0
mintcream = Vector3(0.9608, 1.0000, 0.9804) #f5fffa
azure = Vector3(0.9412, 1.0000, 1.0000) #f0ffff
aliceblue = Vector3(0.9412, 0.9725, 1.0000) #f0f8ff
ghostwhite = Vector3(0.9725, 0.9725, 1.0000) #f8f8ff
whitesmoke = Vector3(0.9608, 0.9608, 0.9608) #f5f5f5
seashell = Vector3(1.0000, 0.9608, 0.9333) #fff5ee
beige = Vector3(0.9608, 0.9608, 0.8627) #f5f5dc
oldlace = Vector3(0.9922, 0.9608, 0.9020) #fdf5e6
floralwhite = Vector3(1.0000, 0.9804, 0.9412) #fffaf0
ivory = Vector3(1.0000, 1.0000, 0.9412) #fffff0
antiquewhite = Vector3(0.9804, 0.9216, 0.8431) #faebd7
linen = Vector3(0.9804, 0.9412, 0.9020) #faf0e6
lavenderblush = Vector3(1.0000, 0.9412, 0.9608) #fff0f5
mistyrose = Vector3(1.0000, 0.8941, 0.8824) #ffe4e1
# Gray colors
gainsboro = Vector3(0.8627, 0.8627, 0.8627) #dcdcdc
lightgray = Vector3(0.8275, 0.8275, 0.8275) #d3d3d3
lightgrey = Vector3(0.8275, 0.8275, 0.8275) #d3d3d3
silver = Vector3(0.7529, 0.7529, 0.7529) #c0c0c0
darkgray = Vector3(0.6627, 0.6627, 0.6627) #a9a9a9
darkgrey = Vector3(0.6627, 0.6627, 0.6627) #a9a9a9
gray = Vector3(0.5020, 0.5020, 0.5020) #808080
grey = Vector3(0.5020, 0.5020, 0.5020) #808080
dimgray = Vector3(0.4118, 0.4118, 0.4118) #696969
dimgrey = Vector3(0.4118, 0.4118, 0.4118) #696969
lightslategray = Vector3(0.4667, 0.5333, 0.6000) #778899
lightslategrey = Vector3(0.4667, 0.5333, 0.6000) #778899
slategray = Vector3(0.4392, 0.5020, 0.5647) #708090
slategrey = Vector3(0.4392, 0.5020, 0.5647) #708090
darkslategray = Vector3(0.1843, 0.3098, 0.3098) #2f4f4f
darkslategrey = Vector3(0.1843, 0.3098, 0.3098) #2f4f4f
black = Vector3(0.0000, 0.0000, 0.0000) #000000
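# The values above are the standard CSS colors with each 8-bit channel normalized
# to [0, 1] (e.g. #40e0d0 -> 0x40/255 = 0.2510). A small conversion sketch
# (hypothetical; assumes Vector3 exposes x, y, z components, which is not shown here):
#     def to_hex(c):
#         return "#{:02x}{:02x}{:02x}".format(
#             round(c.x * 255), round(c.y * 255), round(c.z * 255))
#     to_hex(Turquoise)  # -> '#40e0d0'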
|
# mass conservation approach for uniterminal graphs
# Subclass of BistabilityFinder and BistabilityAnalysis
import os
import numpy
import sympy
import sympy.utilities.lambdify
import scipy.optimize
import sys
import time
import numpy.linalg
import itertools
import warnings
import math
from .bistability_finder import BistabilityFinder
from .bistability_analysis import BistabilityAnalysis
class MassConservationApproach(BistabilityFinder, BistabilityAnalysis):
"""
Class for constructing variables and methods needed for the mass conservation approach.
"""
def __init__(self, cgraph, get_physiological_range):
"""
Initialization of the MassConservationApproach class.
See also
---------
crnt4sbml.CRNT.get_mass_conservation_approach()
"""
self.__cgraph = cgraph
self.get_physiological_range = get_physiological_range
if not all([i <= 1 for i in self.__cgraph.get_number_of_terminal_strong_lc_per_lc()]):
print("The network is not uniterminal!")
sys.exit()
if not self.__cgraph.get_dim_equilibrium_manifold() > 0:
print("# of species - rank(S) is not greater than zero!")
print("The mass conservation approach cannot be ran!")
sys.exit()
# declare key fields
self.__deficiency_pars = None
self.__concentration_pars = None
self.__reaction_pars = None
self.__W = None # matrix
self.__W_nullspace = None
self.__H = None # vector
self.__G = None # matrix
self.__symbolic_objective_fun = None
self.__concentration_vals = None
self.__decision_vector_x = None
self.__concentration_funs = None
self.__objective_fun_params = None
self.__lambda_objective_fun = None
self.__important_info = ""
self.__numpy_dtype = None
self.__independent_odes = None
self.__independent_species = None
self.__comm = None
self.__my_rank = None
self.__num_cores = None
self.__method = "MassConservationApproach"
# vars used frequently
self.__N = len(self.__cgraph.get_species())
self.__R = len(self.__cgraph.get_reactions())
self.__species = self.__cgraph.get_species()
self.__reactions = self.__cgraph.get_reactions()
self.__delta = self.__cgraph.get_deficiency()
self.__M = len(self.__cgraph.get_complexes())
self.__ell = len(self.__cgraph.get_linkage_classes())
self.__lambda = self.__cgraph.get_dim_equilibrium_manifold()
self.__classification = self.__cgraph.get_network_dimensionality_classification()
# compute necessary vectors and matrices
self.__create_deficiency_pars()
self.__create_concentration_pars()
self.__create_reaction_pars()
self.__create_w_matrix()
self.__create_w_nullspace()
self.__create_h_vector()
self.__create_g_matrix()
self.__create_symbolic_objective_fun()
self.__create_decision_vector_x()
self.__create_concentration_bounds_species()
self.__create_concentration_lambda_fun()
self.__create_objective_fun__lambda_fun()
self.__create_g_matrix_lambda_fun()
self.__create_dch_matrix_lambda_fun()
def __create_deficiency_pars(self):
# creating a vector of the deficiency parameters
# \alpha_1, ..., \alpha_\delta
self.__deficiency_pars = [sympy.symbols('a' + str(i + 1), real=True) for i
in range(self.__delta)]
def __create_concentration_pars(self):
# putting the species in a list for more readable code
self.__concentration_pars = [sympy.Symbol(self.__species[i], positive=True) for i in range(self.__N)]
def __create_reaction_pars(self):
self.__reaction_pars = [sympy.Symbol(self.__reactions[i], positive=True) for i in range(self.__R)]
def __create_w_matrix(self):
# concatenating Y and Lambda_T columnwise
# to create [Y,Lambda_T]^T
self.__W = self.__cgraph.get_y().col_join(self.__cgraph.get_lambda().T)
def __create_w_nullspace(self):
# Finding the null space of [Y,Lambda_T]^T,
# i.e. \omega_i i=1,...,\delta
self.__W_nullspace = self.__W.nullspace()
def __create_h_vector(self):
print("Creating Equilibrium Manifold ...")
start = time.process_time()
# creates the H vector in equation (10) by finding the linearly
# independent rows of (9)
# symbolic form of the psi vector
psi_symbol_vec = sympy.zeros(self.__M, 1)
for i in range(self.__M):
psi_symbol_vec[i] = sympy.Symbol('psi' + str(i))
# creating the right-hand side defined by
# \sum_{i=1}^\delta \alpha_i \omega_i
rhs = sympy.zeros(self.__M, 1)
temp = sympy.zeros(self.__M, 1)
for i in range(self.__delta):
for j in range(self.__M):
temp[j] = self.__deficiency_pars[i] * self.__W_nullspace[i][j]
rhs += temp
# creating H(c,\alpha,k) with rows that might
# be linearly dependent
temp_vec = self.__cgraph.get_a() * self.__cgraph.get_psi() - rhs
temp_vec2 = self.__cgraph.get_a() * psi_symbol_vec - rhs
# creating a matrix of coefficients for the variables
# psi_1, ..., psi_M, alpha_1, ... alpha_delta
variables = [sympy.Symbol('psi' + str(i)) for i in range(self.__M)] + self.__deficiency_pars
a, _ = sympy.linear_eq_to_matrix(temp_vec2, variables)
temp_mat_2 = sympy.zeros(self.__M, self.__M + self.__delta)
# preallocating the H vector
self.__H = sympy.zeros(self.__M - self.__ell, 1)
leng = self.__M - self.__ell - self.__delta
comb = list(itertools.combinations(self.__concentration_pars, leng))
# building the different possible independent variable sets
indp_vars = []
for i in comb:
indp_vars.append(list(i) + self.__deficiency_pars)
self.__indices_explored = []
self.__counts_n_indices = []
# continue loop until acceptable concentration solutions are found
flag = True
indicies_to_skip = []
while flag:
reordered_indices, chosen_index = self.__create_fixed_free_pars_and_reordered_ind(temp_vec,
indicies_to_skip,
indp_vars)
for i in range(self.__M):
temp_mat_2[i, :] = a[reordered_indices[i], :]
# rref gives temp_vals, the rows of temp_mat_2 that are linearly
# independent of the others; this allows us
# to create H(c, \alpha, k)
_, temp_vals = temp_mat_2.T.rref()
vals = [reordered_indices[i] for i in temp_vals]
# Filling the H vector with linearly independent rows
for i in range(len(vals)):
self.__H[i] = temp_vec[vals[i]]
flag = self.__create_concentration_values()
if flag:
indicies_to_skip.append(chosen_index)
nn = len(self.__concentration_pars)
rr = self.__M - self.__ell - self.__delta
if len(indicies_to_skip) == math.factorial(nn) / (math.factorial(rr) * math.factorial(nn - rr)):
flag = False
raise Exception(
"An analytic solution for the concentrations could not be found. The mass conservation approach connot be used.")
end = time.process_time()
print("Elapsed time for creating Equilibrium Manifold: " + str(end - start))
def __create_fixed_free_pars_and_reordered_ind(self, temp_vec, indicies_to_skip, indp_vars):
# determining the different combinations of concentrations
# present in the independent variables once the deficiency
# parameters are chosen to be independent
leng = self.__M - self.__ell - self.__delta
# finding the number of linear equations produced by a
# given independent variable set
for jj in range(len(indp_vars)):
if jj not in indicies_to_skip and jj not in self.__indices_explored:
num_lin_entries = [self.__is_linear(temp_vec[j], indp_vars[jj][0:leng]) for j in
range(temp_vec.shape[0])].count(True)
self.__counts_n_indices.append([num_lin_entries, jj])
self.__indices_explored.append(jj)
# if all of the equations are linear, stop;
# this prevents long run times
if num_lin_entries == self.__M:
break
# picking the independent variable set that yields the
# largest number of linear equations
max_element = self.__max_element(indicies_to_skip)
chosen_index = max_element[1]
self.__fixed_pars = indp_vars[chosen_index]
self.__free_pars = [i for i in self.__deficiency_pars + self.__concentration_pars if i not in self.__fixed_pars]
# rearranging A s.t. the linear equations are in the top
# rows, this is for convenience and easier solutions for
# the independent variables
out = [self.__is_linear(temp_vec[j], indp_vars[chosen_index]) for j in range(temp_vec.shape[0])]
reordered_indices = [i for i, x in enumerate(out) if x] + [i for i, x in enumerate(out) if not x]
return reordered_indices, max_element[1]
def __max_element(self, indicies_to_skip):
temp = self.__counts_n_indices[:]
for i in self.__counts_n_indices:
if i[1] in indicies_to_skip:
temp.remove(i)
return max(temp, key=lambda item: item[0])
# routine that determines if a sympy expression is jointly
# linear with respect to a given set of variables. This
# test is conducted by seeing if the second order derivatives
# are zero.
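# Illustrative sketch (added for clarity, not part of the original routine):
#     s1, s2, a1, re1 = sympy.symbols('s1 s2 a1 re1')
#     self.__is_linear(a1*s1 + re1*s2, [s1, s2])  # -> True (all second derivatives are zero)
#     self.__is_linear(s1*s2, [s1, s2])           # -> False (d^2/(ds1 ds2) = 1)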
def __is_linear(self, expr, variables):
for x in variables:
for y in variables:
try:
if not sympy.Eq(sympy.diff(expr, x, y), 0):
return False
except TypeError:
return False
return True
def __create_g_matrix(self):
# creating the matrix DCH which is the jacobian of
# the vector H with respect to the concentration vector
self.__DCH = self.__H.jacobian(sympy.Matrix(self.__concentration_pars))
# creation of the matrix dah which is the jacobian of
# the vector H with respect to the deficiency param vector
dah = self.__H.jacobian(sympy.Matrix(self.__deficiency_pars))
# creation of the matrix daw which is the jacobian of
# the vector W with respect to the deficiency param vector
# However, as given in page 11, D_\alpha W = 0, thus
# just a zero matrix of size \lambda by \delta
daw = sympy.zeros(self.__lambda, self.__delta)
# creating the upper half of the matrix G i.e. [DCH dah]
g_upper = self.__DCH.row_join(dah)
# creating the lower half of the matrix G i.e. [DCW daw]
# Note that D_c W = B^T
g_lower = self.__cgraph.get_b().row_join(daw)
# putting the upper and lower half of the matrix together
# this forms the full G(c, \alpha, k) matrix
self.__G = g_upper.col_join(g_lower)
def __create_symbolic_objective_fun(self):
# computing the simplified version of the objective
# function defined as: det(G(c, \alpha, k))^2
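# (note: squaring the determinant keeps the objective non-negative, so a
# global minimum of zero corresponds to a singular G matrix)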
self.__symbolic_objective_fun = (self.__G.det(method='lu')) ** 2
def __create_concentration_values(self):
# Putting the concentrations in terms of the kinetic
# constants and deficiency parameters using the H
# vector of the equilibrium manifold
try:
temp_solution_tuple = sympy.solve(self.__H, self.__fixed_pars, dict=True)
except Exception:
temp_solution_tuple = []
if not temp_solution_tuple:
flag = True
else:
if isinstance(temp_solution_tuple, dict):
self.__concentration_vals = []
for i in self.__concentration_pars:
if i in temp_solution_tuple:
self.__concentration_vals.append(temp_solution_tuple[i])
else:
self.__concentration_vals.append(i)
# multiple solutions found
else:
solution_list = []
for i in temp_solution_tuple:
temp = []
for j in self.__concentration_pars:
if j in i:
temp.append(i[j])
else:
temp.append(j)
solution_list.append(temp)
self.__concentration_vals = self.__pick_solution_set(
solution_list)  # TODO: do the same for the single-solution branch above (set flag to True if a concentration is negative)
for i in self.__concentration_vals:
deficiency_pars_found = [i.count(j) > 0 for j in self.__deficiency_pars + self.__fixed_pars]
if True in deficiency_pars_found:
flag = True
break
else:
flag = False
return flag
# choose the solution set that is most likely to produce
# positive concentrations
def __pick_solution_set(self, solution_list):
positivity = []
for i in solution_list:
temp = []
for j in i:
temp.append(j.is_positive)
positivity.append(temp)
verdict = []
for i in positivity:
if False not in i:
if all(i):
# positive concentrations achieved
verdict.append("P")
elif True in i:
# positive concentrations and Nones
verdict.append("PU")
else:
# All entries are None
verdict.append("U")
else:
# negative concentration given
verdict.append("N")
if "P" in verdict:
indx = verdict.index("P")
choice = solution_list[indx]
elif "PU" in verdict:
indx = verdict.index("PU")
choice = solution_list[indx]
elif "U" in verdict:
indx = verdict.index("U")
choice = solution_list[indx]
else:
print("Solution chosen produces all negative concentrations!")
sys.exit()
return choice
def __create_decision_vector_x(self):
# if it is a proper/over-dimensioned network let
# xvec = (k_1 , ... k_R, alpha_1, ... alpha_lambda)
# else let
# xvec = (k_1 , ... k_R, alpha_1, ... alpha_delta,
# c_1, ..., c_(lambda - delta))
self.__decision_vector_x = self.__reaction_pars + self.__free_pars
self.__d_len = len(self.__decision_vector_x)
def __create_concentration_bounds_species(self):
self.__concentration_bounds_species = [i for i in self.__concentration_pars
if i not in self.__decision_vector_x]
def __create_concentration_lambda_fun(self):
self.__concentration_funs = []
for i in range(self.__N):
self.__concentration_funs += [sympy.utilities.lambdify(self.__decision_vector_x,
self.__concentration_vals[i])]
def __create_objective_fun__lambda_fun(self):
self.__objective_fun_params = self.__reaction_pars + self.__concentration_pars
self.__lambda_objective_fun = sympy.utilities.lambdify(self.__objective_fun_params,
self.__symbolic_objective_fun)
def __create_g_matrix_lambda_fun(self):
self.__lambda_G_matrix = sympy.utilities.lambdify(self.__objective_fun_params, self.__G)
def __create_dch_matrix_lambda_fun(self):
self.__lambda_DCH_matrix = sympy.utilities.lambdify(self.__objective_fun_params, self.__DCH)
# getters
def get_w_matrix(self):
"""
Returns SymPy matrix :math:`[Y, \Lambda^T]^T`, which we call the W matrix.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> import sympy
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> sympy.pprint(approach.get_w_matrix())
⎡1 0 0 0 0 1 1 0 0⎤
⎢ ⎥
⎢1 0 1 0 0 0 0 0 0⎥
⎢ ⎥
⎢0 1 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢0 0 1 1 0 0 1 0 2⎥
⎢ ⎥
⎢0 0 0 1 0 1 0 0 0⎥
⎢ ⎥
⎢0 0 0 0 1 0 0 0 0⎥
⎢ ⎥
⎢0 0 0 0 0 0 0 1 0⎥
⎢ ⎥
⎢1 1 1 0 0 0 0 0 0⎥
⎢ ⎥
⎢0 0 0 1 1 1 0 0 0⎥
⎢ ⎥
⎣0 0 0 0 0 0 1 1 1⎦
"""
return self.__W
def get_w_nullspace(self):
"""
Returns a list of SymPy column vectors representing :math:`Null([Y, \Lambda^T]^T)`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> import sympy
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> sympy.pprint(approach.get_w_nullspace())
⎡⎡-1⎤ ⎡1 ⎤⎤
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢0 ⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢1 ⎥ ⎢-1⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢-1⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢0 ⎥, ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢1 ⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢0 ⎥ ⎢-1⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢0 ⎥ ⎢0 ⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎣⎣0 ⎦ ⎣1 ⎦⎦
"""
return self.__W_nullspace
def get_h_vector(self):
"""
Returns a SymPy matrix representing the equilibrium manifold.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> import sympy
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> sympy.pprint(approach.get_h_vector())
⎡a₁ - a₂ - re₁⋅s₁⋅s₂ + re1r⋅s₃⎤
⎢ ⎥
⎢re₁⋅s₁⋅s₂ + s₃⋅(-re1r - re₂) ⎥
⎢ ⎥
⎢ a₁ - re₃⋅s₆⋅s₇ + re3r⋅s₁₆ ⎥
⎢ ⎥
⎢re₃⋅s₆⋅s₇ + s₁₆⋅(-re3r - re₄)⎥
⎢ ⎥
⎢ a₂ - re₅⋅s₁⋅s₆ + re5r⋅s₁₅ ⎥
⎢ ⎥
⎣re₅⋅s₁⋅s₆ + s₁₅⋅(-re5r - re₆)⎦
"""
return self.__H
def get_g_matrix(self):
"""
Returns a SymPy matrix representing the G matrix of the defined optimization problem.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> import sympy
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> sympy.pprint(approach.get_g_matrix())
⎡-re₁⋅s₂ -re₁⋅s₁ re1r 0 0 0 0 1 -1⎤
⎢ ⎥
⎢re₁⋅s₂ re₁⋅s₁ -re1r - re₂ 0 0 0 0 0 0 ⎥
⎢ ⎥
⎢ 0 0 0 -re₃⋅s₇ -re₃⋅s₆ re3r 0 1 0 ⎥
⎢ ⎥
⎢ 0 0 0 re₃⋅s₇ re₃⋅s₆ -re3r - re₄ 0 0 0 ⎥
⎢ ⎥
⎢-re₅⋅s₆ 0 0 -re₅⋅s₁ 0 0 re5r 0 1 ⎥
⎢ ⎥
⎢re₅⋅s₆ 0 0 re₅⋅s₁ 0 0 -re5r - re₆ 0 0 ⎥
⎢ ⎥
⎢ 0 0 0 0 1.0 1.0 0 0 0 ⎥
⎢ ⎥
⎢ 0 1.0 1.0 0 0 0 0 0 0 ⎥
⎢ ⎥
⎣ 1.0 0 1.0 1.0 0 1.0 2.0 0 0 ⎦
"""
return self.__G
def get_dch_matrix(self):
"""
Returns a SymPy matrix representing the Jacobian of the equilibrium manifold with respect to the species.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> import sympy
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> sympy.pprint(approach.get_dch_matrix())
⎡-re₁⋅s₂ -re₁⋅s₁ re1r 0 0 0 0 ⎤
⎢ ⎥
⎢re₁⋅s₂ re₁⋅s₁ -re1r - re₂ 0 0 0 0 ⎥
⎢ ⎥
⎢ 0 0 0 -re₃⋅s₇ -re₃⋅s₆ re3r 0 ⎥
⎢ ⎥
⎢ 0 0 0 re₃⋅s₇ re₃⋅s₆ -re3r - re₄ 0 ⎥
⎢ ⎥
⎢-re₅⋅s₆ 0 0 -re₅⋅s₁ 0 0 re5r ⎥
⎢ ⎥
⎣re₅⋅s₆ 0 0 re₅⋅s₁ 0 0 -re5r - re₆⎦
"""
return self.__DCH
def get_lambda_g_matrix(self):
"""
Returns a lambda function representation of the G matrix. Here the arguments of the lambda function are given
by the values provided by :func:`crnt4sbml.MassConservationApproach.get_objective_fun_params`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_lambda_g_matrix())
<function _lambdifygenerated at 0x13248ac80>
"""
return self.__lambda_G_matrix
def get_lambda_dch_matrix(self):
"""
Returns a lambda function representation of the Jacobian of the equilibrium manifold matrix. Here the
arguments of the lambda function are given by the values provided by
:func:`crnt4sbml.MassConservationApproach.get_objective_fun_params`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_lambda_dch_matrix())
<function _lambdifygenerated at 0x131a06ea0>
"""
return self.__lambda_DCH_matrix
def get_symbolic_objective_fun(self):
"""
Returns SymPy expression for the objective function of the optimization problem. This is the determinant of the
G matrix squared.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_symbolic_objective_fun())
1.0*re1**2*re2**2*re3**2*re4**2*re5**2*re6**2*s1**2*s6**2*s7**2*((1.0*s2/s7 - 1.0*s2*(-re3r - re4)/
(re3*s6*s7))/re4 + (1.0 + 1.0*re1r/(re1*s1))/re2 + 1.0/(re1*s1))**2*(-((1.0*s6*(-1.0*s1/s6 + 1.0)/s7 +
1.0 - (-re3r - re4)*(-1.0*s1/s6 + 1.0)/(re3*s7))/re4 + 1.0/re2)*(-1.0*re5r*s2/(re5*re6*s1*s6) -
1.0*s2*(1 + re5*s6/(re1*s2))/(re5*s1*s6) - (1.0 + 1.0*re1r/(re1*s1))/re2)/((1.0*s2/s7 - 1.0*s2*
(-re3r - re4)/(re3*s6*s7))/re4 + (1.0 + 1.0*re1r/(re1*s1))/re2 + 1.0/(re1*s1)) + (2.0 + 1.0*re5r/(re5*s6))/
re6 + 1.0*(1 + re5*s6/(re1*s2))/(re5*s6) - 1.0/re2 - 1.0/(re1*s2))**2
"""
return self.__symbolic_objective_fun
def get_lambda_objective_fun(self):
"""
Returns a lambda function representation of the objective function of the optimization problem. Here the
arguments of the lambda function are given by the values provided by
:func:`crnt4sbml.MassConservationApproach.get_objective_fun_params`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_lambda_objective_fun())
<function _lambdifygenerated at 0x12f6f7ea0>
"""
return self.__lambda_objective_fun
def get_concentration_vals(self):
"""
Returns a list of SymPy expressions representing the species in terms of those variables present in the decision
vector. The order is that established in :func:`crnt4sbml.Cgraph.get_species`. Note that if only a single
species is provided as an element in the list, this means the species is a free variable.
See also
---------
crnt4sbml.MassConservationApproach.get_concentration_solutions
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_concentration_vals())
[s15*(re5r + re6)/(re5*s6), s2, re1*s15*s2*(re5r + re6)/(re5*s6*(re1r + re2)), s6,
-s15*(re5*re5r*s6*(re1r + re2)*(re3r + re4) - (re5r + re6)*(-re1*re1r*re3r*s2 - re1*re1r*re4*s2 +
re1*re3r*s2*(re1r + re2) + re1*re4*s2*(re1r + re2) + re5*s6*(re1r + re2)*(re3r + re4)))/(re3*re4*re5*s6**2*
(re1r + re2)), s15*(re1*re2*re5r*s2 + re1*re2*re6*s2 + re1r*re5*re6*s6 + re2*re5*re6*s6)/(re4*re5*s6*(re1r + re2)), s15]
"""
return self.__concentration_vals
def get_decision_vector(self):
"""
Returns a list of SymPy variables that represent the decision vector of the optimization problem.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_decision_vector())
[re1, re1r, re2, re3, re3r, re4, re5, re5r, re6, s2, s6, s15]
"""
return self.__decision_vector_x
def get_concentration_bounds_species(self):
"""
Returns a list of SymPy variables that represents the order of species for the concentration bounds provided
to :func:`crnt4sbml.MassConservationApproach.run_optimization`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_concentration_bounds_species())
[s1, s3, s7, s16]
"""
return self.__concentration_bounds_species
def get_concentration_funs(self):
"""
Returns a list of lambda functions representing each of the species. Here the species are those expressions
provided by :func:`crnt4sbml.MassConservationApproach.get_concentration_vals` where the arguments of each
lambda function is provided by :func:`crnt4sbml.MassConservationApproach.get_decision_vector`.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_concentration_funs())
[<function _lambdifygenerated at 0x135f8b4d0>, <function _lambdifygenerated at 0x135f72050>,
<function _lambdifygenerated at 0x135f728c0>, <function _lambdifygenerated at 0x135f725f0>,
<function _lambdifygenerated at 0x135f5f830>, <function _lambdifygenerated at 0x135fa0170>,
<function _lambdifygenerated at 0x135fa04d0>]
"""
return self.__concentration_funs
def get_objective_fun_params(self):
"""
Returns a list of SymPy variables that represent those variables that may be contained in the G matrix, Jacobian
of the equilibrium manifold with respect to the species, or objective function.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_objective_fun_params())
[re1, re1r, re2, re3, re3r, re4, re5, re5r, re6, s1, s2, s3, s6, s7, s16, s15]
"""
return self.__objective_fun_params
def get_conservation_laws(self):
"""
Returns a string representation of the conservation laws. Here the values on the left hand side of each equation
are the constants of the conservation laws.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_conservation_laws())
C1 = 1.0*s16 + 1.0*s7
C2 = 1.0*s2 + 1.0*s3
C3 = 1.0*s1 + 2.0*s15 + 1.0*s16 + 1.0*s3 + 1.0*s6
"""
rhs = self.__cgraph.get_b() * sympy.Matrix([self.__concentration_pars]).T
laws = ""
for i in range(rhs.shape[0]):
laws += 'C' + str(i + 1) + ' = ' + str(rhs[i]) + '\n'
return laws
def get_concentration_solutions(self):
"""
Returns a more readable string representation of the species defined in terms of the decision vector.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> print(approach.get_concentration_solutions())
s1 = s15*(re5r + re6)/(re5*s6)
s2 = s2
s3 = re1*s15*s2*(re5r + re6)/(re5*s6*(re1r + re2))
s6 = s6
s7 = -s15*(re5*re5r*s6*(re1r + re2)*(re3r + re4) - (re5r + re6)*(-re1*re1r*re3r*s2 - re1*re1r*re4*s2 + re1*re3r*s2*(re1r + re2) + re1*re4*s2*(re1r + re2) + re5*s6*(re1r + re2)*(re3r + re4)))/(re3*re4*re5*s6**2*(re1r + re2))
s16 = s15*(re1*re2*re5r*s2 + re1*re2*re6*s2 + re1r*re5*re6*s6 + re2*re5*re6*s6)/(re4*re5*s6*(re1r + re2))
s15 = s15
"""
sols = ""
for i in range(self.__N):
sols += self.__species[i] + ' = ' + str(self.__concentration_vals[i]) + '\n'
return sols
def get_independent_odes(self):
"""
Returns a SymPy Matrix whose rows represent the independent ODEs used in the numerical continuation routine. Here
the rows correspond to the time derivatives of the corresponding species provided by
:func:`crnt4sbml.MassConservationApproach.get_independent_species`. Note that the independent ODEs created are
based on the species chosen for the numerical continuation. Thus, the continuation routine needs to be run
first. If this function is called before the numerical continuation routine, then None will be returned.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/sbml_file.xml")
>>> approach = network.get_mass_conservation_approach()
>>> multistable_param_ind = approach.run_greedy_continuity_analysis(species="species", parameters=params_for_global_min,
auto_parameters={'PrincipalContinuationParameter': "PCP"})
>>> odes = approach.get_independent_odes()
"""
return self.__independent_odes
def get_independent_species(self):
"""
Returns a list of SymPy representations of the independent species used in the numerical continuation routine.
Note that the independent species created are based on the species chosen for the numerical continuation. Thus,
the continuation routine needs to be run first. If this function is called before the numerical continuation
routine, then None will be returned.
Example
--------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/sbml_file.xml")
>>> approach = network.get_mass_conservation_approach()
>>> multistable_param_ind = approach.run_greedy_continuity_analysis(species="species", parameters=params_for_global_min,
auto_parameters={'PrincipalContinuationParameter': "PCP"})
>>> species = approach.get_independent_species()
"""
return self.__independent_species
def get_optimization_bounds(self):
"""
Builds all of the necessary physiological bounds for the optimization routine.
See :download:`Fig1Ci.xml <../../sbml_files/Fig1Ci.xml>` for the provided
example.
Returns
--------
bounds: list of tuples
List of tuples defining the upper and lower bounds for the decision vector variables based on physiological
ranges.
concentration_bounds: list of tuples
List of tuples defining the upper and lower bounds for those concentrations not in the decision vector
based on physiological ranges.
Examples
---------
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
Creating Equilibrium Manifold ...
Elapsed time for creating Equilibrium Manifold: 2.060944
>>> bounds, concentration_bounds = approach.get_optimization_bounds()
>>> print(bounds)
[(1e-08, 0.0001), (1e-05, 0.001), (0.001, 1.0), (1e-08, 0.0001), (1e-05, 0.001), (0.001, 1.0),
(1e-08, 0.0001), (1e-05, 0.001), (0.001, 1.0), (0.5, 500000.0), (0.5, 500000.0), (0.5, 500000.0)]
>>> print(concentration_bounds)
[(0.5, 500000.0), (0.5, 500000.0), (0.5, 500000.0), (0.5, 500000.0)]
"""
graph_edges = self.__cgraph.get_g_edges()
dec_vec_var_def = []
for i in self.get_decision_vector():
if i in self.__concentration_pars:
dec_vec_var_def.append("concentration")
elif i in self.__reaction_pars:
ind = self.__reaction_pars.index(i)
reaction = graph_edges[ind]
reaction_type = self.__cgraph.get_graph().edges[reaction]['type']
dec_vec_var_def.append(reaction_type)
if reaction_type is None:
output_statement = "The reaction type of reaction " + self.__cgraph.get_graph().edges[reaction][
'label'] \
+ " could not be identified as it does not fit any biological criteria " + \
"established. \n" + "You must enter bounds manually for this reaction! \n"
print(output_statement)
concentration_bounds = [self.get_physiological_range("concentration")] * len(
self.get_concentration_bounds_species())
bounds = [self.get_physiological_range(i) for i in dec_vec_var_def]
return bounds, concentration_bounds
def run_optimization(self, bounds=None, iterations=10, sys_min_val=numpy.finfo(float).eps, seed=0, print_flag=False,
numpy_dtype=numpy.float64, concentration_bounds=None, confidence_level_flag=False,
change_in_rel_error=1e-1, parallel_flag=False):
"""
Function for running the optimization problem for the mass conservation approach.
Parameters
-----------
bounds: list of tuples
A list defining the lower and upper bounds for each variable in the decision vector. Here the reactions
are allowed to be set to a single value.
iterations: int
The number of iterations to run the feasible point method.
sys_min_val: float
The value that should be considered zero for the optimization problem.
seed: int
Seed for the random number generator. None should be used if random generation is desired.
print_flag: bool
Should be set to True if the user wants the objective function values found in the optimization problem
and False otherwise.
numpy_dtype:
The numpy data type used within the optimization routine. All variables in the optimization routine will
be converted to this data type.
concentration_bounds: list of tuples
A list defining the lower and upper bounds for those species' concentrations not in the decision vector.
The user is not allowed to set the species' concentration to a single value. See also:
:func:`crnt4sbml.MassConservationApproach.get_concentration_bounds_species`.
confidence_level_flag: bool
If True a confidence level for the objective function will be given.
change_in_rel_error: float
The maximum relative error that should be allowed to consider :math:`f_k` in the neighborhood
of :math:`\widetilde{f}`.
parallel_flag: bool
If set to True, a parallel version of the optimization routine is run. If False, a serial version of the
optimization routine is run. See :ref:`parallel-gen-app-label`.
Returns
--------
params_for_global_min: list of numpy arrays
A list of numpy arrays that correspond to the decision vectors of the problem.
obj_fun_val_for_params: list of floats
A list of objective function values produced by the corresponding decision vectors in params_for_global_min.
Examples
---------
See :ref:`quickstart-deficiency-label` and :ref:`my-deficiency-label`.
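A minimal call sketch (illustrative only; the SBML path is a placeholder and the
returned values depend on the network):
>>> import crnt4sbml
>>> network = crnt4sbml.CRNT("path/to/Fig1Ci.xml")
>>> approach = network.get_mass_conservation_approach()
>>> bounds, concentration_bounds = approach.get_optimization_bounds()
>>> params_for_global_min, obj_fun_vals = approach.run_optimization(bounds=bounds,
...                                                                 concentration_bounds=concentration_bounds)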
"""
self.__initialize_optimization_variables(bounds, iterations, sys_min_val, seed, print_flag, numpy_dtype,
concentration_bounds, confidence_level_flag, change_in_rel_error,
parallel_flag)
params_for_global_min, obj_fun_val_for_params, self.__important_info = self._BistabilityFinder__parent_run_optimization()
self.__my_rank = self._BistabilityFinder__my_rank
self.__comm = self._BistabilityFinder__comm
return params_for_global_min, obj_fun_val_for_params
def __initialize_optimization_variables(self, bounds, iterations, sys_min_val, seed, print_flag, numpy_dtype,
concentration_bounds, confidence_level_flag, change_in_rel_error,
parallel_flag):
self.__bounds = bounds
self.__iterations = iterations
self.__seed = seed
self.__print_flag = print_flag
self.__concentration_bounds = concentration_bounds
self.__confidence_level_flag = confidence_level_flag
self.__change_in_rel_error = change_in_rel_error
self.__parallel_flag = parallel_flag
self.__numpy_dtype = numpy_dtype
self.__sys_min_val = self.__numpy_dtype(sys_min_val)
self.__x_full = None
self.__non_equality_bounds_indices = None
self.__MassConservationApproach__true_bounds = None
self.__true_bounds = None
self.__temp_c = numpy.zeros(self.__N, dtype=self.__numpy_dtype)
# testing to see if there are any equalities in bounds
self.__equality_bounds_indices = []
for i in range(len(self.__bounds)):
if not isinstance(self.__bounds[i], tuple):
self.__equality_bounds_indices.append(i)
# recasting user provided input to numpy_dtype
for i in range(len(self.__bounds)):
self.__bounds[i] = self.__numpy_dtype(self.__bounds[i])
for i in range(len(self.__concentration_bounds)):
self.__concentration_bounds[i] = self.__numpy_dtype(self.__concentration_bounds[i])
if len(self.__concentration_bounds) != len(self.__concentration_bounds_species):
print("Concentration bounds is the incorrect length!")
sys.exit()
self.__full_concentration_bounds = []
for i in range(self.__N):
if self.__concentration_pars[i] in self.__decision_vector_x:
indx = self.__decision_vector_x.index(self.__concentration_pars[i])
self.__full_concentration_bounds.append(self.__bounds[indx])
else:
indx = self.__concentration_bounds_species.index(self.__concentration_pars[i])
self.__full_concentration_bounds.append(self.__concentration_bounds[indx])
def __run_global_optimization_routine(self, initial_x):
result = scipy.optimize.basinhopping(self.__objective_function_to_optimize, initial_x,
minimizer_kwargs={'method': 'Nelder-Mead', 'tol': 1e-16},
niter=2, seed=self.__seed)
return result
def __run_local_optimization_routine(self, initial_x):
result = scipy.optimize.minimize(self.__objective_function_to_optimize, initial_x, method='Nelder-Mead', tol=1e-16)
return result
def __run_local_optimization_routine_penalty_1(self, initial_x):
result = scipy.optimize.minimize(self.__penalty_objective_func, initial_x, method='SLSQP', tol=1e-16, bounds=self.__true_bounds)
return result
def __run_local_optimization_routine_penalty_2(self, initial_x):
result = scipy.optimize.minimize(self.__penalty_objective_func, initial_x, method='Nelder-Mead', tol=1e-16)
return result
def __create_final_points(self, x_that_give_global_min):
output = self.__final_constraint_check(x_that_give_global_min)
if output[0]:
return numpy.array(list(output[1][:]))
def run_continuity_analysis(self, species=None, parameters=None, dir_path="./num_cont_graphs",
print_lbls_flag=False, auto_parameters=None, plot_labels=None):
"""
Function for running the numerical continuation and bistability analysis portions of the mass conservation
approach.
Parameters
------------
species: string
A string stating the species that is the y-axis of the bifurcation diagram.
parameters: list of numpy arrays
A list of numpy arrays corresponding to the decision vectors that produce a small objective function
value.
dir_path: string
A string stating the path where the bifurcation diagrams should be saved.
print_lbls_flag: bool
If True the routine will print the special points found by AUTO 2000 and False will not print any
special points.
auto_parameters: dict
Dictionary defining the parameters for the AUTO 2000 run. Please note that one should **not** set
'SBML' or 'ScanDirection' in these parameters as these are automatically assigned. It is absolutely
necessary to set PrincipalContinuationParameter in this dictionary. For more information on these
parameters refer to :download:`AUTO parameters <../auto2000_input.pdf>`. 'NMX' will default to
10000 and 'ITMX' to 100.
plot_labels: list of strings
A list of strings defining the labels for the x-axis, y-axis, and title. Where the first element
is the label for x-axis, second is the y-axis label, and the last element is the title label. If
you would like to use the default settings for some of the labels, simply provide None for that
element.
Returns
---------
multistable_param_ind: list of integers
A list of those indices in 'parameters' that produce multistable plots.
plot_specifications: list of lists
A list whose elements correspond to the plot specifications of each element in multistable_param_ind.
Each element is a list where the first element specifies the range used for the x-axis, the second
element is the range for the y-axis, and the last element provides the x-y values and special point label
for each special point in the plot.
Example
---------
See :ref:`quickstart-deficiency-label` and :ref:`my-deficiency-label`.
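A minimal call sketch (illustrative only; the species name and principal continuation
parameter below are placeholders for values appropriate to the network at hand):
>>> multistable_param_ind, plot_specifications = approach.run_continuity_analysis(
...     species="s15", parameters=params_for_global_min,
...     auto_parameters={'PrincipalContinuationParameter': 'C3'})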
"""
if self.__comm is not None:
if self.__my_rank == 0:
self.__initialize_continuity_analysis(species, parameters, dir_path, print_lbls_flag, auto_parameters,
plot_labels)
multistable_param_ind, important_info, plot_specifications = self._BistabilityAnalysis__parent_run_continuity_analysis()
else:
important_info = ''
multistable_param_ind = []
plot_specifications = []
self.__comm.Barrier()
else:
self.__initialize_continuity_analysis(species, parameters, dir_path, print_lbls_flag, auto_parameters,
plot_labels)
multistable_param_ind, important_info, plot_specifications = self._BistabilityAnalysis__parent_run_continuity_analysis()
self.__important_info += important_info
return multistable_param_ind, plot_specifications
def run_greedy_continuity_analysis(self, species=None, parameters=None, dir_path="./num_cont_graphs",
print_lbls_flag=False, auto_parameters=None, plot_labels=None):
"""
Function for running the greedy numerical continuation and bistability analysis portions of the mass conservation
approach. This routine uses the initial value of the principal continuation parameter to construct AUTO
parameters and then tests varying fixed step sizes for the continuation problem. Note that this routine may
produce jagged or missing sections in the plots provided. To produce better plots one should use the information
provided by this routine to run :func:`crnt4sbml.MassConservationApproach.run_continuity_analysis`.
Parameters
------------
species: string
A string stating the species that is the y-axis of the bifurcation diagram.
parameters: list of numpy arrays
A list of numpy arrays corresponding to the decision vectors that produce a small objective function
value.
dir_path: string
A string stating the path where the bifurcation diagrams should be saved.
print_lbls_flag: bool
If True the routine will print the special points found by AUTO 2000 and False will not print any
special points.
auto_parameters: dict
Dictionary defining the parameters for the AUTO 2000 run. Please note that only the
PrincipalContinuationParameter in this dictionary should be defined, no other AUTO parameters should
be set. For more information on these parameters refer to :download:`AUTO parameters <../auto2000_input.pdf>`.
plot_labels: list of strings
A list of strings defining the labels for the x-axis, y-axis, and title. Where the first element
is the label for x-axis, second is the y-axis label, and the last element is the title label. If
you would like to use the default settings for some of the labels, simply provide None for that
element.
Returns
---------
multistable_param_ind: list of integers
A list of those indices in 'parameters' that produce multistable plots.
plot_specifications: list of lists
A list whose elements correspond to the plot specifications of each element in multistable_param_ind.
Each element is a list where the first element specifies the range used for the x-axis, the second
element is the range for the y-axis, and the last element provides the x-y values and special point label
for each special point in the plot.
Example
---------
See :ref:`my-deficiency-label`.
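A minimal call sketch (illustrative only; species and principal continuation parameter
are placeholders):
>>> multistable_param_ind, plot_specifications = approach.run_greedy_continuity_analysis(
...     species="s15", parameters=params_for_global_min,
...     auto_parameters={'PrincipalContinuationParameter': 'C3'})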
"""
if self.__comm is not None:
if self.__my_rank == 0:
self.__initialize_continuity_analysis(species, parameters, dir_path, print_lbls_flag, auto_parameters,
plot_labels)
multistable_param_ind, important_info, plot_specifications = self._BistabilityAnalysis__parent_run_greedy_continuity_analysis()
else:
important_info = ''
multistable_param_ind = []
plot_specifications = []
self.__comm.Barrier()
else:
self.__initialize_continuity_analysis(species, parameters, dir_path, print_lbls_flag, auto_parameters,
plot_labels)
multistable_param_ind, important_info, plot_specifications = self._BistabilityAnalysis__parent_run_greedy_continuity_analysis()
self.__important_info += important_info
return multistable_param_ind, plot_specifications
def __initialize_continuity_analysis(self, species, parameters, dir_path, print_lbls_flag, auto_parameters, plot_labels):
self.__parameters = parameters
self.__dir_path = dir_path
self.__print_lbls_flag = print_lbls_flag
self.__auto_parameters = auto_parameters
self.__plot_labels = plot_labels
if self.__comm is not None:
print("")
print("A parallel version of numerical continuation is not available.")
print("Numerical continuation will be ran using only one core.")
print("For your convenience, the provided parameters have been saved in the current directory under the name params.npy.")
numpy.save('./params.npy', parameters)
# setting default values for AUTO
if 'NMX' not in self.__auto_parameters.keys():
self.__auto_parameters['NMX'] = 10000
if 'ITMX' not in self.__auto_parameters.keys():
self.__auto_parameters['ITMX'] = 100
# making the directory if it doesn't exist
if not os.path.isdir(self.__dir_path):
os.mkdir(self.__dir_path)
self.__species_num = self.__species.index(species) + 1
self.__species_y = str(self.__concentration_pars[self.__species_num - 1])
def run_direct_simulation(self, response=None, signal=None, params_for_global_min=None, dir_path="./dir_sim_graphs",
change_in_relative_error=1e-6, parallel_flag=False, print_flag=False,
left_multiplier=0.5, right_multiplier=0.5):
"""
Function for running direct simulation to conduct bistability analysis of the mass conservation approach.
Note: This routine is more expensive than the numerical continuation routines, but can provide solutions
when the Jacobian of the ODE system is always singular. A parallel version of this routine is available.
The routine automatically produces plots of the direct simulation runs and puts them in the user specified
dir_path.
Parameters
------------
response: string
A string stating the response species of the bifurcation analysis.
signal: string
A string stating the signal of the bifurcation analysis. Can be any of the conservation laws.
params_for_global_min: list of numpy arrays
A list of numpy arrays corresponding to the input vectors that produce a small objective function
value.
dir_path: string
A string stating the path where the bifurcation diagrams should be saved.
change_in_relative_error: float
A float value that determines how small the relative error should be in order for the solution of the
ODE system to be considered at a steady state. Note: a larger value will run faster, but may produce
an ODE system that is not at a steady state.
parallel_flag: bool
If set to True, a parallel version of direct simulation is run. If False, a serial version of the
routine is run. See :ref:`parallel-gen-app-label` for further information.
print_flag: bool
If set to True information about the direct simulation routine will be printed. If False, no output
will be provided.
left_multiplier: float
A float value that determines the percentage of the signal that will be searched to the left of the signal
value. For example, the lower bound for the signal range will be signal_value - signal_value*left_multiplier.
right_multiplier: float
A float value that determines the percentage of the signal that will be searched to the right of the signal
value. For example, the upper bound for the signal range will be signal_value + signal_value*right_multiplier.
Returns
---------
list_of_ggplots: list of ggplots produced by plotnine
Example
---------
See :ref:`my-deficiency-label`.
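A minimal call sketch (illustrative only; the response species and signal are
placeholders, and params_for_global_min comes from run_optimization):
>>> approach.run_direct_simulation(response="s15", signal="C3",
...                                params_for_global_min=params_for_global_min)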
"""
self.__initialize_direct_simulation(response, signal, params_for_global_min, dir_path, change_in_relative_error, parallel_flag,
print_flag, left_multiplier, right_multiplier)
self._BistabilityAnalysis__parent_run_direct_simulation()
self.__my_rank = self._BistabilityAnalysis__my_rank
self.__comm = self._BistabilityAnalysis__comm
def __initialize_direct_simulation(self, response, signal, params_for_global_min, dir_path,
change_in_relative_error, parallel_flag, print_flag, left_multiplier,
right_multiplier):
self.__parameters = params_for_global_min
self.__dir_path = dir_path
self.__change_in_relative_error = change_in_relative_error
self.__parallel_flag = parallel_flag
self.__dir_sim_print_flag = print_flag
self.__left_multiplier = left_multiplier
self.__right_multiplier = right_multiplier
self.__response = response
self.__signal = signal
self.__sympy_species = self.__concentration_pars
self.__sympy_reactions = self.__reaction_pars
self.__cons_laws_sympy = self.__cgraph.get_b() * sympy.Matrix([self.__concentration_pars]).T
self.__cons_laws_sympy_lamb = [sympy.utilities.lambdify(self.__concentration_pars, self.__cons_laws_sympy[i])
for i in range(len(self.__cons_laws_sympy))]
conservation_constants = ['C' + str(i + 1) for i in range(len(self.__cons_laws_sympy))]
self.__signal_index = conservation_constants.index(self.__signal)
lambda_inputs = self.__sympy_reactions + self.__sympy_species
self.__ode_lambda_functions = [sympy.utilities.lambdify(lambda_inputs, self.__cgraph.get_ode_system()[i]) for i in
range(len(self.__cgraph.get_ode_system()))]
self.__jac_lambda_function = sympy.utilities.lambdify(lambda_inputs,
self.__cgraph.get_ode_system().jacobian(self.__sympy_species))
def __initialize_ant_string(self, species_num, pcp_x):
y = self.__cgraph.get_y()
a = self.__cgraph.get_a()
bt = self.__cgraph.get_b()
psi = self.__cgraph.get_psi()
# forming ya matrix
ya = y * a
# finding how many rows are indep in ya
_, vals = ya.T.rref()
num_indp_eqns = len(vals)
num_dep_eqns = ya.shape[0] - num_indp_eqns
# getting dimensions of bt
bt_rows = bt.shape[0]
bt_cols = bt.shape[1]
bt_nonzero_ind = []
for i in range(bt_rows):
bt_nonzero_ind.append([j for j in range(bt_cols) if bt[i, j] != 0 and j != species_num - 1])
chosen_indp_indices, chosen_dep_indices = self.__get_indp_dep_species_indices(bt_nonzero_ind, num_dep_eqns,
num_indp_eqns, ya)
replacements, ind_spec_conc_temp, indp_odes_temp = self.__construct_important_variables(chosen_indp_indices,
chosen_dep_indices, ya,
psi, bt)
ode_str = self.__create_ode_str(replacements, ind_spec_conc_temp, indp_odes_temp, species_num)
return ode_str, pcp_x
def __get_indp_dep_species_indices(self, bt_nonzero_ind, num_dep_eqns, num_indp_eqns, ya):
# getting all combinations of the list indices
possible_dep_species = list(itertools.product(*bt_nonzero_ind))
removed_entries = []
# remove tuples that have duplicate entries
for i in range(len(possible_dep_species)):
if len(set(possible_dep_species[i])) != num_dep_eqns:
removed_entries.append(i)
for index in sorted(removed_entries, reverse=True):
del possible_dep_species[index]
# get the corresponding possible independent species
possible_indp_species = []
species_ind = [i for i in range(len(self.__concentration_pars))]
for i in possible_dep_species:
possible_indp_species.append([j for j in species_ind if j not in i])
# using YA to pick one of the possible indices
chosen_indp_indices = []
chosen_dep_indices = []
for i in range(len(possible_indp_species)):
_, vals = ya[possible_indp_species[i], :].T.rref()
if len(vals) == num_indp_eqns:
chosen_indp_indices = possible_indp_species[i]
chosen_dep_indices = possible_dep_species[i]
break
return chosen_indp_indices, chosen_dep_indices
def __construct_important_variables(self, chosen_indp_indices, chosen_dep_indices, ya, psi, bt):
# getting independent concentrations
ind_spec_conc_temp = [self.__concentration_pars[i] for i in chosen_indp_indices]
# getting dependent concentrations
dep_spec_conc = [self.__concentration_pars[i] for i in chosen_dep_indices]
# constructing the independent ODEs
indp_odes_temp = ya[chosen_indp_indices, :] * psi
# creating conservation laws string
self.__cons_laws_sympy = bt * sympy.Matrix([self.__concentration_pars]).T
# Lambda function of conservation laws
self.__cons_laws_lamb = [sympy.utilities.lambdify(self.__concentration_pars, self.__cons_laws_sympy[i])
for i in range(len(self.__cons_laws_sympy))]
cons_laws_sympy_eq = [sympy.Eq(sympy.Symbol('C' + str(i + 1), real=True), self.__cons_laws_sympy[i])
for i in range(len(self.__cons_laws_sympy))]
dep_conc_in_laws = self.__dependent_species_concentrations(self.__cons_laws_sympy, dep_spec_conc)
replacements = self.__find_dep_concentration_replacements(dep_conc_in_laws, self.__cons_laws_sympy,
dep_spec_conc, cons_laws_sympy_eq)
return replacements, ind_spec_conc_temp, indp_odes_temp
def __create_ode_str(self, replacements, ind_spec_conc_temp, indp_odes_temp, species_num):
# rearrange ind_spec_conc and indp_odes to make species of
# interest be the first ODE
indx_species_num = ind_spec_conc_temp.index(self.__concentration_pars[species_num - 1])
self.__ind_spec_conc = [ind_spec_conc_temp[indx_species_num]]
for i in ind_spec_conc_temp:
if i != self.__concentration_pars[species_num - 1]:
self.__ind_spec_conc.append(i)
indp_odes = sympy.zeros(indp_odes_temp.shape[0], indp_odes_temp.shape[1])
indp_odes[0] = indp_odes_temp[indx_species_num]
count = 1
for i in range(indp_odes_temp.shape[0]):
if i != indx_species_num:
indp_odes[count] = indp_odes_temp[i]
count += 1
        # building ODE string in Antimony format
ode_str = self.__building_ode_str(replacements, self.__ind_spec_conc, indp_odes)
return ode_str
def __finalize_ant_string(self, x, ode_str):
concentration_vals = [self.__concentration_funs[j](*tuple(x)) for j in range(self.__N)]
kinetic_vals = [x[i] for i in range(self.__R)]
antstr = self.__initialize_variables_in_antimony_string(self.__cons_laws_sympy, ode_str,
self.__cons_laws_lamb, concentration_vals, kinetic_vals,
self.__reaction_pars)
if self.__print_lbls_flag:
print(antstr)
return antstr
def __final_constraint_check(self, x_initial):
self.__non_equality_bounds_indices = [i for i in range(len(self.__bounds)) if i not in self.__equality_bounds_indices]
self.__x_full = numpy.zeros(len(self.__bounds), dtype=self.__numpy_dtype)
for j in self.__equality_bounds_indices:
self.__x_full[j] = self.__bounds[j]
count = 0
for j in self.__non_equality_bounds_indices:
self.__x_full[j] = x_initial[count]
count += 1
# concentration > 0 check
con = numpy.asarray([self.__concentration_funs[j](*tuple(self.__x_full)) for j in range(self.__N)],
dtype=self.__numpy_dtype)
con_temp = []
for i in range(self.__N):
con_temp.append(con[i] >= self.__full_concentration_bounds[i][0] and con[i] <= self.__full_concentration_bounds[i][1])
concs_chk = numpy.all(con_temp)
# boundary check
test = []
for j in self.__non_equality_bounds_indices:
test.append(self.__x_full[j] >= self.__bounds[j][0] and self.__x_full[j] <= self.__bounds[j][1])
boundry_chk = numpy.all(test)
# rank(G) = N + delta - 1 check
# xx = numpy.concatenate((x[0:self.__R],con),axis=None)
        # must convert xx to numpy.float64 because higher precision
        # is not supported in linalg
# xx = numpy.float64(xx)
# rank_G = numpy.linalg.matrix_rank(self.__lambda_G_matrix(*tuple(xx)))
# rank_G_chk = rank_G == (self.__N + self.__delta - 1)
# rank(DCH) = min(N,M-ell) check
# rank_DCH = numpy.linalg.matrix_rank(self.__lambda_DCH_matrix(*tuple(xx)))
# rank_DCH_chk = rank_DCH == min(self.__N,self.__M - self.__ell)
if concs_chk and boundry_chk: # and rank_G_chk and rank_DCH_chk:
return [True, self.__x_full]
else:
return [False, []]
def __concentration_violation_fun(self, g, len_g):
temp = numpy.zeros(len_g, dtype=self.__numpy_dtype)
for i in range(len_g):
temp[i] = numpy.maximum(self.__numpy_dtype(0.0), -g[i]) ** 2
return temp
def __x_violation_fun(self, x, b, len_x):
temp = numpy.zeros(len_x, dtype=self.__numpy_dtype)
for i in range(len_x):
temp[i] = numpy.maximum(self.__numpy_dtype(0.0), self.__numpy_dtype(b) - x[i]) ** 2
return temp
def __penalty_objective_func(self, x_initial):
for j in self.__equality_bounds_indices:
self.__x_full[j] = self.__bounds[j]
count = 0
for j in self.__non_equality_bounds_indices:
self.__x_full[j] = x_initial[count]
count += 1
# evaluating the concentrations first
for i in range(self.__N):
temp_val = self.__concentration_funs[i](*tuple(self.__x_full))
if numpy.iscomplex(temp_val):
self.__temp_c = numpy.array([numpy.Inf for i in range(self.__N)], dtype=self.__numpy_dtype)
break
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore", numpy.ComplexWarning)
self.__temp_c[i] = temp_val
if numpy.all(numpy.isfinite(self.__temp_c)):
# obtaining the sum of the violation functions squared
sumval = self.__numpy_dtype(0.0)
for j in range(self.__N):
sumval += numpy.maximum(self.__numpy_dtype(0.0), self.__full_concentration_bounds[j][0] - self.__temp_c[j])**2
sumval += numpy.maximum(self.__numpy_dtype(0.0), self.__temp_c[j] - self.__full_concentration_bounds[j][1])**2
sum0 = self.__numpy_dtype(0.0)
for j in self.__non_equality_bounds_indices:
sum0 += numpy.maximum(self.__numpy_dtype(0.0), self.__bounds[j][0] - self.__x_full[j]) ** 2
sum0 += numpy.maximum(self.__numpy_dtype(0.0), self.__x_full[j] - self.__bounds[j][1]) ** 2
sumval += sum0
# obtaining the violation function values for
# k's and concentrations in x
# xx = numpy.concatenate((x[0:self.__R],x[self.__R + self.__alpha_end_ind:self.__d_len]),axis=None)
# temp = self.__x_violation_fun(xx,self.__numpy_dtype(0.0),self.__R + (self.__d_len - (self.__R +
# self.__alpha_end_ind)))
# sumval += numpy.sum(temp)
return sumval
else:
return numpy.PINF
def __feasible_point_check(self, x, result_fun):
result_x = numpy.zeros(len(self.__bounds), dtype=self.__numpy_dtype)
for j in self.__equality_bounds_indices:
result_x[j] = self.__bounds[j]
count = 0
for j in self.__non_equality_bounds_indices:
result_x[j] = x[count]
count += 1
# double checking the concentrations
con = numpy.asarray([self.__concentration_funs[i](*tuple(result_x)) for i in range(self.__N)],
dtype=self.__numpy_dtype)
con_temp = []
for i in range(self.__N):
con_temp.append(con[i] >= self.__full_concentration_bounds[i][0] and con[i] <= self.__full_concentration_bounds[i][1])
concs_chk = numpy.all(con_temp)
finite_chk = numpy.isfinite(con)
if concs_chk and numpy.all(finite_chk):
# putting the feasible points in x_candidates
if abs(result_fun) <= self.__sys_min_val and numpy.all(con > self.__numpy_dtype(0)):
return True
else:
return False
else:
return False
def __objective_function_to_optimize(self, x_initial):
for j in self.__equality_bounds_indices:
self.__x_full[j] = self.__bounds[j]
count = 0
for j in self.__non_equality_bounds_indices:
self.__x_full[j] = x_initial[count]
count += 1
test = []
for j in self.__non_equality_bounds_indices:
test.append(self.__x_full[j] >= self.__bounds[j][0] and self.__x_full[j] <= self.__bounds[j][1])
boundry_chk = numpy.all(test)
if boundry_chk:
# calculating the concentration values
for i in range(self.__N):
temp_val = self.__concentration_funs[i](*tuple(self.__x_full))
if numpy.iscomplex(temp_val):
self.__temp_c = numpy.array([numpy.Inf for i in range(self.__N)], dtype=self.__numpy_dtype)
break
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore", numpy.ComplexWarning)
self.__temp_c[i] = temp_val
finite_chk = numpy.isfinite(self.__temp_c)
con_temp = []
for i in range(self.__N):
con_temp.append(self.__temp_c[i] >= self.__full_concentration_bounds[i][0] and self.__temp_c[i] <= self.__full_concentration_bounds[i][1])
concs_chk = numpy.all(con_temp)
# making sure our concentrations are finite
if concs_chk and numpy.all(finite_chk):
temp = numpy.zeros(self.__N, dtype=self.__numpy_dtype)
for i in range(self.__N):
temp[i] = numpy.maximum(self.__numpy_dtype(0.0), - self.__temp_c[i])
sumval = numpy.sum(temp)
xx = numpy.concatenate((self.__x_full[0:self.__R], self.__temp_c), axis=None)
return self.__lambda_objective_fun(*tuple(xx)) + sumval
else:
return numpy.PINF
else:
return numpy.PINF
def __dependent_species_concentrations(self, cons_laws_sympy, dep_spec_conc):
# finding those dep_spec_conc that occur in each conservation law
dep_conc_in_laws = []
for i in range(len(cons_laws_sympy)):
temp = []
for j in range(len(dep_spec_conc)):
if cons_laws_sympy[i].count(dep_spec_conc[j]) > 0:
temp.append(dep_spec_conc[j])
dep_conc_in_laws.append(temp)
return dep_conc_in_laws
def __is_list_empty(self, inlist):
if isinstance(inlist, list): # Is a list
return all(map(self.__is_list_empty, inlist))
return False # Not a list
def __find_dep_concentration_replacements(self, dep_conc_in_laws, cons_laws_sympy, dep_spec_conc,
cons_laws_sympy_eq):
replacements = []
flag = True
while flag:
for i in range(len(cons_laws_sympy_eq)):
if len(dep_conc_in_laws[i]) == 1:
temp = sympy.solve(cons_laws_sympy_eq[i], dep_conc_in_laws[i])
cons_laws_sympy = [cons_laws_sympy[j].subs(dep_conc_in_laws[i][0], temp[0])
for j in range(len(cons_laws_sympy))]
cons_laws_sympy_eq = [sympy.Eq(sympy.Symbol('C' + str(i + 1), real=True), cons_laws_sympy[i])
for i in range(len(cons_laws_sympy))]
replacements.append([dep_conc_in_laws[i][0], '(' + str(temp[0]) + ')'])
dep_conc_in_laws = self.__dependent_species_concentrations(cons_laws_sympy, dep_spec_conc)
if self.__is_list_empty(dep_conc_in_laws):
flag = False
return replacements
def __building_ode_str(self, replacements, ind_spec_conc, indp_odes):
indp_odes_str = []
# making the replacements in the indep. ODEs
for i in range(len(indp_odes)):
for j in range(len(replacements)):
indp_odes[i] = indp_odes[i].subs(replacements[j][0], replacements[j][1])
indp_odes_str.append(str(indp_odes[i]))
self.__independent_odes = indp_odes
self.__independent_species = ind_spec_conc
# replacing all powers with ^ instead of **
for i in range(len(indp_odes_str)):
indp_odes_str[i] = indp_odes_str[i].replace('**', '^')
# building the string of ODEs in Antimony syntax
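        # Illustrative example of the format produced here (symbol names assumed,
        # not taken from a real network): a two-species system could give
        #   "J0: -> s1; re1*s2 - re2*s1;J1: -> s2; re2*s1 - re1*s2;"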
ode_str = ''
for i in range(len(ind_spec_conc)):
ode_str += 'J' + str(i) + ': -> ' + str(ind_spec_conc[i]) + '; ' + indp_odes_str[i] + ';'
return ode_str
def __building_ant_str(self, ode_str, kinetic_con, lhs_cons_laws, var_vals):
vars_to_initialize = kinetic_con + lhs_cons_laws + [str(self.__concentration_pars[i]) for i in range(self.__N)]
ant_str = ode_str
for i in range(len(vars_to_initialize)):
ant_str += str(vars_to_initialize[i]) + ' = ' + str(var_vals[i]) + ';'
return ant_str
def __initialize_variables_in_antimony_string(self, cons_laws_sympy, ode_str, cons_laws_lamb, concentration_vals,
kinetic_vals, kinetic_con):
# string representation of variables on lhs of mass cons laws
lhs_cons_laws = ['C' + str(i + 1) for i in range(len(cons_laws_sympy))]
conservation_law_vals = [cons_laws_lamb[i](*tuple(concentration_vals)) for i in range(len(cons_laws_lamb))]
var_vals = kinetic_vals + conservation_law_vals + concentration_vals
# The full Antimony string of system of ODEs
ant_str = self.__building_ant_str(ode_str, kinetic_con, lhs_cons_laws, var_vals)
return ant_str
def generate_report(self):
"""
Prints out helpful details constructed by :func:`crnt4sbml.MassConservationApproach.run_optimization` and
:func:`crnt4sbml.MassConservationApproach.run_continuity_analysis`.
Example
--------
See :ref:`quickstart-deficiency-label` and :ref:`my-deficiency-label`.
"""
if self.__comm is None:
print(self.__important_info)
else:
all_important_info = self.__comm.gather(self.__important_info, root=0)
self.__comm.Barrier()
if self.__my_rank == 0:
for i in range(1, len(all_important_info)):
if all_important_info[i] != "":
print(all_important_info[i])
print(self.__important_info)
def get_comm(self):
"""
Returns a mpi4py communicator if it has been initialized and None otherwise.
"""
return self.__comm
def get_my_rank(self):
"""
Returns the rank assigned by mpi4py if it is initialized, otherwise None will be returned.
"""
return self.__my_rank |
# Number of Distinct Islands
# Count the number of distinct islands. An island is considered to be the same as another
# if and only if one island can be translated (and not rotated or reflected) to equal the other.
# Example:
# 11011
# 10000
# 00001
# 11011
# => 3
# Approach:
# combine direction and recursion to form a string that presents the path
class Solution(object):
def numDistinctIslands(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not any(grid):
return 0
m, n = len(grid), len(grid[0])
islands = set()
directions = { 'u': (-1, 0), 'l': (0, -1), 'd': (1, 0), 'r': (0, 1) }
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
path = []
self.dfs(i, j, grid, path, directions)
islands.add(''.join(path))
return len(islands)
def dfs(self, x, y, grid, path, directions):
m, n = len(grid), len(grid[0])
grid[x][y] = 0
for d in directions:
dx, dy = directions[d]
i, j = x + dx, y + dy
if 0 <= i < m and 0 <= j < n and grid[i][j]:
path.append(d)
self.dfs(i, j, grid, path, directions)
path.append('b') # end of current recursion
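# Minimal usage sketch (assumed harness, not part of the original solution):
# the grid below matches the example in the header comment and should yield 3.
if __name__ == '__main__':
    grid = [
        [1, 1, 0, 1, 1],
        [1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1],
        [1, 1, 0, 1, 1],
    ]
    print(Solution().numDistinctIslands(grid))  # expected output: 3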
|
from typing import List
class Solution:
def findMaxLength(self, nums: List[int]) -> int:
sum_ = 0
max_len = 0
sum_to_index = {0: -1}
for i in range(len(nums)):
sum_ += 1 if nums[i] else -1
if sum_to_index.get(sum_) is not None:
max_len = max(max_len, i - sum_to_index.get(sum_))
else:
sum_to_index[sum_] = i
return max_len
s = Solution()
nums = [0, 0, 1]
print(s.findMaxLength(nums))
|
from __future__ import print_function, division
def pprint_nodes(subtrees):
"""
Prettyprints systems of nodes.
Examples
========
>>> from sympy.printing.tree import pprint_nodes
>>> print(pprint_nodes(["a", "b1\\nb2", "c"]))
+-a
+-b1
| b2
+-c
"""
def indent(s, type=1):
x = s.split("\n")
r = "+-%s\n" % x[0]
for a in x[1:]:
if a == "":
continue
if type == 1:
r += "| %s\n" % a
else:
r += " %s\n" % a
return r
if len(subtrees) == 0:
return ""
f = ""
for a in subtrees[:-1]:
f += indent(a)
f += indent(subtrees[-1], 2)
return f
def print_node(node):
"""
    Returns information about the "node".
This includes class name, string representation and assumptions.
"""
s = "%s: %s\n" % (node.__class__.__name__, str(node))
if len(node._assumptions) > 0:
for a in node._assumptions:
s += "%s: %s\n" % (a, node._assumptions[a])
return s
def tree(node):
"""
Returns a tree representation of "node" as a string.
It uses print_node() together with pprint_nodes() on node.args recursively.
See also: print_tree()
"""
subtrees = []
for arg in node.args:
subtrees.append(tree(arg))
s = print_node(node) + pprint_nodes(subtrees)
return s
def print_tree(node):
"""
Prints a tree representation of "node".
Examples
========
>>> from sympy.printing import print_tree
>>> from sympy.abc import x
>>> print_tree(x**2) # doctest: +SKIP
Pow: x**2
+-Symbol: x
| comparable: False
+-Integer: 2
real: True
nonzero: True
comparable: True
commutative: True
infinitesimal: False
unbounded: False
noninteger: False
zero: False
complex: True
bounded: True
rational: True
integer: True
imaginary: False
finite: True
irrational: False
<BLANKLINE>
See also: tree()
"""
print(tree(node))
|
# Copyright (c) 2017 Christoph Landgraf. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .base import \
with_session, with_optional_session, \
BreakpointBuffer, py_display_all_breakpoints, py_display_session_breakpoints, \
SessionBuffer
from .threads import \
ThreadBuffer, CodeBuffer, FrameBuffer, EvalBuffer
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wa import Workload, Parameter, TargetError, WorkloadError, Executable, Alias
from wa.utils.exec_control import once
BLOCK_SIZES = [16, 64, 256, 1024, 8192, 16384]
ECD = ['secp160r1', 'nistp192', 'nistp224', 'nistp256', 'nistp384', 'nistp521',
'nistk163', 'nistk233', 'nistk283', 'nistk409', 'nistk571', 'nistb163',
'nistb233', 'nistb283', 'nistb409', 'nistb571', 'curve25519']
CIPHER_PKI = ['rsa', 'dsa', 'ecdh', 'ecdsa']
EVP_NEW = ['aes-128-cbc', 'aes-192-cbc', 'aes-256-cbc', 'aes-128-gcm', 'aes-192-gcm',
'aes-256-gcm', 'sha1', 'sha256', 'sha384', 'sha512']
class Openssl(Workload):
name = 'openssl'
description = '''
Benchmark Openssl algorithms using Openssl's speed command.
The command tests how long it takes to perfrom typical SSL operations using
a range of supported algorithms and ciphers.
By defalt, this workload will use openssl installed on the target, however
it is possible to provide an alternative binary as a workload resource.
'''
parameters = [
Parameter('algorithm', default='aes-256-cbc',
allowed_values=EVP_NEW + CIPHER_PKI,
description='''
Algorithm to benchmark.
'''),
Parameter('threads', kind=int, default=1,
description='''
The number of threads to use
'''),
Parameter('use_system_binary', kind=bool, default=True,
description='''
If ``True``, the system Openssl binary will be used.
Otherwise, use the binary provided in the workload
resources.
'''),
]
aliases = [Alias('ossl-' + algo, algorithm=algo)
for algo in EVP_NEW + CIPHER_PKI]
@once
def initialize(self, context):
if self.use_system_binary:
try:
cmd = '{0} md5sum < $({0} which openssl)'
output = self.target.execute(cmd.format(self.target.busybox))
md5hash = output.split()[0]
version = self.target.execute('openssl version').strip()
context.update_metadata('hashes', 'openssl', md5hash)
context.update_metadata('versions', 'openssl', version)
except TargetError:
msg = 'Openssl does not appear to be installed on target.'
raise WorkloadError(msg)
Openssl.target_exe = 'openssl'
else:
resource = Executable(self, self.target.abi, 'openssl')
host_exe = context.get_resource(resource)
Openssl.target_exe = self.target.install(host_exe)
def setup(self, context):
self.output = None
if self.algorithm in EVP_NEW:
cmd_template = '{} speed -mr -multi {} -evp {}'
else:
cmd_template = '{} speed -mr -multi {} {}'
self.command = cmd_template.format(self.target_exe, self.threads, self.algorithm)
def run(self, context):
self.output = self.target.execute(self.command)
def extract_results(self, context):
if not self.output:
return
outfile = os.path.join(context.output_directory, 'openssl.output')
with open(outfile, 'w') as wfh:
wfh.write(self.output)
context.add_artifact('openssl-output', outfile, 'raw', 'openssl\'s stdout')
def update_output(self, context):
if not self.output:
return
for line in self.output.split('\n'):
line = line.strip()
if not line.startswith('+F'):
continue
parts = line.split(':')
if parts[0] == '+F': # evp ciphers
for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
value = value / 2**20 # to MB
context.add_metric('score', value, 'MB/s',
classifiers={'block_size': bs})
elif parts[0] in ['+F2', '+F3']: # rsa, dsa
key_len = int(parts[2])
sign = float(parts[3])
verify = float(parts[4])
context.add_metric('sign', sign, 'seconds',
classifiers={'key_length': key_len})
context.add_metric('verify', verify, 'seconds',
classifiers={'key_length': key_len})
elif parts[0] == '+F4': # ecdsa
ec_idx = int(parts[1])
key_len = int(parts[2])
sign = float(parts[3])
verify = float(parts[4])
context.add_metric('sign', sign, 'seconds',
classifiers={'key_length': key_len,
'curve': ECD[ec_idx]})
context.add_metric('verify', verify, 'seconds',
classifiers={'key_length': key_len,
'curve': ECD[ec_idx]})
elif parts[0] == '+F5': # ecdh
ec_idx = int(parts[1])
key_len = int(parts[2])
op_time = float(parts[3])
ops_per_sec = float(parts[4])
context.add_metric('op', op_time, 'seconds',
classifiers={'key_length': key_len,
'curve': ECD[ec_idx]})
context.add_metric('ops_per_sec', ops_per_sec, 'Hz',
classifiers={'key_length': key_len,
'curve': ECD[ec_idx]})
else:
self.logger.warning('Unexpected result: "{}"'.format(line))
@once
def finalize(self, context):
if not self.use_system_binary:
self.target.uninstall('openssl')
|
import requests
import json
# disable warnings from SSL/TLS certificates
requests.packages.urllib3.disable_warnings()
# All of our REST calls will use the url for the APIC controller as the base URL
# So let's define a variable for the controller IP or DNS so we don't have to keep typing it
controller_url = "https://sandboxapicdc.cisco.com"
################## Get Auth Token #####################################################
# This call authenticates to the APIC and returns a session token for later requests.
login_url = controller_url + '/api/aaaLogin.json'
payload = {
"aaaUser":{
"attributes":{
"name":"admin",
"pwd":"ciscopsdt"
}
}
}
# To log in, we need to use the POST method.
# When using POST, you need to specify the Content-Type header as application/json
headers = {'content-type': 'application/json'}
# Use requests.post to do a POST to the login API
# Specify request body json data and headers
login_response = requests.post(login_url, data=json.dumps(payload), headers=headers, verify=False)
print("\nResult of login: " + login_response.text + '\n')
response = login_response.json()
print(json.dumps(response, indent=4, separators=(',', ': ')))
sessionToken = response["imdata"][0]["aaaLogin"]["attributes"]["token"]
tenant_url = controller_url + "/api/node/class/fvTenant.json"
sessionToken = 'APIC-cookie={}'.format(sessionToken)
#headers = {'content-type': 'application/json', 'Authorization': 'APIC-Cookie %s' % sessionToken}
#headers = {'content-type': 'application/json', 'access_Token' : sessionToken}
#headers = {'content-type': 'application/json', 'x-api-key' : sessionToken}
headers = {'content-type': 'application/json', 'Cookie' : sessionToken}
params = {'subscription' : 'yes'}
#get_req_response = requests.get(tenant_url, verify= False, headers=headers, params=params)
get_req_response = requests.get(tenant_url, verify= False, headers=headers)
response = get_req_response.json()
print(json.dumps(response, indent=4, separators=(',',': ')))
|
"""Handler file for all routes pertaining to about_us_page_settings"""
from database.models import AboutUsPageSettings
from api.handlers.page_settings import PageSettingsHandler
class AboutUsPageSettingsHandler(PageSettingsHandler):
def __init__(self):
super().__init__('about_us', AboutUsPageSettings)
|
# Copyright (c) 2012-2013, Razvan Pascanu
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
class MemTask(object):
def __init__(self,
rng,
floatX,
n_values = 5,
n_pos = 10,
generate_all = False):
self.rng = rng
self.floatX = floatX
self.dim = n_values**n_pos
self.n_values = n_values
self.n_pos = n_pos
self.generate_all = generate_all
if generate_all:
self.data = numpy.zeros((n_pos, self.dim, n_values+2))
for val in xrange(self.dim):
tmp_val = val
for k in xrange(n_pos):
self.data[k, val, tmp_val % n_values] = 1.
tmp_val = tmp_val // n_values
self.nin = self.n_values + 2
self.nout = n_values + 1
self.classifType = 'softmax'
self.report = 'all'
def generate(self, batchsize, length):
if self.generate_all:
batchsize = self.dim
input_data = numpy.zeros((length + 2*self.n_pos,
batchsize,
self.n_values + 2),
dtype=self.floatX)
targ_data = numpy.zeros((length + 2*self.n_pos,
batchsize,
self.n_values+1),
dtype=self.floatX)
targ_data[:-self.n_pos,:, -1] = 1
input_data[self.n_pos:,:, -2] = 1
input_data[length + self.n_pos, :, -2] = 0
input_data[length + self.n_pos, :, -1] = 1
if not self.generate_all:
self.data = numpy.zeros((self.n_pos, batchsize, self.n_values+2))
for val in xrange(batchsize):
tmp_val = self.rng.randint(self.dim)
for k in xrange(self.n_pos):
self.data[k, val, tmp_val % self.n_values] = 1.
tmp_val = tmp_val // self.n_values
input_data[:self.n_pos, :, :] = self.data
targ_data[-self.n_pos:, :, :] = self.data[:,:,:-1]
return input_data, targ_data.reshape(((length +
2*self.n_pos)*batchsize, -1))
if __name__ == '__main__':
print 'Testing memorization task generator ..'
task = MemTask(numpy.random.RandomState(123),
'float32')
seq, targ = task.generate(3, 25)
assert seq.dtype == 'float32'
assert targ.dtype == 'float32'
print 'Seq_0'
print seq[:,0,:].argmax(axis=1)
print 'Targ0'
print targ.reshape((25+2*10, 3, -1))[:,0,:].argmax(1)
print
print 'Seq_1'
print seq[:,1,:].argmax(axis=1)
print 'Targ1'
print targ.reshape((25+2*10, 3, -1))[:,1,:].argmax(1)
print
print 'Seq_2'
print seq[:,2,:].argmax(axis=1)
print 'Targ2'
print targ.reshape((25+2*10, 3, -1))[:,2,:].argmax(1)
|
from tensorflow.keras.layers import Layer
class Self_Attention(Layer):
"""
self attention layer
"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def build(self, input_shape):
        super().build(input_shape)
    def call(self, x):
        # x: [batch_size, sequence_length, embedding_size]
        # NOTE: the original left this unimplemented (`pass`); returning the input
        # unchanged keeps the layer runnable until real attention weights are added.
        return x
    def compute_output_shape(self, input_shape):
        # output has the same shape as the input
        return input_shape
|
from . api import ScheduleConnection, ScheduleManipulation, ProviderConnection, \
ProviderReport, LocationConnection, ScheduleWithData, ProviderLocations |
# Copyright (C) 2013 Joseph W. Kaus and Matthew C. Zwier and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
import argparse
import logging
log = logging.getLogger('w_truncate')
import westpa
warning_string = '''\
NOTE: w_truncate only deletes iteration groups from the HDF5 data store.
It is recommended that any iteration data saved to the file system (e.g. in the
traj_segs directory) is deleted or moved for the corresponding iterations.
'''
parser = argparse.ArgumentParser('w_truncate', description='''\
Remove all iterations after a certain point in a WESTPA simulation.
''',
epilog=warning_string)
westpa.rc.add_args(parser)
parser.add_argument('-n', '--iter', dest='n_iter', type=int,
help='Truncate this iteration and those following.')
args = parser.parse_args()
westpa.rc.process_args(args, config_required=False)
dm = westpa.rc.get_data_manager()
dm.open_backing()
max_iter = dm.current_iteration
n_iter = args.n_iter if args.n_iter > 0 else dm.current_iteration
for i in xrange(n_iter, dm.current_iteration+1):
dm.del_iter_group(i)
dm.del_iter_summary(n_iter)
dm.current_iteration = n_iter - 1
print('simulation data truncated after iteration {}'.format(dm.current_iteration))
print('\n' + warning_string)
dm.flush_backing()
dm.close_backing()
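# Typical invocation (illustrative): truncate iteration 100 and everything after it
#   w_truncate -n 100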
|
from flask_wtf import FlaskForm
from wtforms.validators import Required
from wtforms import StringField,TextAreaField,SubmitField, SelectField, RadioField
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('SUBMIT')
class BlogForm(FlaskForm):
content = TextAreaField('YOUR BLOG')
submit = SubmitField('Create Blog')
class CommentsForm(FlaskForm):
comment = TextAreaField('Comment on this blog:', validators=[Required()])
submit = SubmitField('SUBMIT')
class SubscribeForm(FlaskForm):
email = StringField("Enter email", validators = [Required()])
submit = SubmitField("Subscribe")
|
#!/usr/bin/env python
#
#
import sys, re, json;
from pprint import pprint;
the_raw_input = sys.argv[1].strip();
the_raw_key = sys.argv[2].strip();
the_json = json.loads(the_raw_input);
def to_int(raw):
if re.match('^[0-9]{1,3}$', raw):
return int(raw)
else:
raise Exception("Invalid value for int: " + repr(raw))
def get(val, raw_key):
the_type = type(val).__name__;
if the_type == "list":
return get_index(val, raw_key);
elif the_type == "dict":
return get_key(val, raw_key);
else:
raise Exception("Invalid value: " + repr(val) + " (key: " + repr(raw_key) + ")")
def get_index(val, raw_key):
return val[to_int(raw_key)];
def get_key(d, raw_key):
keys = to_keys(raw_key)
if len(keys) == 1:
return d[keys[0]]
else:
return get_index(d[keys[0]], keys[1])
def to_keys(raw_key):
m = re.search('^(?P<k>[^\[]+)(\[(?P<i>[0-9]+)\])?$', raw_key)
if m.group("i"):
return [m.group("k"), m.group("i")]
else:
return [m.group("k")]
current = the_json
for m in re.finditer('[^\.]+', the_raw_key):
current = get(current, m.group(0));
pprint(current)
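# Example invocation (illustrative; substitute the actual script name):
#   python this_script.py '{"a": {"b": [10, 20, 30]}}' 'a.b[1]'
# prints 20.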
|
import math
import random
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from lib.utils import *
class ReplayBuffer(Dataset):
def __init__(self, env, maximum, preprocess_dict, gamma = 0.99):
#only note the game environment
        #not the environment object
self.env = env
self.maximum = maximum
self.preprocess_dict = preprocess_dict
self.length = 0
self.gamma = gamma
self.eps = 10e-7
        #one element in datalist is a training pair with three elements: observation, reward, action
#the pair relationship -> model(observation) ==> action ==> reward
self.data = []
self.rewards = []
self.__insert_lock = []
def reset_maximum(self, new):
self.maximum = new
return None
def new_episode(self):
if len(self.rewards) > self.maximum:
self.data = self.data[1: ]
self.rewards = self.rewards[1: ]
self.__insert_lock = self.__insert_lock[1: ]
self.data.append([])
self.rewards.append([])
self.__insert_lock.append(False)
return None
def insert(self, observation, action):
if self.__insert_lock[-1] != True:
#not lock can append
self.data[-1].append([observation.squeeze(), action])
else:
raise RuntimeError('Please use new_episode() before insert new episode information.')
return None
def insert_reward(self, reward, times, done):
if self.__insert_lock[-1] != True:
for i in range(times):
if self.preprocess_dict['time_decay']:
decay_reward = reward * math.pow((self.gamma), (times - 1 - i))
self.rewards[-1].append(decay_reward)
else:
self.rewards[-1].append(reward)
else:
raise RuntimeError('Please use new_episode() before insert new episode information.')
if done:
self.__insert_lock[-1] = True
return None
def trainable(self):
#check the buffer is ready for training
return True if len(self.rewards) >= self.maximum else False
def make(self, episode_size):
self.observation = None
self.action = None
self.reward = None
for i in range(episode_size):
select = random.randint(0, self.maximum - 1)
dataset = EpisodeSet(self.data[select], self.rewards[select])
dataloader = DataLoader(dataset, batch_size = len(self.data[select]), shuffle = False)
for iter, (obs, act, rew) in enumerate(dataloader):
if self.observation is None:
self.observation = obs.squeeze()
else:
self.observation = torch.cat((self.observation, obs.squeeze()), dim = 0)
if self.action is None:
self.action = act.squeeze()
else:
self.action = torch.cat((self.action, act.squeeze()), dim = 0)
if self.reward is None:
self.reward = rew
else:
self.reward = torch.cat((self.reward, rew), dim = 0)
if self.preprocess_dict['normalized']:
mean = torch.mean(self.reward, dim = 0)
std = torch.std(self.reward, dim = 0)
self.reward = (self.reward - mean) / (std + self.eps)
self.length = self.reward.size(0)
return None
def __getitem__(self, index):
return self.observation[index].detach(), self.action[index].detach(), self.reward[index].detach()
def __len__(self):
return self.length
class EpisodeSet(Dataset):
def __init__(self, data, rewards):
self.data = data
self.rewards = rewards
def __getitem__(self, index):
#return observation, action
reward = torch.tensor(self.rewards[index]).float()
return self.data[index][0].float(), self.data[index][1].float(), reward
def __len__(self):
return len(self.data)
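# Minimal usage sketch (dummy shapes and environment name; purely illustrative):
# the intended call order is new_episode(), then insert()/insert_reward() per step,
# and finally make() once trainable() reports the buffer holds enough episodes.
if __name__ == '__main__':
    buffer = ReplayBuffer(env='Pong-v0', maximum=1,
                          preprocess_dict={'time_decay': True, 'normalized': True})
    buffer.new_episode()
    for _ in range(4):
        buffer.insert(torch.zeros(1, 8), torch.zeros(2))  # dummy observation/action
    buffer.insert_reward(1.0, times=4, done=True)
    if buffer.trainable():
        buffer.make(episode_size=1)
        print(len(buffer))  # number of collected transitions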
|
#!/usr/bin/python
# Program for simulating full model with coupling
import sys
import numpy as np
nogui = '-nogui' in sys.argv
if not nogui:
import matplotlib.pyplot as plt
from decimal import *
sys.path += '.'
from x_inf_decimal import *
from input_vars_decimal import * # Global variables from input data file.
# Simulation Parameters
deltat = Decimal('0.01e-3')
duration = Decimal('0.03') #********************* Duration Duration ***************
numpoints = int(round(duration/deltat))
numtests = 11
xaxis = [round(x * Decimal('1e3'), 2) for x in np.arange(deltat, duration+deltat, deltat)]
# Input parameters
onset = int(round(Decimal('0.002')/deltat))
offset = int(round(Decimal('0.022')/deltat))
# Variable Declaration
V = list()
I_j = list()
I_mem = list()
Ca = list()
n = list()
p = list()
q = list()
e = list()
f = list()
h = list()
for i in range(0,numtests):
V.append(list())
I_mem.append(list())
Ca.append(list())
n.append(Decimal('0'))
p.append(Decimal('0'))
q.append(Decimal('0'))
e.append(Decimal('0'))
f.append(Decimal('0'))
h.append(Decimal('0'))
I_j.append(Decimal('0'))
for j in range(0,numpoints):
V[i].append(Decimal('0'))
I_mem[i].append(Decimal('0'))
Ca[i].append(Decimal('0'))
I_j.append(Decimal('0'))
# Input initialization
for i in range(0,numtests):
for j in range(0,numpoints):
V[i][j] = Decimal('-70e-3')
Vstim = Decimal('40e-3')
for i in range(0,numtests):
for j in range(onset-1,offset):
V[i][j] = Vstim
Vstim = Vstim - Decimal('10e-3')
# Variable initialization
for j in range(0,numtests):
Ca[j][0] = Decimal('0')
n[j] = x_inf(V[j][0], Vhalf_n, k_n)
p[j] = x_inf(V[j][0], Vhalf_p, k_p)
q[j] = x_inf(V[j][0], Vhalf_q, k_q)
e[j] = x_inf(V[j][0], Vhalf_e, k_e)
f[j] = x_inf(V[j][0], Vhalf_f, k_f)
# Start of simulation
for j in range(0,numtests):
for i in range(1,numpoints):
dn = (x_inf(V[j][i-1], Vhalf_n, k_n) - n[j])/T_n
n[j] = n[j] + dn*deltat
dp = (x_inf(V[j][i-1], Vhalf_p, k_p) - p[j])/T_p
p[j] = p[j] + dp*deltat
dq = (x_inf(V[j][i-1], Vhalf_q, k_q) - q[j])/T_q
q[j] = q[j] + dq*deltat
de = (x_inf(V[j][i-1], Vhalf_e, k_e) - e[j])/T_e
e[j] = e[j] + de*deltat
df = (x_inf(V[j][i-1], Vhalf_f, k_f) - f[j])/T_f
f[j] = f[j] + df*deltat
h[j] = x_inf(Ca[j][i-1], Cahalf_h, k_h)
# H_rec[i] = h[j]
IKS = gKS * n[j] * (V[j][i-1] - VKS)
IKF = gKF * p[j]**4 * q[j] * (V[j][i-1] - VKF)
ICa = gCa * e[j]**2 * f[j] * (1 + (h[j] - 1) * alphaCa) * (V[j][i-1] - VCa)
IL = gL * (V[j][i-1] - VL)
dCa = -(Ca[j][i-1]/T_Ca + thiCa*ICa)
Ca[j][i] = Ca[j][i-1] + dCa*deltat
I_mem[j][i] = (IKS + IKF + ICa)
if not nogui: plt.plot(xaxis, [x * Decimal('1e9') for x in I_mem[j]])
print('Finished simulation of %s seconds'%duration)
if not nogui:
plt.ylabel('Imem (nA)')
plt.xlabel('Time (ms)')
plt.show()
|
# ===================================================================================== #
# Test suite for enumerate.py
# Author : Edward Lee, [email protected]
#
# MIT License
#
# Copyright (c) 2019 Edward D. Lee, Bryan C. Daniels
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================== #
import numpy as np
import mpmath as mp
from .utils import pair_corr, bin_states
from .enumerate import fast_logsumexp, mp_fast_logsumexp
np.random.seed(0)
def test_basic():
hJ = np.random.normal(size=6,scale=.2)
# make sure probability distribution is normalized, p and correlations agree for both symmetrized and
# unsymmetrized bases
# n=3
from .ising_eqn import ising_eqn_3_sym as ising
p = ising.p(hJ)
assert np.isclose(p.sum(), 1)
assert ((ising.calc_observables(hJ)<=1)&(ising.calc_observables(hJ)>=-1)).all()
assert np.isclose(ising.calc_observables(hJ),
pair_corr(bin_states(3,True), weights=ising.p(hJ), concat=True)).all()
from .ising_eqn import ising_eqn_3 as ising
p = ising.p(hJ)
assert np.isclose(p.sum(), 1)
assert ((ising.calc_observables(hJ)<=1)&(ising.calc_observables(hJ)>=0)).all()
assert np.isclose(ising.calc_observables(hJ),
pair_corr(bin_states(3), weights=ising.p(hJ), concat=True)).all()
# n=4
hJ = np.random.normal(size=10, scale=.2)
from .ising_eqn import ising_eqn_4_sym as ising
p = ising.p(hJ)
assert np.isclose(p.sum(), 1)
assert ((ising.calc_observables(hJ)<=1)&(ising.calc_observables(hJ)>=-1)).all()
assert np.isclose(ising.calc_observables(hJ),
pair_corr(bin_states(4,True), weights=ising.p(hJ), concat=True)).all()
from .ising_eqn import ising_eqn_4 as ising
p = ising.p(hJ)
assert np.isclose(p.sum(), 1)
assert ((ising.calc_observables(hJ)<=1)&(ising.calc_observables(hJ)>=0)).all()
assert np.isclose(ising.calc_observables(hJ),
pair_corr(bin_states(4), weights=ising.p(hJ), concat=True)).all()
# n=4, high precision
hJ = np.array(list(map(mp.mpf, np.random.normal(size=10, scale=.2))))
from .ising_eqn import ising_eqn_4_sym_hp as ising
p = ising.p(hJ)
assert np.isclose(float(p.sum()), 1)
assert ((ising.calc_observables(hJ)<=1)&(ising.calc_observables(hJ)>=-1)).all()
assert np.isclose(ising.calc_observables(hJ).astype(float),
pair_corr(bin_states(4,sym=True), weights=ising.p(hJ).astype(float), concat=True)).all()
def test_fast_logsumexp():
from scipy.special import logsumexp
X = np.random.normal(size=10, scale=10, loc=1000)
coeffs = np.random.choice([-1,1], size=X.size)
npval = logsumexp(X, b=coeffs, return_sign=True)
assert np.array_equal(fast_logsumexp(X, coeffs), npval)
X = np.array(list(map(mp.mpf, X)))
assert abs(float(mp_fast_logsumexp(X, coeffs)[0])-npval[0])<1e-16
|
# -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2020 Jason McKinney
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# --------------------------------------------------------------------------------
# vecElo.py
#
# Created: 2019-02-22
# --------------------------------------------------------------------------------
# The author can be contacted via email at jason_at_jmmckinney_dot_net
# or on the VEX Forum as jmmckinney.
# --------------------------------------------------------------------------------
import requests
import pandas as pd
import numpy as np
import pickle
import os.path
import math
import numbers
import signal
from threading import Event
k_factor = 64
nToEstablish = 16
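# Worked example of the Elo update used in award_match() below (illustrative
# numbers only): with blue_rating = 1000 and red_rating = 800,
#   expected_blue = 1 / (1 + 10 ** ((800 - 1000) / 400)) ~= 0.76
# so a blue win (actual_blue = 1.0) moves the blue alliance by roughly
# k_factor * (1.0 - 0.76) ~= 15 points, split between the two blue teams in
# proportion to their share of the alliance's average rating.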
pd.options.mode.chained_assignment = None
exit_event = Event()
class Metadata:
def __init__(self, scored_matches=0):
self.scored_matches = scored_matches
def get_teams_total(program='VRC'):
return requests.get(
'https://api.vexdb.io/v1/get_teams',
params={
'nodata': 'true',
'program': program
}
).json()['size']
def get_all_teams(program='VRC'):
teams_total = get_teams_total(program)
res = requests.get(
'https://api.vexdb.io/v1/get_teams',
params={
'program': program
}
).json()
result_teams = res['result']
while len(result_teams) < teams_total:
res = requests.get(
'https://api.vexdb.io/v1/get_teams',
params={
'program': program,
'limit_start': len(result_teams)
}
).json()
result_teams.extend(res['result'])
return pd.DataFrame(result_teams).set_index('number')
def get_matches_total(season='current'):
return requests.get(
'https://api.vexdb.io/v1/get_matches',
params={'nodata': 'true', 'season': season}
).json()['size']
def get_matches_scored(season='current'):
return requests.get(
'https://api.vexdb.io/v1/get_matches',
params={'nodata': 'true', 'season': season, 'scored': '1'}
).json()['size']
def get_all_matches(season='current'):
matches_total = get_matches_total(season)
res = requests.get(
'https://api.vexdb.io/v1/get_matches',
params={
'season': season
}
).json()
matches = res['result']
while len(matches) < matches_total:
res = requests.get(
'https://api.vexdb.io/v1/get_matches',
params={
'limit_start': len(matches),
'season': season
}
).json()
matches.extend(res['result'])
raw_dataframe = pd.DataFrame(matches)
raw_dataframe = raw_dataframe[raw_dataframe.blue2 != '']
raw_dataframe = raw_dataframe[raw_dataframe.scored == 1]
skus = raw_dataframe['sku'].drop_duplicates()[::-1]
dataframe = pd.DataFrame(columns=[
'blue1',
'blue2',
'blue3',
'bluescore',
'bluesit',
'division',
'field',
'instance',
'matchnum',
'red1',
'red2',
'red3',
'redscore',
'redsit',
'round',
'scheduled',
'scored',
'sku'
])
for sku in skus.values:
dataframe = dataframe.append(raw_dataframe[raw_dataframe['sku'] == sku], sort=False)
return dataframe
def elo_rankings_from_matches(team_list, matches, rankings=None):
def add_team(the_list, team):
try:
team_from_list = team_list.loc[team]
country = team_from_list['country']
region = team_from_list['region']
grade = team_from_list['grade']
except KeyError:
region = ''
grade = ''
country = ''
mean_elo = the_list[the_list[:, 5] == False][:, 2].mean()
return np.insert(the_list, 0, [team, 0, mean_elo, 0, 0, True, 0.0, region, country, grade], axis=0)
def award_match(match, ranks):
# 0, 1, 2, 3, 4, 5
# blue1, blue2, bluescore, red1, red2, redscore
if match[0] not in ranks[:, 0]:
ranks = add_team(ranks, match[0])
if match[1] not in ranks[:, 0]:
ranks = add_team(ranks, match[1])
if match[3] not in ranks[:, 0]:
ranks = add_team(ranks, match[3])
if match[4] not in ranks[:, 0]:
ranks = add_team(ranks, match[4])
blue1 = np.where(ranks[:, 0] == match[0])[0][0]
blue2 = np.where(ranks[:, 0] == match[1])[0][0]
red1 = np.where(ranks[:, 0] == match[3])[0][0]
red2 = np.where(ranks[:, 0] == match[4])[0][0]
blue_r1 = ranks[blue1, 2]
blue_r2 = ranks[blue2, 2]
red_r1 = ranks[red1, 2]
red_r2 = ranks[red2, 2]
blue_rating = (blue_r1 + blue_r2) / 2.0
red_rating = (red_r1 + red_r2) / 2.0
expected_blue = 1.0 / (1.0 + pow(10.0, ((red_rating - blue_rating) / 400.0)))
expected_red = 1.0 - expected_blue
if match[2] > match[5]:
actual_blue = 1.0
ranks[blue1, 4] += 1
ranks[blue2, 4] += 1
elif match[2] < match[5]:
actual_blue = 0.0
ranks[red1, 4] += 1
ranks[red2, 4] += 1
else:
actual_blue = 0.5
actual_red = 1.0 - actual_blue
delta_blue = k_factor * (actual_blue - expected_blue)
delta_red = k_factor * (actual_red - expected_red)
blue1_contrib = blue_r1 / (blue_rating * 2)
blue2_contrib = 1.0 - blue1_contrib
red1_contrib = red_r1 / (red_rating * 2)
red2_contrib = 1.0 - red1_contrib
if ranks[blue1, 5]:
modifier = 0
if actual_blue == 1.0:
modifier = 400 - int(ranks[red1, 5]) * 100 - int(ranks[red2, 5]) * 100
elif actual_blue == 0.0:
modifier = -400 + int(ranks[red1, 5]) * 100 + int(ranks[red2, 5]) * 100
ranks[blue1, 6] += red_rating + modifier
else:
ranks[blue1, 2] = max(100.0, ranks[blue1, 2] + delta_blue * blue1_contrib)
if ranks[blue2, 5]:
modifier = 0
if actual_blue == 1.0:
modifier = 400 - int(ranks[red1, 5]) * 100 - int(ranks[red2, 5]) * 100
elif actual_blue == 0.0:
modifier = -400 + int(ranks[red1, 5]) * 100 + int(ranks[red2, 5]) * 100
ranks[blue2, 6] += red_rating + modifier
else:
ranks[blue2, 2] = max(100.0, ranks[blue2, 2] + delta_blue * blue2_contrib)
if ranks[red1, 5]:
modifier = 0
if actual_red == 1.0:
modifier = 400 - int(ranks[blue1, 5]) * 100 - int(ranks[blue2, 5]) * 100
elif actual_red == 0.0:
                modifier = -400 + int(ranks[blue1, 5]) * 100 + int(ranks[blue2, 5]) * 100
ranks[red1, 6] += blue_rating + modifier
else:
ranks[red1, 2] = max(100.0, ranks[red1, 2] + delta_red * red1_contrib)
if ranks[red2, 5]:
modifier = 0
if actual_red == 1.0:
modifier = 400 - int(ranks[blue1, 5]) * 100 - int(ranks[blue2, 5]) * 100
elif actual_red == 0.0:
                modifier = -400 + int(ranks[blue1, 5]) * 100 + int(ranks[blue2, 5]) * 100
ranks[red2, 6] += blue_rating + modifier
else:
ranks[red2, 2] = max(100.0, ranks[red2, 2] + delta_red * red2_contrib)
ranks[blue1, 3] += 1
ranks[blue2, 3] += 1
ranks[red1, 3] += 1
ranks[red2, 3] += 1
if ranks[blue1, 5]:
ranks[blue1, 2] = max(100.0, ranks[blue1, 6] / ranks[blue1, 3])
if ranks[blue1, 3] >= nToEstablish:
ranks[blue1, 5] = False
if ranks[blue2, 5]:
ranks[blue2, 2] = max(100.0, ranks[blue2, 6] / ranks[blue2, 3])
if ranks[blue2, 3] >= nToEstablish:
ranks[blue2, 5] = False
if ranks[red1, 5]:
ranks[red1, 2] = max(100.0, ranks[red1, 6] / ranks[red1, 3])
if ranks[red1, 3] >= nToEstablish:
ranks[red1, 5] = False
if ranks[red2, 5]:
ranks[red2, 2] = max(100.0, ranks[red2, 6] / ranks[red2, 3])
if ranks[red2, 3] >= nToEstablish:
ranks[red2, 5] = False
return ranks
if rankings is None:
rankings = pd.DataFrame(
data={
'global rank': [0],
'team': ['0000'],
'elo': [800.0],
'played': [1],
'won': [1],
'provisional': [False],
'provision': [800.0],
'region': [''],
'country': [''],
'grade': ['']
},
columns=[
'global rank',
'team',
'elo',
'played',
'won',
'provisional',
'provision',
'region',
'country',
'grade'
]
).set_index('team')
np_rankings = rankings.reset_index().to_numpy()
else:
np_rankings = rankings.reset_index().to_numpy()
np_rankings[:, 0:10] = np_rankings[:, [1, 0, 2, 3, 4, 8, 9, 5, 6, 7]]
matches = matches.filter(
items=[
'blue1',
'blue2',
'bluescore',
'red1',
'red2',
'redscore'
]
)
for row in matches.values:
np_rankings = award_match(row, np_rankings)
rankings = pd.DataFrame(
np_rankings,
columns=[
'team', 'global rank', 'elo', 'played', 'won',
'provisional', 'provision', 'region', 'country', 'grade'
]
).set_index('team')
if '0000' in rankings.index:
rankings.drop('0000', inplace=True)
rankings = rankings.reset_index().set_index('global rank')
rankings.sort_values(by=['elo'], ascending=False, inplace=True)
rankings.index = range(1, len(rankings) + 1)
rankings = rankings.reindex(
columns=['team', 'elo', 'played', 'won', 'region', 'country', 'grade', 'provisional', 'provision']
)
return rankings
def update_rankings(selected_season='current'):
os.makedirs("data/" + selected_season, exist_ok=True)
if os.path.exists("data/" + selected_season + "/metadata.pickle"):
with open("data/" + selected_season + "/metadata.pickle", "rb") as file:
metadata = pickle.load(file)
if metadata.scored_matches == get_matches_scored(selected_season):
return None
print("new data uploaded, updating ratings...")
if os.path.exists("data/" + selected_season + "/teams.pickle"):
num_teams = get_teams_total()
with open("data/" + selected_season + "/teams.pickle", "rb") as file:
teams = pickle.load(file)
if teams.shape[0] < num_teams:
teams = get_all_teams()
with open("data/" + selected_season + "/teams.pickle", "wb") as file:
pickle.dump(teams, file)
else:
teams = get_all_teams()
with open("data/" + selected_season + "/teams.pickle", "wb") as file:
pickle.dump(teams, file)
if os.path.exists("data/" + selected_season + "/match_list.pickle"):
match_list = get_all_matches(selected_season)
metadata = Metadata(get_matches_scored(selected_season))
with open("data/" + selected_season + "/metadata.pickle", "wb") as file:
pickle.dump(metadata, file)
with open("data/" + selected_season + "/match_list.pickle", "rb") as file:
match_list_old = pickle.load(file)
with open("data/" + selected_season + "/match_list.pickle", "wb") as file:
pickle.dump(match_list, file)
match_list = match_list_old.merge(match_list, indicator=True, how='outer')
match_list = match_list[match_list['_merge'] == 'right_only']
else:
match_list = get_all_matches(selected_season)
metadata = Metadata(get_matches_scored(selected_season))
with open("data/" + selected_season + "/metadata.pickle", "wb") as file:
pickle.dump(metadata, file)
with open("data/" + selected_season + "/match_list.pickle", "wb") as file:
pickle.dump(match_list, file)
if os.path.exists("data/" + selected_season + "/elo_db.pickle"):
with open("data/" + selected_season + "/elo_db.pickle", "rb") as file:
elo_db = pickle.load(file)
elo_db = elo_rankings_from_matches(teams, match_list, elo_db)
else:
elo_db = elo_rankings_from_matches(teams, match_list)
with open("data/" + selected_season + "/elo_db.pickle", "wb") as file:
pickle.dump(elo_db, file)
elo_db.to_csv("data/" + selected_season + "/elo_db.csv")
return elo_db
def set_exit_signal(signo, _frame):
global exit_event
exit_event.set()
if __name__ == '__main__':
signal.signal(signal.SIGINT, set_exit_signal)
while not exit_event.is_set():
rankings = update_rankings('current')
exit_event.wait(150.0)
|
import matplotlib.pyplot as plt
import pandas
import sys
import avg
plt.close('all')
if len(sys.argv) == 2 :
    data = pandas.read_csv(sys.argv[1]) # read the data
else :
    data = pandas.read_csv('results_gAlea_10_150.csv')
data = data.sort_values(by ='nbSommetsGraphe') # sort the data
data = data.drop(['nomFichier', 'numInstance', 'score', 'tempsChemin', 'tempsCalcul'], axis=1) # drop the unneeded columns
dijsktra = data.loc[data['nomAlgo'] == 'Dijsktra'].drop(['heuristique', 'nomAlgo'], axis=1)
data = data.drop(['nomAlgo'], axis=1)
manhattan = data.loc[data['heuristique'] == 'Manhattan'].drop(['heuristique'], axis=1) # split by heuristic
euclidienne = data.loc[data['heuristique'] == 'euclidienne'].drop(['heuristique'], axis=1)
x = manhattan['nbSommetsGraphe'].tolist() # x values
yManhattan = manhattan['nbSommetsParcourus'].tolist() # y values
yEuclidienne = euclidienne['nbSommetsParcourus'].tolist()
yDijsktra = dijsktra['nbSommetsParcourus'].tolist()
plt.xlabel("Number of vertices in the graph")
plt.ylabel("Number of vertices visited before finding the shortest path")
plt.title("Shortest path and number of vertices visited")
plt.yscale("log") # logarithmic scale
plt.plot(x, yManhattan, label='A* - Manhattan')
plt.plot(x, yEuclidienne, label='A* - Euclidean')
plt.plot(x, yDijsktra, label='Dijsktra')
print("Average number of vertices visited - Manhattan: ", avg.Average(yManhattan))
print("Average number of vertices visited - Euclidean: ", avg.Average(yEuclidienne))
print("Average number of vertices visited - Dijsktra: ", avg.Average(yDijsktra))
plt.legend()
plt.show() |
from os import listdir
from os.path import isfile, join
import re
DIR_PATH = 'data'
FILES = [DIR_PATH + '/' + file for file in listdir(DIR_PATH) if isfile(join(DIR_PATH, file))]
def main_code():
p = re.compile(r'^".*"$')
is_blank = False
for file in FILES:
with open(file, 'r', encoding='utf-8') as f:
for line in f:
if p.search(line):
print(line.replace('"', '').strip())
is_blank = False
else:
if not is_blank:
is_blank = True
print()
print('-- ' * 100)
if __name__ == '__main__':
main_code()
|
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
from utils.tools import get_mask_from_lengths, pad, word_level_pooling
from .blocks import (
ConvNorm,
RelativeFFTBlock,
WordToPhonemeAttention,
)
from text.symbols import symbols
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
""" Sinusoid position encoding table """
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array(
[get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table)
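# Quick sanity check for the table above (illustrative only):
#   get_sinusoid_encoding_table(4, 6).shape == torch.Size([4, 6])
# even columns hold the sin terms and odd columns the cos terms.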
class LinguisticEncoder(nn.Module):
""" Linguistic Encoder """
def __init__(self, model_config, train_config):
super(LinguisticEncoder, self).__init__()
n_position = model_config["max_seq_len"] + 1
n_src_vocab = len(symbols) + 1
d_word_vec = model_config["transformer"]["encoder_hidden"]
n_layers = model_config["transformer"]["encoder_layer"]
n_head = model_config["transformer"]["encoder_head"]
d_k = d_v = (
model_config["transformer"]["encoder_hidden"]
// model_config["transformer"]["encoder_head"]
)
d_model = model_config["transformer"]["encoder_hidden"]
d_inner = model_config["transformer"]["conv_filter_size"]
kernel_size = model_config["transformer"]["conv_kernel_size"]
# dropout = model_config["transformer"]["encoder_dropout"]
window_size = model_config["transformer"]["encoder_window_size"]
self.helper_type = train_config["aligner"]["helper_type"]
self.max_seq_len = model_config["max_seq_len"]
self.d_model = d_model
self.n_head = n_head
self.src_emb = nn.Embedding(
n_src_vocab, d_word_vec, padding_idx=0
)
self.abs_position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
requires_grad=False,
)
self.kv_position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
requires_grad=True,
)
self.q_position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
requires_grad=True,
)
self.phoneme_encoder = RelativeFFTBlock(
hidden_channels=d_model,
filter_channels=d_inner,
n_heads=n_head,
n_layers=n_layers,
kernel_size=kernel_size,
# p_dropout=dropout,
window_size=window_size,
)
self.word_encoder = RelativeFFTBlock(
hidden_channels=d_model,
filter_channels=d_inner,
n_heads=n_head,
n_layers=n_layers,
kernel_size=kernel_size,
# p_dropout=dropout,
window_size=window_size,
)
self.length_regulator = LengthRegulator()
self.duration_predictor = VariancePredictor(model_config)
self.w2p_attn = WordToPhonemeAttention(
n_head, d_model, d_k, d_v # , dropout=dropout
)
def get_mapping_mask(self, q, kv, dur_w, wb, src_w_len):
"""
For applying a word-to-phoneme mapping mask to the attention weight to force each query (Q)
        to attend only to the phonemes belonging to the word corresponding to this query.
"""
batch_size, q_len, kv_len, device = q.shape[0], q.shape[1], kv.shape[1], kv.device
mask = torch.ones(batch_size, q_len, kv_len, device=device)
for b, (w, p, l) in enumerate(zip(dur_w, wb, src_w_len)):
w, p = [0]+[d.item() for d in torch.cumsum(w[:l], dim=0)], [0] + \
[d.item() for d in torch.cumsum(p[:l], dim=0)]
# assert len(w) == len(p)
for i in range(1, len(w)):
mask[b, w[i-1]:w[i], p[i-1]:p[i]
] = torch.zeros(w[i]-w[i-1], p[i]-p[i-1], device=device)
return mask == 0.
def add_position_enc(self, src_seq, position_enc=None, coef=None):
batch_size, max_len = src_seq.shape[0], src_seq.shape[1]
if not self.training and src_seq.shape[1] > self.max_seq_len:
pos_enc = get_sinusoid_encoding_table(
src_seq.shape[1], self.d_model
)[: src_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
src_seq.device
)
if coef is not None:
pos_enc = coef.unsqueeze(-1) * pos_enc
enc_output = src_seq + pos_enc
else:
position_enc = self.abs_position_enc if position_enc is None else position_enc
pos_enc = position_enc[
:, :max_len, :
].expand(batch_size, -1, -1)
if coef is not None:
pos_enc = coef.unsqueeze(-1) * pos_enc
enc_output = src_seq + pos_enc
return enc_output
def get_rel_coef(self, dur, dur_len, mask):
"""
        Compute relative position coefficients used to scale the positional encoding added to the inputs of the word-to-phoneme attention module.
"""
idx, L, device = [], [], dur.device
for d, dl in zip(dur, dur_len):
idx_b, d = [], d[:dl].long()
m = torch.repeat_interleave(d, torch.tensor(
list(d), device=device), dim=0) # [tgt_len]
L.append(m)
for d_i in d:
idx_b += list(range(d_i))
idx.append(torch.tensor(idx_b).to(device))
# assert L[-1].shape == idx[-1].shape
return torch.div(pad(idx).to(device), pad(L).masked_fill(mask == 0., 1.).to(device))
def forward(
self,
src_p_seq,
src_p_len,
word_boundary,
src_p_mask,
src_w_len,
src_w_mask,
mel_mask=None,
max_len=None,
attn_prior=None,
duration_target=None,
duration_control=1.0,
):
# Phoneme Encoding
src_p_seq = self.src_emb(src_p_seq)
enc_p_out = self.phoneme_encoder(src_p_seq.transpose(
1, 2), src_p_mask.unsqueeze(1)).transpose(1, 2)
        # Word-level Pooling
src_w_seq = word_level_pooling(
enc_p_out, src_p_len, word_boundary, src_w_len, reduce="mean")
# Word Encoding
enc_w_out = self.word_encoder(src_w_seq.transpose(
1, 2), src_w_mask.unsqueeze(1)).transpose(1, 2)
# Phoneme-level Duration Prediction
log_duration_p_prediction = self.duration_predictor(
enc_p_out, src_p_mask)
# Word-level Pooling (in log scale)
log_duration_w_prediction = word_level_pooling(
log_duration_p_prediction.exp().unsqueeze(-1), src_p_len, word_boundary, src_w_len, reduce="sum").log().squeeze(-1)
x = enc_w_out
if duration_target is not None:
            # Word-level Pooling
duration_w_rounded = word_level_pooling(
duration_target.unsqueeze(-1), src_p_len, word_boundary, src_w_len, reduce="sum").squeeze(-1)
# Word-level Length Regulate
x, mel_len = self.length_regulator(x, duration_w_rounded, max_len)
else:
# Word-level Duration
duration_w_rounded = torch.clamp(
(torch.round(torch.exp(log_duration_w_prediction) - 1) * duration_control),
min=0,
).long()
# Word-level Length Regulate
x, mel_len = self.length_regulator(x, duration_w_rounded, max_len)
mel_mask = get_mask_from_lengths(mel_len)
# Word-to-Phoneme Attention
# [batch, mel_len, seq_len]
src_mask_ = src_p_mask.unsqueeze(1).expand(-1, mel_mask.shape[1], -1)
# [batch, mel_len, seq_len]
mel_mask_ = mel_mask.unsqueeze(-1).expand(-1, -1, src_p_mask.shape[1])
# [batch, mel_len, seq_len]
mapping_mask = self.get_mapping_mask(
x, enc_p_out, duration_w_rounded, word_boundary, src_w_len)
q = self.add_position_enc(x, position_enc=self.q_position_enc, coef=self.get_rel_coef(
duration_w_rounded, src_w_len, mel_mask))
k = self.add_position_enc(
enc_p_out, position_enc=self.kv_position_enc, coef=self.get_rel_coef(word_boundary, src_p_len, src_p_mask))
v = self.add_position_enc(
enc_p_out, position_enc=self.kv_position_enc, coef=self.get_rel_coef(word_boundary, src_p_len, src_p_mask))
# q = self.add_position_enc(x)
# k = self.add_position_enc(enc_p_out)
# v = self.add_position_enc(enc_p_out)
x, attns, attn_logprob = self.w2p_attn(
q=q,
k=k,
v=v,
key_mask=src_mask_,
query_mask=mel_mask_,
mapping_mask=mapping_mask,
indivisual_attn=True,
attn_prior=attn_prior if self.helper_type == "ctc" else None,
)
return (
x,
log_duration_w_prediction,
duration_w_rounded,
mel_len,
mel_mask,
attns,
attn_logprob,
)
class LengthRegulator(nn.Module):
""" Length Regulator """
def __init__(self):
super(LengthRegulator, self).__init__()
def LR(self, x, duration, max_len):
output = list()
mel_len = list()
for batch, expand_target in zip(x, duration):
expanded = self.expand(batch, expand_target)
output.append(expanded)
mel_len.append(expanded.shape[0])
if max_len is not None:
output = pad(output, max_len)
else:
output = pad(output)
return output, torch.LongTensor(mel_len).to(x.device)
def expand(self, batch, predicted):
out = list()
for i, vec in enumerate(batch):
expand_size = predicted[i].item()
out.append(vec.expand(max(int(expand_size), 0), -1))
out = torch.cat(out, 0)
return out
def forward(self, x, duration, max_len):
output, mel_len = self.LR(x, duration, max_len)
return output, mel_len
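# Illustrative example (assumed tensor shapes, not executed): with a single item of two
# word-level vectors, durations [2, 3] and max_len=None, LR repeats the first vector 2 times
# and the second 3 times, returning a (1, 5, d_model) tensor and mel_len == tensor([5]).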
class VariancePredictor(nn.Module):
""" Duration, Pitch and Energy Predictor """
def __init__(self, model_config):
super(VariancePredictor, self).__init__()
self.input_size = model_config["transformer"]["encoder_hidden"]
self.filter_size = model_config["variance_predictor"]["filter_size"]
self.kernel = model_config["variance_predictor"]["kernel_size"]
self.conv_output_size = model_config["variance_predictor"]["filter_size"]
self.dropout = model_config["variance_predictor"]["dropout"]
self.conv_layer = nn.Sequential(
OrderedDict(
[
(
"conv1d_1",
ConvNorm(
self.input_size,
self.filter_size,
kernel_size=self.kernel,
stride=1,
padding=(self.kernel - 1) // 2,
dilation=1,
channel_last=True,
),
),
("relu_1", nn.ReLU()),
("layer_norm_1", nn.LayerNorm(self.filter_size)),
("dropout_1", nn.Dropout(self.dropout)),
(
"conv1d_2",
ConvNorm(
self.filter_size,
self.filter_size,
kernel_size=self.kernel,
stride=1,
padding=1,
dilation=1,
channel_last=True,
),
),
("relu_2", nn.ReLU()),
("layer_norm_2", nn.LayerNorm(self.filter_size)),
("dropout_2", nn.Dropout(self.dropout)),
]
)
)
self.linear_layer = nn.Linear(self.conv_output_size, 1)
def forward(self, encoder_output, mask):
out = self.conv_layer(encoder_output)
out = self.linear_layer(out)
out = out.squeeze(-1)
if mask is not None:
out = out * mask
return out
|
from __future__ import unicode_literals
import re
from collections import OrderedDict
import io
from sqlalchemy import create_engine
from sqlalchemy.orm import joinedload, joinedload_all
import json
from clld.scripts.util import parsed_args
from clld.lib.dsv import reader, UnicodeWriter
from clld.db.meta import DBSession
from clld.db.models.common import Language, ValueSet, ValueSetReference, Value
class GBFeature(object):
@staticmethod
def yield_domainelements(s):
try:
for m in re.split('\s*,|;\s*', re.sub('^multistate\s+', '', s.strip())):
if m.strip():
if m.startswith('As many'):
for i in range(100):
yield '%s' % i, '%s' % i
else:
number, desc = m.split(':')
yield number.strip(), desc.strip()
except:
print s
raise
def __init__(self, d):
self.id = d['GramBank ID'].strip()
self.name = d['Feature']
self.domain = OrderedDict()
for n, desc in self.yield_domainelements(d['Possible Values']):
self.domain[n] = desc
self.domain.update({'?': 'Not known'})
def format_domain(self):
return '; '.join('%s: %s' % item for item in self.domain.items() if item[0] != '?')
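# Illustrative parsing example (not executed): GBFeature.yield_domainelements turns a
# "Possible Values" string such as "multistate 1: Yes; 2: No" into the pairs
# ('1', 'Yes') and ('2', 'No'), which then populate the ordered `domain` mapping.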
def row(gc, f, vs=None, value=None):
return [
gc,
f.id,
f.name,
f.format_domain(),
value or '',
(vs.source or '') if vs else '',
(vs.values[0].comment or '') if vs else '',
]
def export(args, lang, features, gc, ma):
values = {k: row(gc, f) for k, f in features.items()}
errors = []
sources = {}
n = 0
for vs in DBSession.query(ValueSet).filter(ValueSet.language == lang).options(
joinedload(ValueSet.parameter),
joinedload_all(ValueSet.values, Value.domainelement),
joinedload(ValueSet.references, ValueSetReference.source),
):
if vs.parameter.id in features:
n += 1
f = features[vs.parameter.id]
value = vs.values[0].domainelement.name
if value == 'N/A':
value = '?'
assert value in f.domain
values[vs.parameter.id] = row(gc, f, vs, value)
for ref in vs.references:
sources[ref.source.name] = ref.source
print lang.id, ':', n, 'of', len(values)
subdir = args.data_file('grambank', ma)
if not subdir.exists():
subdir.mkdir()
path = lambda suffix: subdir.joinpath(gc + suffix)
with UnicodeWriter(path('.tsv'), delimiter=b'\t') as writer:
writer.writerow(['Language_ID', 'Feature_ID', 'Feature', 'Domain', 'Value', 'Source', 'Comment'])
writer.writerows(sorted(values.values(), key=lambda r: r[1]))
with open(path('.tsv-metadata.json'), 'wb') as fp:
json.dump({
'type': 'FeatureCollection',
'features': [
{
"type": "Feature",
"id": 'http://glottolog.org/resource/languoid/id/%s' % gc,
"geometry": {
"type": "Point",
"coordinates": [lang.longitude, lang.latitude]
},
"properties": {
'name': lang.name,
'glottocode': gc,
'iso-639-3': lang.iso_code,
},
},
],
'comment': 'Converted from NTS data',
#'sources': {
# name: sources[name].bibtex().id for name in sorted(sources.keys())
#}
},
fp,
indent=4,
#allow_unicode=True,
#default_flow_style=False
)
with io.open(path('.bib'), 'w', encoding='utf8') as fp:
for src in set(sources.values()):
rec = src.bibtex()
rec['key'] = src.name
fp.write('%s\n\n' % rec)
return errors
def main(args):
features = reader(args.data_file('grambank_features.csv'), dicts=True, )
features = [GBFeature(f) for f in features]
features = {'%s' % int(f.id[2:]): f for f in features}
errors = []
db = create_engine('postgresql://robert@/glottolog3')
for l in DBSession.query(Language):
if l.id == 'qgr':
continue
gc = l.glottocode
ma = db.execute("""
select
m.id
from
macroarea as m, languoidmacroarea as lm, language as l
where
m.pk = lm.macroarea_pk and lm.languoid_pk = l.pk and l.id = '%s';""" % gc).fetchone()[0]
if ma == 'pacific':
ma = 'papunesia'
errors.extend(export(args, l, features, gc, ma))
with UnicodeWriter(args.data_file('na_errors.tsv'), delimiter=b'\t') as writer:
writer.writerow(['Language', 'Feature', 'Value', 'Source', 'Comment'])
writer.writerows(errors)
if __name__ == '__main__':
main(parsed_args())
|
# Generated by Django 3.1.7 on 2021-05-28 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dm_page', '0012_auto_20210527_1458'),
]
operations = [
migrations.AlterField(
model_name='donation',
name='date_donated',
field=models.DateField(blank=True, null=True),
),
]
|
class DataModel:
"""holds the data for STL models"""
def __init__(self):
"""add doc"""
self.header = None
self.triangles = []
self.nr_of_faces = 0
class Triangle:
"""each Triangle object holds coordinates for normals and vertexes"""
def __init__(self):
"""add doc"""
self.normal_coordinates = []
self.vertex_list = []
self.byte_count = 0
|
"""
Checks the github API for projects within a certain set that do not use CI
tools.
"""
import argparse
import json
import requests
import time
from bs4 import BeautifulSoup
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
class Scraper:
"""
This class scrapes GitHub for information, depending on the specified
query.
"""
REQUEST_LIMIT = 2400
def __init__(self, user, authFile=None, target="API"):
if authFile is None:
raise Exception("No authentication file provided!!")
if target == "API":
self.gh_request = "https://api.github.com/"
elif target == "WEB":
self.gh_request = "https://github.com/"
self.parameters = dict()
self.result = dict()
self.user = user
self.requests = 0
self.target = target
token = json.load(open(authFile))
self.user = token['user']
self.token = token['token']
def query(self, endpoint, parameters=dict(), method="GET",
data_type="std"):
"""
Queries the desired endpoint using the given parameters.
:param endpoint: The GitHub endpoint to be queried.
        :param parameters: A dictionary containing the endpoint's parameters.
        :param method: The HTTP method to use (e.g. "GET").
        :param data_type: The type of data queried by the user
(diff, patch, std)
:return: The result in the requested format.
"""
result = None
# Perform the request:
gh_query = self.gh_request + endpoint
header_type = ''
if data_type.lower() == "diff":
header_type = 'application/vnd.github.diff'
elif data_type.lower() == "patch":
header_type = 'application/vnd.github.patch'
else:
header_type = 'application/vnd.github+json'
if self.target == "API":
gh_uri = "%s?%s" % (gh_query, urlencode(parameters))
gh_json_result = requests.request(
method, gh_uri, auth=(self.user, self.token),
headers={'Accept': header_type}
)
if data_type.lower() == 'std':
content = gh_json_result.content.decode('utf-8')
if content is not None and not content == '':
result = json.loads(content)
else:
result = json.loads(
'{"message": "No result returned"}'
)
else:
result = gh_json_result.content
elif self.target == "WEB":
gh_uri = "%s" % (gh_query)
gh_result = requests.get(gh_uri)
soup = BeautifulSoup(gh_result.text.encode("utf8"),
'html.parser')
result = soup
self.requests += 1
time.sleep(1)
return result
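# Minimal usage sketch (assumptions: the auth file is a JSON document with "user" and
# "token" keys, as read in __init__, and "repos/<owner>/<repo>" is a valid GitHub API
# endpoint path):
#
#   scraper = Scraper("someuser", authFile="auth.json", target="API")
#   repo_info = scraper.query("repos/someuser/somerepo")
#   print(repo_info.get("stargazers_count"))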
|
import sigopt
from data_and_model_setup import LoadTransformData, log_inference_metrics
import time
import platform
from xgboost.sklearn import XGBClassifier
def train_xgboost_model(dataset, random_state=1):
print("loading and transforming data")
load_transform_data = LoadTransformData()
trainX, testX, trainY, testY = load_transform_data.load_split_dataset(dataset)
# model architecture
sigopt.log_model("XGBClassifier") # model_keras.__class__
sigopt.log_dataset('Unscaled')
sigopt.log_metadata('Training Records', len(trainX))
    sigopt.log_metadata('Testing Records', len(testX))
sigopt.log_metadata("Platform", platform.uname())
parameters = {
'objective': 'binary:logistic',
'learning_rate': sigopt.get_parameter('learning_rate', default=0.3),
'n_estimators': sigopt.get_parameter('n_estimators', default=20),
'max_depth': sigopt.get_parameter('max_depth', default=5),
'gamma': sigopt.get_parameter('gamma', default=0),
'min_child_weight': sigopt.get_parameter('min_child_weight', default=1),
'random_state': random_state,
'importance_type': 'gain',
'missing': None,
'verbosity': 2}
model = XGBClassifier(**parameters)
modelfit = model.fit(trainX, trainY)
# Collect model metrics
start = time.perf_counter()
prediction = modelfit.predict(testX)
sigopt.log_metric("Inference Time", time.perf_counter() - start)
probability = modelfit.predict_proba(testX)[:, 1]
log_inference_metrics(prediction, probability, testY, testX)
if __name__ == "__main__":
dataset_file = 'https://www.dropbox.com/s/437qdt4yjj64sxd/Fraud_Detection_SigOpt_dataset.csv?dl=1'
train_xgboost_model(dataset_file)
|
"""
You dug up a treasure with a lock that has N binary dials. The lock can be unlocked by turning one dial at a time towards the unlock pattern.
WARNING: only some patterns are safe (you can access them in a global variable SAFE). Any other pattern will cause the destruction of the contents inside.
Your task is to write a function that will accept a lock description: the current state of dials and the unlock sequence.
Your function should return "TREASURE" if it's possible to unlock the treasure, and "IMPOSSIBLE" otherwise. As a secondary goal,
you'd love to do this as quickly as possible, and record the sequence used.
Example 1:
Initial pattern: 010 Unlocked pattern: 111 Safe patterns: 000 001 010 101 111
Correct response is: TREASURE because there is a safe sequence from the initial pattern to the unlock pattern: 010 -> 000 -> 001 -> 101 -> 111
Example 2:
Initial pattern: 00 Unlocked pattern: 11 Safe patterns: 00 11
Correct response is: "IMPOSSIBLE" as no sequence can go from 00 to 11 with one dial turn at a time.
Example 3:
Initial pattern: 00 Unlocked pattern: 11 Safe patterns: 00 01 11
Correct response is: TREASURE because there is a safe sequence to the unlock pattern (00->01->11).
"""
SAFE = []
# Example safe sets from the prompt above (patterns are strings of '0' and '1'):
# SAFE = ["00", "01", "11"]
# SAFE = ["010", "000", "001", "101", "111"]
def findTreasure(start, final, sequence=None):
    """Depth-first search over safe patterns, turning one dial at a time.
    Returns "TREASURE" if `final` is reachable from `start` through patterns in
    the global SAFE; otherwise "IMPOSSIBLE". Visited patterns are recorded in `sequence`.
    """
    if sequence is None:
        sequence = [start]
    if len(final) != len(start):
        return "IMPOSSIBLE"
    for i in range(len(start)):
        # Flip dial i.
        bit = '0' if start[i] == '1' else '1'
        flip = start[:i] + bit + start[i + 1:]
        if flip == final:
            sequence.append(flip)
            return "TREASURE"
        elif flip in sequence:
            continue
        elif flip in SAFE:
            sequence.append(flip)
            if findTreasure(flip, final, sequence) == "TREASURE":
                return "TREASURE"
    return "IMPOSSIBLE"
"""
You own an ice cream shop, and want to help undecided customers with their flavor choices.
You have a history of all of the flavor combinations that people have bought in the past six months.
Write an algorithm that suggests a another scoop, given that the customer has picked up to two themselves.
"""
# Each customer could keep their own flavor graph, e.g.:
#   customers = {name: FlavorGraph() for name in customer_names}
class FlavorGraph:
    """Co-occurrence graph, e.g. Chocolate - Vanilla - Strawberry: `adjacency` maps
    each flavor to the flavors bought with it and their co-occurrence counts."""
    def __init__(self):
        self.adjacency = {}  # flavor -> {other_flavor: co-occurrence count}
    def vertices(self):
        return self.adjacency.keys()
    def add_edge(self, flavor, other):
        self.adjacency.setdefault(flavor, {})
        self.adjacency[flavor][other] = self.adjacency[flavor].get(other, 0) + 1
# listOfChoices = [("Chocolate", "Vanilla"), ("Strawberry",)]
def createFlavorGraph(listOfChoices):
    graph = FlavorGraph()
    for choice in listOfChoices:
        for flavor in choice:
            otherFlavors = [f for f in choice if f != flavor]
            if flavor not in graph.vertices():
                graph.adjacency[flavor] = {}
            for otherFlavor in otherFlavors:
                graph.add_edge(flavor, otherFlavor)
    return graph
|
# Daishin Securities (CYBOS PLUS) API
# Example: overseas futures open positions (balance) and real-time order/fill handling
# A reference example that queries overseas futures open positions and processes order fills in real time.
#
# PLUS objects used
# - CpForeTrade.OvfNotPaymentInq: query overseas futures open positions (balance)
# - CpForeDib.OvFutBalance: real-time updates of overseas futures open positions (balance)
#
# Provided features
#  - query of overseas futures open positions (balance)
#  - real-time order/fill updates
#
# Not provided
#  - no current-price query or real-time price updates in this example
#  - valuation amounts are not updated in real time
#
# Note: this example is provided for reference purposes only.
import sys
from PyQt5.QtWidgets import *
import win32com.client
from pandas import Series, DataFrame
import pandas as pd
import locale
import os
locale.setlocale(locale.LC_ALL, '')
# cp object
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
g_objCpStatus = win32com.client.Dispatch('CpUtil.CpCybos')
g_objCpTrade = win32com.client.Dispatch('CpTrade.CpTdUtil')
g_objOptionMgr = win32com.client.Dispatch("CpUtil.CpOptionCode")
gExcelFile = 'ovfuturejango.xlsx'
# CpEvent: class that receives real-time PLUS events
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to tell apart events from different services
        self.caller = caller  # kept for the callback
def OnReceived(self):
if self.name == "ovfjango":
pbdata = {}
pbdata["처리구분"] = self.client.GetHeaderValue(2) # “00”-주문, “01”-정정, “02”-취소, “03”-체결
pbdata["code"] = self.client.GetHeaderValue(6) # 종목코드
pbdata["종목명"] = self.client.GetHeaderValue(7) # 종목코드
pbdata["매매구분"] = self.client.GetHeaderValue(8) # 매매구분
pbdata["잔고수량"] = self.client.GetHeaderValue(9) # 잔고수량
pbdata["단가"] = self.client.GetHeaderValue(10) # 단가
pbdata["청산가능"] = self.client.GetHeaderValue(11) # 청산가능수량
pbdata["미체결수량"] = self.client.GetHeaderValue(12) # 미체결수량
pbdata["현재가"] = self.client.GetHeaderValue(13) # 현재가
pbdata["대비부호"] = self.client.GetHeaderValue(14) # 전일대비부호
pbdata["전일대비"] = self.client.GetHeaderValue(15) # 전일대비
pbdata["전일대비율"] = self.client.GetHeaderValue(16) # 전일대비율
pbdata["평가금액"] = self.client.GetHeaderValue(17) # 평가금액
pbdata["평가손익"] = self.client.GetHeaderValue(18) # 평가손익
pbdata["손익률"] = self.client.GetHeaderValue(19) # 손익률
pbdata["매입금액"] = self.client.GetHeaderValue(20) # 매입금액
pbdata["승수"] = self.client.GetHeaderValue(21) # 승수
pbdata["통화코드"] = self.client.GetHeaderValue(23) # 통화코드
print(pbdata)
self.caller.updateContract(pbdata)
class CpPublish:
def __init__(self, name, serviceID):
self.name = name
self.obj = win32com.client.Dispatch(serviceID)
self.bIsSB = False
def Subscribe(self, var, caller):
if self.bIsSB:
self.Unsubscribe()
if (len(var) > 0):
self.obj.SetInputValue(0, var)
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, caller)
self.obj.Subscribe()
self.bIsSB = True
def Unsubscribe(self):
if self.bIsSB:
self.obj.Unsubscribe()
self.bIsSB = False
class CpPBOvfJango(CpPublish):
def __init__(self):
super().__init__('ovfjango', 'CpForeDib.OvFutBalance')
# Request/response handling for overseas futures open positions (balance)
class CpOvfJango:
def __init__(self):
        self.acc = g_objCpTrade.AccountNumber[0]  # account number
def Request(self, caller):
if (g_objCpStatus.IsConnect == 0):
print('PLUS가 정상적으로 연결되지 않음. ')
return False
        # Overseas futures open positions (balance)
        objRq = win32com.client.Dispatch('CpForeTrade.OvfNotPaymentInq')
        objRq.SetInputValue(1, self.acc)  # account number
        while True:
            objRq.BlockRequest()
            # Request the data and handle communication errors
rqStatus = objRq.GetDibStatus()
rqRet = objRq.GetDibMsg1()
print('통신상태', rqStatus, rqRet)
if rqStatus != 0:
return False
            # number of records returned
cnt = objRq.GetHeaderValue(0)
print(cnt)
if cnt == 0:
break
for i in range(cnt):
item = {}
                item['code'] = objRq.GetDataValue(3, i)  # code
item['종목명'] = objRq.GetDataValue(4, i)
item['매매구분'] = objRq.GetDataValue(5, i)
item['잔고수량'] = objRq.GetDataValue(6, i)
item['단가'] = objRq.GetDataValue(7, i)
item['청산가능'] = objRq.GetDataValue(8, i)
item['미체결수량'] = objRq.GetDataValue(9, i)
item['현재가'] = objRq.GetDataValue(10, i)
item['전일대비'] = objRq.GetDataValue(11, i)
item['전일대비율'] = objRq.GetDataValue(12, i)
item['평가금액'] = objRq.GetDataValue(13, i)
item['평가손익'] = objRq.GetDataValue(14, i)
item['손익률'] = objRq.GetDataValue(15, i)
item['매입금액'] = objRq.GetDataValue(16, i)
item['승수'] = objRq.GetDataValue(17, i)
item['통화코드'] = objRq.GetDataValue(18, i)
key = item['code'] + item['매매구분']
caller.ovfJangadata[key] = item
print(item)
if objRq.Continue == False:
print("연속 조회 여부: 다음 데이터가 없음")
break
# print(self.data)
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.bTradeInit = False
        # Check the connection status
if (g_objCpStatus.IsConnect == 0):
print("PLUS가 정상적으로 연결되지 않음. ")
return False
if (g_objCpTrade.TradeInit(0) != 0):
print("주문 초기화 실패")
return False
self.bTradeInit = True
self.pbContract = CpPBOvfJango()
self.setWindowTitle('PLUS API TEST')
self.setGeometry(300, 300, 300, 240)
        # Overseas futures positions
self.ovfJangadata = {}
nH = 20
btnPrint = QPushButton('DF Print', self)
btnPrint.move(20, nH)
btnPrint.clicked.connect(self.btnPrint_clicked)
nH += 50
btnExcel = QPushButton('엑셀 내보내기', self)
btnExcel.move(20, nH)
btnExcel.clicked.connect(self.btnExcel_clicked)
nH += 50
btnExit = QPushButton('종료', self)
btnExit.move(20, nH)
btnExit.clicked.connect(self.btnExit_clicked)
self.btnStart_clicked()
def btnStart_clicked(self):
        # Requested fields - instrument code, time, change sign, change, current price, volume, instrument name
obj = CpOvfJango()
obj.Request(self);
self.pbContract.Unsubscribe()
self.pbContract.Subscribe("", self)
def btnPrint_clicked(self):
for key, value in self.ovfJangadata.items():
print(key, value)
def btnExit_clicked(self):
self.pbContract.Unsubscribe()
exit()
def btnExcel_clicked(self):
if (len(self.ovfJangadata) == 0):
print('잔고 없음')
return
# df= pd.DataFrame(columns=self.ovfJangadata.keys())
isFirst = True
for k, v in self.ovfJangadata.items():
            # Create the DataFrame columns from the data's key values
if isFirst:
df = pd.DataFrame(columns=v.keys())
isFirst = False
df.loc[len(df)] = v
# create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(gExcelFile, engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
os.startfile(gExcelFile)
return
    # Real-time order/fill update
def updateContract(self, pbdata):
key = pbdata['code'] + pbdata['매매구분']
print(key)
        # Add a new position
if key not in self.ovfJangadata.keys():
print(key, '찾기 실패')
if pbdata["잔고수량"] == 0:
return
item = {}
item['code'] = pbdata['code']
item['종목명'] = pbdata['종목명']
item['매매구분'] = pbdata['매매구분']
item['잔고수량'] = pbdata['잔고수량']
item['단가'] = pbdata['단가']
item['청산가능'] = pbdata['청산가능']
item['미체결수량'] = pbdata['미체결수량']
item['현재가'] = pbdata['현재가']
item['전일대비'] = pbdata['전일대비']
item['전일대비율'] = pbdata['전일대비율']
item['평가금액'] = pbdata['평가금액']
item['평가손익'] = pbdata['평가손익']
item['손익률'] = pbdata['손익률']
item['매입금액'] = pbdata['매입금액']
item['승수'] = pbdata['승수']
item['통화코드'] = pbdata['통화코드']
self.ovfJangadata[key] = item
print('새로운 잔고 추가 -', key)
return
        # Handle an existing position
item = self.ovfJangadata[key]
item['잔고수량'] = pbdata['잔고수량']
item['청산가능'] = pbdata['청산가능']
if (pbdata["처리구분"] == '00'): # 주문 접수
print('주문 접수 -', key)
self.ovfJangadata[key] = item
return
        if item['잔고수량'] == 0:  # position closed, remove it
del self.ovfJangadata[key]
print('잔고 삭제 -', key)
return
print('체결 업데이트-', key)
item['단가'] = pbdata['단가']
item['미체결수량'] = pbdata['미체결수량']
item['현재가'] = pbdata['현재가']
item['전일대비'] = pbdata['전일대비']
item['전일대비율'] = pbdata['전일대비율']
item['평가금액'] = pbdata['평가금액']
item['평가손익'] = pbdata['평가손익']
item['손익률'] = pbdata['손익률']
item['매입금액'] = pbdata['매입금액']
self.ovfJangadata[key] = item
return
if __name__ == '__main__':
app = QApplication(sys.argv)
myWindow = MyWindow()
myWindow.show()
    app.exec_()
|
r"""
Discrete Valuation Rings (DVR) and Fields (DVF)
"""
#**************************************************************************
# Copyright (C) 2013 Xavier Caruso <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#**************************************************************************
from sage.misc.abstract_method import abstract_method
from sage.categories.category_singleton import Category_singleton
from sage.categories.euclidean_domains import EuclideanDomains
from sage.categories.fields import Fields
class DiscreteValuationRings(Category_singleton):
"""
The category of discrete valuation rings
EXAMPLES::
sage: GF(7)[['x']] in DiscreteValuationRings()
True
sage: TestSuite(DiscreteValuationRings()).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: DiscreteValuationRings().super_categories()
[Category of euclidean domains]
"""
return [EuclideanDomains()]
class ParentMethods:
@abstract_method
def uniformizer(self):
"""
Return a uniformizer of this ring.
EXAMPLES::
sage: Zp(5).uniformizer()
5 + O(5^21)
sage: K.<u> = QQ[[]]
sage: K.uniformizer()
u
"""
@abstract_method
def residue_field(self):
"""
Return the residue field of this ring.
EXAMPLES::
sage: Zp(5).residue_field()
Finite Field of size 5
sage: K.<u> = QQ[[]]
sage: K.residue_field()
Rational Field
"""
class ElementMethods:
@abstract_method
def valuation(self):
"""
Return the valuation of this element.
EXAMPLES::
sage: x = Zp(5)(50)
sage: x.valuation()
2
"""
def euclidean_degree(self):
"""
Return the Euclidean degree of this element.
TESTS::
sage: R.<q> = GF(5)[[]]
sage: (q^3).euclidean_degree()
3
sage: R(0).euclidean_degree()
Traceback (most recent call last):
...
ValueError: Euclidean degree of the zero element not defined
"""
if not self:
raise ValueError("Euclidean degree of the zero element not defined")
return self.valuation()
def quo_rem(self, other):
"""
Return the quotient and remainder for Euclidean division
of ``self`` by ``other``.
TESTS::
sage: R.<q> = GF(5)[[]]
sage: (q^2 + q).quo_rem(q)
(1 + q, 0)
sage: (q + 1).quo_rem(q^2)
(0, 1 + q)
sage: q.quo_rem(0)
Traceback (most recent call last):
...
ZeroDivisionError: Euclidean division by the zero element not defined
"""
if not other:
raise ZeroDivisionError("Euclidean division by the zero element not defined")
P = self.parent()
if self.valuation() >= other.valuation():
return P(self / other), P.zero()
else:
return P.zero(), self
def is_unit(self):
"""
Return True if self is invertible.
EXAMPLES::
sage: x = Zp(5)(50)
sage: x.is_unit()
False
sage: x = Zp(7)(50)
sage: x.is_unit()
True
"""
return self.valuation() == 0
def gcd(self,other):
"""
Return the greatest common divisor of self and other,
normalized so that it is a power of the distinguished
uniformizer.
"""
from sage.rings.infinity import Infinity
val = min(self.valuation(), other.valuation())
if val is Infinity:
return self.parent()(0)
else:
return self.parent().uniformizer() ** val
def lcm(self,other):
"""
Return the least common multiple of self and other,
normalized so that it is a power of the distinguished
uniformizer.
"""
from sage.rings.infinity import Infinity
val = max(self.valuation(), other.valuation())
if val is Infinity:
return self.parent()(0)
else:
return self.parent().uniformizer() ** val
class DiscreteValuationFields(Category_singleton):
"""
The category of discrete valuation fields
EXAMPLES::
sage: Qp(7) in DiscreteValuationFields()
True
sage: TestSuite(DiscreteValuationFields()).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: DiscreteValuationFields().super_categories()
[Category of fields]
"""
return [Fields()]
class ParentMethods:
@abstract_method
def uniformizer(self):
"""
Return a uniformizer of this ring.
EXAMPLES::
sage: Qp(5).uniformizer()
5 + O(5^21)
"""
@abstract_method
def residue_field(self):
"""
Return the residue field of the ring of integers of
this discrete valuation field.
EXAMPLES::
sage: Qp(5).residue_field()
Finite Field of size 5
sage: K.<u> = LaurentSeriesRing(QQ)
sage: K.residue_field()
Rational Field
"""
class ElementMethods:
@abstract_method
def valuation(self):
"""
Return the valuation of this element.
EXAMPLES::
sage: x = Qp(5)(50)
sage: x.valuation()
2
"""
|
from queue import PriorityQueue
from pxtrade import AbstractEvent
class EventsQueue(PriorityQueue):
def put(self, event):
if not isinstance(event, AbstractEvent):
raise TypeError("Only expecting Event objects in the queue.")
super().put((event.datetime, event))
def __len__(self):
return len(self.queue)
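# Minimal usage sketch (assumes an AbstractEvent subclass exposing a `datetime`
# attribute, as required by `put` above):
#
#   queue = EventsQueue()
#   queue.put(some_event)            # raises TypeError for non-AbstractEvent objects
#   _, next_event = queue.get()      # events come back in chronological order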
|
# -*- coding: utf-8 -*-
from flask import g, jsonify
import json
import kuas_api.kuas.cache as cache
import kuas_api.modules.error as error
import kuas_api.modules.const as const
from kuas_api.modules.stateless_auth import auth
from .doc import auto
# Nestable blueprints problem
# not sure whether this is a best practice now.
# https://github.com/mitsuhiko/flask/issues/593
#from kuas_api.views.v2 import api_v2
routes = []
def route(rule, **options):
def decorator(f):
url_rule = {
"rule": rule,
"view_func": f,
"options": options if options else {}
}
routes.append(url_rule)
return f
return decorator
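# Illustrative note (an assumption about how `routes` is consumed elsewhere): the collected
# url_rule dicts would typically be attached to a Flask blueprint in another module, e.g.
#   for r in routes:
#       api_v2.add_url_rule(r["rule"], view_func=r["view_func"], **r["options"])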
@route("/token")
@auto.doc(groups=["public"])
@auth.login_required
def get_auth_token():
"""Login to KUAS, and return token for KUAS API.
:reqheader Authorization: Using Basic Auth
:resjson int duration: The duration of this token to expired.
:resjson string token_type: Token type of this token.
    :resjson string auth_token: Auth token.
:statuscode 200: success login
:statuscode 401: login fail or auth_token expired
**Request**:
.. sourcecode:: http
GET /latest/token HTTP/1.1
Host: kuas.grd.idv.tw:14769
Authorization: Basic xxxxxxxxxxxxx=
Accept: */*
.. sourcecode:: shell
curl -X GET -u username:password https://kuas.grd.idv.tw:14769/v2/token
**Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"duration": 3600,
"token_type": "Basic",
"auth_token": "adfakdflakds.fladkjflakjdf.adslkfakdadf"
}
"""
is_login = json.loads(str(cache.red.get(g.username), "utf-8"))['is_login']
token = g.token
return jsonify(
auth_token=token.decode('ascii'),
token_type="Basic",
duration=const.token_duration,
        is_login=is_login
)
@route('/versions/<string:device_type>')
@auto.doc(groups=["public"])
def device_version(device_type):
"""Get latest version for app on (`device_type`) in webstore.
:param device_type: device we support
:resjson version: Object of version (see below)
The versions `version` is a json object list below.
:json string device: query device.
:json string version: latest version for device.
**Request**
.. sourcecode:: http
GET /latest/versions/android HTTP/1.1
Host: kuas.grd.idv.tw:14769
.. sourcecode:: shell
curl -X GET https://kuas.grd.idv.tw:14769/v2/versions/android
**Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"version": {
"device": "android",
"version": "1.5.4"
}
}
"""
if device_type in const.device_version:
result = {
"version": {
"device": device_type,
"version": const.device_version[device_type]
}
}
return jsonify(result)
return error.error_handle(status=404,
developer_message="Device not found.",
user_message="Device not found.")
@route('/servers/status')
@auto.doc(groups=["public"])
def servers_status():
"""Get KUAS API status for service
:resjson list status: Status list (see below)
Servers status list
:json service: service name.
:json status: HTTP status code.
**Request**
.. sourcecode:: http
GET /v2/servers/status HTTP/1.1
Host: kuas.grd.idv.tw:14769
.. sourcecode:: shell
curl -X GET https://kuas.grd.idv.tw:14769/v2/servers/status
**Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": [
{
"service": "ap",
"status": 200
},
{
"service": "bus",
"status": 200
},
{
"service": "leave",
"status": 200
}
]
}
"""
try:
original_status = cache.server_status()
except Exception as err:
return error.error_handle(status=404,
developer_message=str(err),
user_message="Something wrong.")
status = {
"status": [
{"service": "ap", "status": original_status[0]},
{"service": "bus", "status": original_status[1]},
{"service": "leave", "status": original_status[2]}
]
}
return jsonify(status)
|
import csv
from typing import Any, Dict
from contrib.descriptions import VulnDescriptionProvider
from contrib.internal_types import ScanResult
from contrib.report_builders import ReportBuilder
class CSVReportBuilder(ReportBuilder):
def __init__(self, description_provider: VulnDescriptionProvider):
self.description_provider = description_provider
self._buffer = ''
def build(self) -> Any:
        return self._buffer
def add_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
with open('tempcsv.csv', "w+") as csvfile:
wr = csv.writer(csvfile, dialect='excel')
for app_name, result in scan_results.items():
for vulnResult in result.vulns:
for addr, ports in result.locations.items():
description = self.description_provider.get_description(vulnResult.name, vulnResult.vuln_type)
csvRow = [addr, ports, app_name, vulnResult.name, description.text, vulnResult.severity, vulnResult.severity_str, description.url]
wr.writerow(csvRow)
with open('tempcsv.csv', "r+") as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|', skipinitialspace=True)
data = []
VulnData = 'IP, Port, Title, CVE,Description,Risk Score,Severity,References\n'
for row in reader:
data = ' '.join(row)
VulnData = VulnData + data + '\n'
self._buffer = VulnData
        return self._buffer
|
# Implementation to test the CNN as detailed in:
# 'Segmentation of histological images and fibrosis identification with a convolutional neural network'
# https://doi.org/10.1016/j.compbiomed.2018.05.015
# https://arxiv.org/abs/1803.07301
# Test segmentation performance of the models which were saved at each epoch during training
# Computes mean accuracy and DSC across test set for each model
import numpy as np
import scipy as scp
import tensorflow as tf
import os
import logging
import sys
import network
import utils
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
# os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
n_epochs = 100 # Number of models
h = 1536 # Image height
w = 2064 # Image width
img_idx = 0 # test_(n-1).png in folder, n-1 = img_idx
n_predict = 48 # Number of test images
if not os.path.exists("predictions test"):
os.makedirs("predictions test")
# Initialise model
logging.info("Getting predictions")
convnet = network.CNN(keep_rate=1.0, train_mode=False)
images = tf.placeholder(tf.float32, shape=(1, h, w, 3))
# Build network
convnet.build(images)
logging.info("Finished building network")
# Get and save predictions
epoch_acc = np.zeros(n_epochs)
epoch_dsc = np.zeros(n_epochs)
for j in range(n_epochs):
init = tf.global_variables_initializer()
# if restore is True:
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
# Reload current model
saver.restore(sess, "model/epoch_%d/model.ckpt" %(j+1))
logging.info("Model restored for prediction")
for i in range(img_idx,(img_idx+n_predict)):
# Get prediction for input image
print("Epoch %d, image %d of %d" %((j+1), (i+1), n_predict))
unlabelled = utils.get_unlabelled(i, batch_size=1, test=True)
pred = sess.run(convnet.out_max, feed_dict={images: unlabelled})
# Compute accuracy and dsc if mask is available
if os.path.isfile("testing set/test_%d_mask.png" %(i+1)):
labels = utils.get_labelled(i, 1, test=True)
accuracy, dsc = utils.compute_accuracy(pred, labels)
print("Prediction percent accuracy: %.3f and DSC: %.3f" %(accuracy, dsc))
epoch_acc[j] += accuracy
epoch_dsc[j] += dsc
logging.info("Creating output map")
map = utils.generate_map(pred)
scp.misc.imsave('predictions test/pred_%d_epoch_%d_a_%.3f_d_%.3f.png'
%(i+1, j+1, accuracy, dsc), map)
else:
print("Mask not found. Cannot compute accuracy and DSC")
logging.info("Creating output map")
map = utils.generate_map(pred)
scp.misc.imsave('predictions test/pred_%d_epoch_%d.png' %(i+1, j+1), map)
# Stats for each epoch
epoch_acc = np.divide(epoch_acc, n_predict)
epoch_dsc = np.divide(epoch_dsc, n_predict)
print('Accuracy each epoch')
print(epoch_acc)
print('DSC each epoch')
print(epoch_dsc)
print('Best accuracy and DSC with epoch')
print(np.amax(epoch_acc), np.argmax(epoch_acc)+1, np.amax(epoch_dsc), np.argmax(epoch_dsc)+1)
|
"""Service implementation for the Galaxy application."""
import os
import urllib2
import subprocess
from datetime import datetime
from cm.services.apps import ApplicationService
from cm.services import service_states
from cm.services import ServiceRole
from cm.services import ServiceDependency
from cm.util import paths
from cm.util import misc
from cm.util.decorators import TestFlag, delay
from cm.util.galaxy_conf import attempt_chown_galaxy
from cm.util.galaxy_conf import galaxy_option_manager
from cm.util.galaxy_conf import populate_process_options
from cm.util.galaxy_conf import populate_dynamic_options
from cm.util.galaxy_conf import populate_galaxy_paths
from cm.util.galaxy_conf import populate_admin_users
import logging
log = logging.getLogger('cloudman')
NUM_START_ATTEMPTS = 2 # Number of times we attempt to start Galaxy
class GalaxyService(ApplicationService):
def __init__(self, app):
super(GalaxyService, self).__init__(app)
self.name = ServiceRole.to_string(ServiceRole.GALAXY)
self.svc_roles = [ServiceRole.GALAXY]
self.remaining_start_attempts = NUM_START_ATTEMPTS
# Indicates if the environment for running Galaxy has been configured
self.configured = False
self.ssl_is_on = False
# Environment variables to set before executing galaxy's run.sh
self.env_vars = {}
self.dependencies = [
ServiceDependency(self, ServiceRole.JOB_MANAGER),
ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
ServiceDependency(self, ServiceRole.GALAXY_DATA),
ServiceDependency(self, ServiceRole.GALAXY_INDICES),
# ServiceDependency(self, ServiceRole.PROFTPD),
ServiceDependency(self, ServiceRole.GALAXY_TOOLS)
]
self.option_manager = galaxy_option_manager(app)
@property
def galaxy_home(self):
"""
Return the path where the Galaxy application is available
"""
return self.app.path_resolver.galaxy_home
def start(self):
self.state = service_states.STARTING
self.time_started = datetime.utcnow()
if not self.activated:
self.activated = True
log.debug("Service {0} self-activated".format(self.get_full_name()))
self.manage_galaxy(True)
def remove(self, synchronous=False):
log.info("Removing '%s' service" % self.name)
super(GalaxyService, self).remove(synchronous)
self.state = service_states.SHUTTING_DOWN
# Reset the number of restart attempts
log.debug("Resetting Galaxy remaining_start_attempts to {0}."
.format(self.remaining_start_attempts))
self.remaining_start_attempts = NUM_START_ATTEMPTS
self.manage_galaxy(False)
def restart(self):
log.info('Restarting Galaxy service')
self.remove()
self.status()
self.start()
@TestFlag(None)
def manage_galaxy(self, to_be_started=True):
"""
Use this method to start and stop Galaxy application.
:type to_be_started: bool
:param to_be_started: If set, this method will attempt to start the
Galaxy application process. If not set, the
method will attempt to shut down the application
process.
"""
log.debug("Using Galaxy from '{0}'".format(self.galaxy_home))
os.putenv("GALAXY_HOME", self.galaxy_home)
os.putenv("TEMP", self.app.path_resolver.galaxy_temp)
os.putenv("TMPDIR", self.app.path_resolver.galaxy_temp)
self.env_vars["GALAXY_HOME"] = self.galaxy_home
self.env_vars["TEMP"] = self.app.path_resolver.galaxy_temp
self.env_vars["TMPDIR"] = self.app.path_resolver.galaxy_temp
conf_dir = self.option_manager.setup()
if conf_dir:
self.env_vars["GALAXY_UNIVERSE_CONFIG_DIR"] = conf_dir
if self.multiple_processes():
self.env_vars["GALAXY_RUN_ALL"] = "TRUE"
# HACK: Galaxy has a known problem when starting from a fresh
# configuration in multiple process mode. Each process attempts to
# create the same directories and one or more processes can fail to
# start because it "failed" to create said directories (because
# another process created them first). This hack staggers
# the process starts in an attempt to circumvent this problem.
patch_run_sh_command = ("sudo sed -i -e \"s/server.log \\$\\@$/\\0; "
"sleep 4/\" %s/run.sh" % self.galaxy_home)
misc.run(patch_run_sh_command)
self.extra_daemon_args = ""
else:
# Instead of sticking with default paster.pid and paster.log,
# explicitly set pid and log file to ``main.pid`` and ``main.log``
            # to bring the single process case in line with defaults for the multiple
# process case (i.e. when GALAXY_RUN_ALL is set and multiple servers
# are defined).
# self.extra_daemon_args = "--pid-file=main.pid --log-file=main.log"
# No longer required
pass
if to_be_started and self.remaining_start_attempts > 0:
self.status()
if not self.configured:
log.debug("Setting up Galaxy application")
# Set job manager configs if necessary
for job_manager_svc in self.app.manager.service_registry.active(
service_role=ServiceRole.JOB_MANAGER):
if ServiceRole.SGE in job_manager_svc.svc_roles:
log.debug("Running on SGE; setting env_vars")
self.env_vars["SGE_ROOT"] = self.app.path_resolver.sge_root,
self.env_vars["DRMAA_LIBRARY_PATH"] = self.app.path_resolver.drmaa_library_path
# Make sure Galaxy home dir exists
if not os.path.exists(self.galaxy_home):
log.error("Galaxy application directory '%s' does not "
"exist! Aborting." % self.galaxy_home)
log.debug("ls /mnt/: %s" % os.listdir('/mnt/'))
self.state = service_states.ERROR
self.last_state_change_time = datetime.utcnow()
return False
# Ensure the necessary directories exist
for dir_name in [paths.P_GALAXY_INDICES,
('%s/tmp/job_working_directory' %
self.app.path_resolver.galaxy_data)]:
misc.make_dir(dir_name, 'galaxy')
self.configured = True
if not self._is_galaxy_running():
log.debug("Starting Galaxy...")
self.update_galaxy_config()
start_command = self.galaxy_run_command(
"%s --daemon" % self.extra_daemon_args)
attempt_chown_galaxy(self.galaxy_home)
if misc.run(start_command):
self.remaining_start_attempts -= 1
elif self.remaining_start_attempts > 0:
log.debug("It seems Galaxy failed to start; will atempt to "
"auto-restart (up to {0} more time(s))."
.format(self.remaining_start_attempts))
self.state = service_states.UNSTARTED
self.last_state_change_time = datetime.utcnow()
else:
log.debug("It seems Galaxy failed to start; setting service "
"state to {0}.".format(service_states.ERROR))
self.state = service_states.ERROR
self.last_state_change_time = datetime.utcnow()
else:
log.debug("Galaxy already running.")
else:
log.info("Shutting down Galaxy...")
self.state = service_states.SHUTTING_DOWN
stop_command = self.galaxy_run_command(
"%s --stop-daemon" % self.extra_daemon_args)
if self._is_galaxy_running():
misc.run(stop_command)
if not self._is_galaxy_running():
log.debug("Galaxy not running; setting service state to SHUT_DOWN.")
self.state = service_states.SHUT_DOWN
self.last_state_change_time = datetime.utcnow()
# Move all log files
subprocess.call("bash -c 'for f in $GALAXY_HOME/{main,handler,manager,web}*.log; "
"do mv \"$f\" \"$f.%s\"; done'" % datetime.utcnow()
.strftime('%H_%M'), shell=True)
def multiple_processes(self):
"""
Check CloudMan's config if Galaxy should be setup to run in multiple
processes mode.
:rtype: bool
:return: ``True`` if Galaxy should be setup to use multiple processes,
``False`` otherwise.
"""
return self.app.config.multiple_processes
def galaxy_run_command(self, args):
"""
Compose the command used to manage Galaxy process.
This will source Galaxy's virtualenv and compose the run command
with provided `args`.
:type args: string
:param args: Arguments to feed to Galaxy's run command, for example:
`--daemon` or `--stop-daemon`.
"""
env_exports = "; ".join(["export %s='%s'" % (
key, value) for key, value in self.env_vars.iteritems()])
venv = "source $GALAXY_HOME/.venv/bin/activate"
run_command = '%s - galaxy -c "%s; %s; sh $GALAXY_HOME/run.sh %s"' % (
paths.P_SU, env_exports, venv, args)
return run_command
@delay
def status(self):
"""Set the status of the service based on the state of the app process."""
old_state = self.state
if self._is_galaxy_running():
self.state = service_states.RUNNING
elif (self.state == service_states.SHUTTING_DOWN or
self.state == service_states.SHUT_DOWN or
self.state == service_states.UNSTARTED or
self.state == service_states.WAITING_FOR_USER_ACTION):
pass
else:
if self.state == service_states.STARTING and \
(datetime.utcnow() - self.last_state_change_time).seconds < 200:
                # Give Galaxy a couple of minutes to start; otherwise, because
# the monitor is running as a separate thread, it often happens
# that the .pid file is not yet created after the Galaxy process
# has been started so the monitor thread erroneously reports
# as if starting the Galaxy process has failed.
pass
else:
log.error("Galaxy daemon not running.")
if self.remaining_start_attempts > 0:
log.debug("Remaining Galaxy start attempts: {0}; setting "
"svc state to UNSTARTED"
.format(self.remaining_start_attempts))
self.state = service_states.UNSTARTED
self.last_state_change_time = datetime.utcnow()
else:
log.debug("No remaining Galaxy start attempts; setting svc "
"state to ERROR")
self.state = service_states.ERROR
self.last_state_change_time = datetime.utcnow()
if old_state != self.state:
log.info("Galaxy service state changed from '%s' to '%s'" % (
old_state, self.state))
self.last_state_change_time = datetime.utcnow()
if self.state == service_states.RUNNING:
# Once the service gets running, reset the number of start attempts
self.remaining_start_attempts = NUM_START_ATTEMPTS
log.debug("Granting SELECT permission to galaxyftp user on "
"'galaxy' database")
misc.run('%s - postgres -c "%s -p %s galaxy -c \\\"GRANT SELECT ON galaxy_user TO galaxyftp\\\" "'
% (paths.P_SU, self.app.path_resolver.psql_cmd,
self.app.path_resolver.psql_db_port),
"Error granting SELECT grant to 'galaxyftp' user",
"Successfully added SELECT grant to 'galaxyftp' user")
# Force cluster configuration state update on status change
self.app.manager.console_monitor.store_cluster_config()
def _is_galaxy_running(self):
"""Check is Galaxy process is running and the UI is accessible."""
if self._check_daemon('galaxy'):
dns = "http://127.0.0.1:8080"
running_error_codes = [403] # Error codes under which Galaxy runs
try:
urllib2.urlopen(dns)
return True
except urllib2.HTTPError, e:
return e.code in running_error_codes
except:
return False
else:
log.debug("Galaxy UI does not seem to be accessible.")
return False
def update_galaxy_config(self):
"""
Update Galaxy application configuration.
Optionally set Galaxy to use multiple processes, then populate dynamic
options (i.e., arbitrary options coming from user data), adjust system
paths and set admin users.
"""
log.debug("Updating Galaxy config")
if self.multiple_processes():
populate_process_options(self.option_manager)
populate_dynamic_options(self.option_manager)
populate_galaxy_paths(self.option_manager)
populate_admin_users(self.option_manager)
def add_galaxy_admin_users(self, admins_list=[]):
"""
Set email addresses provided as Galaxy admin users.
:type admins_list: list
:param admins_list: A list of email addresses corresponding to
registered Galaxy users.
"""
self.option_manager.app.config.galaxy_admin_users = admins_list
|
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_regression
import numpy as np
import pymc3 as pm
import scipy.stats as st
import pickle
import datetime
class SimulationMetrics:
"""
    A class used to convert traces into something a data analyst can use for analysis
"""
def __init__(self, traces=[], path=""):
"""
Constructor for SimulationMetrics
Parameters:
----------
traces: List[Pymc3.trace]
- A list of traces returned from simulation
path: String
- The path to a stored SimulationMetrics
"""
if isinstance(traces, str):
path = traces
traces = []
if len(traces):
self.traces = traces
else:
self.traces = self.load_from_file(path)
self.I = []
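    # Minimal usage sketch (assumes a metrics file previously written by save_to_file):
    #
    #   sm = SimulationMetrics(path="metrics-2021-01-01-00-00-00.priv")
    #   sm.plot_mutual_information()
    #   sm.highest_leakage(head=3)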
def __str__(self):
"""
Overrides the __str__ str method to print traces
"""
return self.traces.__str__()
def load_from_file(self, location):
"""
A method which loads a pickled object as the trace
Returns:
----------------
- A "un"pickled object
Parameters:
----------------
Location: str
- The location of the file
"""
with open(location, "rb") as file:
return pickle.load(file)
def plot_mutual_bar(self, shift=0):
"""
        A method used in the case of multiple parameters.
        It is needed because the traces are then stored in a different layout.
        Parameters
        ------------------
        shift: int
        - Since there are quite a large number of simulations, a shift into the data can be needed
"""
start, stop = 0,10
executions = 5 #len(sm.traces)//2//10
fig, ax = plt.subplots(executions,2, figsize=(20,18))
for j in range(shift, shift+executions):
for p in range(2):
start = (p*10)+(j*20)
stop = ((p*10)+10)+(j*20)
traces = self.traces[start:stop]
Is = {}
labels = []
for i in range(10):
trace = traces[i][0]
name = traces[i][2][0]
alice = trace[f"intDist_{i}"]
output = trace[f"Output_{i}"]
I = mutual_info_regression([[a] for a in alice], output, discrete_features=True)[0]
if name[0] in Is:
Is[name[0]].append(I)
else:
labels.append(name[0])
Is[name[0]] = [I]
values = len(Is.keys())
vals = [max(v[1]) for v in Is.items()]
ax[j-shift][p].bar(labels, vals)
ax[j-shift][p].set_ylim(0,7)
ax[j-shift][p].set_title(f"Parameter {p} opposite was {self.traces[start+stop//2][2][1]}")
ax[j-shift][p].set_ylabel("$I(X;Y)$")
plt.tight_layout()
plt.show()
def plot_mutual_information(self, figsize=(16,8), as_bar=True):
"""
Plots the mutual information as a graph for each distribution
Parameters
--------------
figsize: Tuple<Int>
- The size of the images
as_bar: bool
- Determines if the distribution should be a dot plot or a bar plot
"""
I = self.mutual_information()
size = len(I) if len(I) > 1 else 2
plt.style.use('seaborn-darkgrid')
_, ax = plt.subplots(2, 2,figsize=figsize)
ylim = round(max(self.highest_leakage(head=1, verbose=0), key=lambda x: x[0])[0][0]+0.5)
for pos, (axs, values) in enumerate(zip(ax.flatten(), I)):
x = 0
items = values.items()
labels = []
best_vals = []
for k,v in items:
if as_bar:
best = max(v, key=lambda x: x[0])
best_vals.append(best[0])
else:
for value, info in v:
axs.plot([x], value, "x", label=str(info[1:]))
best_info = round(max(v, key=lambda x: x[0])[0],2)
axs.annotate(str(best_info), xy=(x, best_info), fontsize=16)
if str(k) == "TruncatedNormal":
labels.append("Truncated \n Normal")
else:
labels.append(str(k))
x+=1
if as_bar:
axs.bar([i for i in range(len(best_vals))], best_vals)
alice = {0: (0,100), 1: (0,300), 2: (0,10), 3: (0,1)}
pi_pos = "A_{\pi_" + str(pos+1) + "}"
title = f"$I(Y_{pos+1};{pi_pos})$ for parameter {pos+1} where ${pi_pos}$ ~ $U$ {alice[pos]}"
ylabel = f"$I(Y_{pos+1};{pi_pos})$"
axs.set_title(title, fontsize=16)
axs.set_ylabel(ylabel, fontsize=14)
axs.set_xlabel(f"Distributions", fontsize=14)
axs.set_xticks(range(len(labels)))
axs.set_xticklabels(labels, fontsize=14)
axs.set_ylim(0,ylim)
pos += 1
plt.tight_layout()
plt.show()
def highest_leakage(self, head=1, verbose=1):
"""
A method used to calculate the highest leakage grouped by each distribution
Returns
-----------
List[Tuple[Float, Tuple[String, ]]]
- Returns a list containing the mutual information next to the specific distribution
Parameters
-----------
head: Int
- Determines how many distributions are included
verbose: int
        - Determines if the distributions should be printed
"""
if not len(self.I):
self.mutual_information()
best_vals = []
for parameter_pos, l in enumerate(self.I):
best_dist = []
for k,v in l.items():
best = max(v, key=lambda x: x[0])
best_dist.append(best)
best_dist = list(sorted(best_dist, key=lambda x: x[0], reverse=True))
if verbose:
print(f"The distribution that had the most leakage was {best_dist[:head]} for parameter {parameter_pos}")
best_vals.append(best_dist[:head])
return best_vals
def save_to_file(self, location=""):
"""
Save the particular trace to a file with the format: Metrics-%Y-%m-%d-%H-%M-%S.priv
Parameter:
-----------
location: string
- The location in which the files should be saved
"""
date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
with open(location+f"metrics-{date}.priv", "wb") as file:
pickle.dump(self.traces, file)
def mutual_information(self):
"""
Calculates the mutual information for each distribution and appends them to a global variable I
Returns
--------
List[float, List[String,]]
- A list of the mutual information paired with its respective distribution
"""
_, names, _ = self.traces[0]
size = len(names)
mutual_information = [{} for _ in range(size)]
for i in range(size):
for trace, names, info in self.traces:
discrete = ("int" in str(names[i]))
alice = trace[names[i]]
try:
output = trace["Output"]
except:
pos = int(str(names[i]).split("_")[-1])
if pos < 10:
output = trace[f"Output_{pos}"]
else:
continue
I_ao = mutual_info_regression([[j] for j in alice], output, discrete_features=discrete)[0]
while (len(info) == 1):
info = info[0] # Used to unwrap the inner information in case of subtypes such as List[List[Tuple[...]]]
if isinstance(info, tuple) or (isinstance(info, list) and isinstance(info[0], list)):
info = info[i]
if info[0] in mutual_information[i]:
mutual_information[i][info[0]].append((I_ao,info))
else:
mutual_information[i][info[0]] = [(I_ao, info)]
self.I = mutual_information
return mutual_information |
import json
from Name import NameDefinition, Gender, Region, NameSource
class TestNames:
def test_meaning_none(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning=None)
        assert len(name.meanings) == 0
def test_meaning_merge_same_origin(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning(meaning="meaning2", origin=Region.United_States)
        assert len(name.meanings) == 2
assert name.meanings[0].meaning == "meaning1"
assert name.meanings[0].origins == [Region.United_States]
assert name.meanings[1].meaning == "meaning2"
assert name.meanings[1].origins == [Region.United_States]
def test_meaning_merge_same_meaning_different_origins(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning(meaning="meaning1", origin=Region.United_Kingdom)
        assert len(name.meanings) == 1
assert name.meanings[0].meaning == "meaning1"
assert name.meanings[0].origins == [Region.United_States, Region.United_Kingdom]
def test_gender_boy_to_unisex(self):
name = NameDefinition(name="name1", gender=Gender.boy)
assert name.gender == Gender.boy
name.append_attrs(gender=Gender.girl)
assert name.gender == Gender.unisex
def test_gender_girl_to_unisex(self):
name = NameDefinition(name="name1", gender=Gender.girl)
assert name.gender == Gender.girl
name.append_attrs(gender=Gender.boy)
assert name.gender == Gender.unisex
def test_gender_unisex_to_girl_still_unisex(self):
name = NameDefinition(name="name1", gender=Gender.unisex)
assert name.gender == Gender.unisex
name.append_attrs(gender=Gender.girl)
assert name.gender == Gender.unisex
def test_gender_unisex_to_boy_still_unisex(self):
name = NameDefinition(name="name1", gender=Gender.unisex)
assert name.gender == Gender.unisex
name.append_attrs(gender=Gender.boy)
assert name.gender == Gender.unisex
def test_name_to_dict(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name_dict = name.to_dict()
name_json = json.dumps(name_dict)
assert name_json == '{"name": "name1", "gender": "boy", "meanings": [{"meaning": "meaning1", "origins": ["United States"]}], "known_persons": "", "source": []}'
def test_get_all_origins_same_meaning(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning("meaning1", origin=Region.United_Kingdom)
expected_meanings = [Region.United_States, Region.United_Kingdom]
expected_meanings.sort()
assert name.get_all_origins() == expected_meanings
def test_get_all_origins_different_meanings(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning("meaning1", origin=Region.United_Kingdom)
name.add_meaning("meaning2", origin=Region.India)
name.add_meaning("meaning2", origin=Region.Tamil)
expected_meanings = [Region.United_Kingdom, Region.United_States, Region.India, Region.Tamil]
expected_meanings.sort()
assert name.get_all_origins() == expected_meanings
def test_get_all_meanings_same_meaning(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning("meaning1", origin=Region.United_Kingdom)
assert name.get_all_meanings() == ["meaning1"]
def test_get_all_meanings_different_meanings(self):
name = NameDefinition(name="name1", gender=Gender.boy, meaning="meaning1", origin=Region.United_States)
name.add_meaning("meaning1", origin=Region.United_Kingdom)
name.add_meaning("meaning2", origin=Region.India)
name.add_meaning("meaning2", origin=Region.Tamil)
assert name.get_all_meanings() == ["meaning1", "meaning2"]
def test_append_source(self):
name = NameDefinition(name="name1", gender=Gender.boy, source=NameSource.ssa)
name.append_attrs(source=NameSource.pantheon)
        assert len(name.sources) == 2
assert name.sources == {NameSource.ssa, NameSource.pantheon}
def test_merge_source(self):
name1 = NameDefinition(name="name1", gender=Gender.boy, source=NameSource.ssa)
name2 = NameDefinition(name="name1", gender=Gender.boy, source=NameSource.pantheon)
name1.merge_name(name2)
        assert len(name1.sources) == 2
assert name1.sources == {NameSource.ssa, NameSource.pantheon}
|
# ============================================================================
# Appendix A: Determining the heating system and operation mode according to the type
# of installed heating equipment or radiators, and the equipment or radiator types
# assumed in the evaluation
# ============================================================================
# ============================================================================
# A.1 Determining the heating system and operation mode according to the type of
# installed heating equipment or radiators
# ============================================================================
def calc_heating_mode(region, H_A=None, H_MR=None, H_OR=None):
"""暖房方式及び運転方法の区分を取得する
Args:
region(int): 省エネルギー地域区分
H_A(dict, optional): 暖房方式 (Default value = None)
H_MR(dict, optional): 主たる居室の暖房機器の仕様 (Default value = None)
H_OR(dict, optional): その他の居室の暖房機器の仕様 (Default value = None)
Returns:
tuple(str, str): 暖房方式及び運転方法の区分
"""
if H_A is None:
if H_MR is None and H_OR is None:
return None, None
default_spec = get_default_heating_spec(region)
if H_MR['type'] in ['設置しない', 'その他']:
H_MR_type = default_spec[0]['type']
else:
H_MR_type = H_MR['type']
if H_OR is not None:
if H_OR['type'] in ['設置しない', 'その他']:
H_OR_type = default_spec[1]['type']
else:
H_OR_type = H_OR['type']
y = get_index_of_table_a_1(H_MR_type)
def to_roha(s):
"""
Args:
s:
Returns:
"""
if type(s) is tuple:
return (to_roha(s[0]), to_roha(s[1]))
else:
if s == '連続':
return 'ろ'
elif s == '間歇':
return 'は'
else:
raise ValueError(s)
if H_OR is not None:
x = get_index_of_table_a_1(H_OR_type)
return to_roha(get_table_a_1_a()[y][x])
else:
tmp = get_table_a_1_b()[y]
if type(tmp) is tuple:
if region in [1, 2]:
return to_roha(tmp[0]), None
elif region in [3, 4, 5, 6, 7]:
return to_roha(tmp[1]), None
else:
raise ValueError(region)
else:
return to_roha(tmp), None
if H_A['type'] == 'ダクト式セントラル空調機':
        # Heating system that heats the entire dwelling continuously
return 'い'
else:
raise ValueError(H_A['type'])
def get_index_of_table_a_1(type):
"""表A.1における行番号を取得する
Args:
type(str): 主たる居室に設置する暖冷房設備機器等
Returns:
int: 表A.1における行番号
"""
key_table = {
'電気蓄熱暖房器': 0,
'温水暖房用パネルラジエーター': 1,
'温水暖房用床暖房': 2,
'温水暖房用ファンコンベクター': 3,
'ルームエアコンディショナー': 4,
'FF暖房機': 5,
'電気ヒーター床暖房': 6,
'ルームエアコンディショナー付温水床暖房機': 7
}
return key_table[type]
def get_table_a_1_a():
"""表 A.1(a) 主たる居室及びその他の居室の運転方法(その他の居室がある場合)
Args:
Returns:
list: 表 A.1(a) 主たる居室及びその他の居室の運転方法(その他の居室がある場合)
"""
table_a_1_a = [
        # Electric thermal storage heater (電気蓄熱暖房器)
(
('連続', '連続'),
('連続', '連続'),
('連続', '連続'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
),
        # Hot-water panel radiator (パネルラジエーター)
(
('連続', '連続'),
('連続', '連続'),
('連続', '連続'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
('連続', '間歇'),
),
        # Hot-water floor heating (温水床暖房)
(
('連続', '連続'),
('連続', '連続'),
('連続', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
),
        # Fan convector (ファンコンベクター)
(
('間歇', '連続'),
('間歇', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
),
        # Room air conditioner (ルームエアコンディショナー)
(
('間歇', '連続'),
('間歇', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
),
        # FF heater (FF暖房機)
(
('間歇', '連続'),
('間歇', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
),
        # Electric floor heating (電気ヒーター床暖房)
(
('間歇', '連続'),
('間歇', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
),
        # Hot-water floor heating combined with a room air conditioner (ルームエアコンディショナー付温水床暖房)
(
('間歇', '連続'),
('間歇', '連続'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
('間歇', '間歇'),
)
]
return table_a_1_a
def get_table_a_1_b():
"""表 A.1(b) 主たる居室の運転方法(その他の居室がない場合)
Args:
Returns:
list: 表 A.1(b) 主たる居室の運転方法(その他の居室がない場合)
"""
table_a_1_b = [
'連続',
'連続',
('連続', '間歇'),
'間歇',
'間歇',
'間歇',
'間歇',
'間歇',
]
return table_a_1_b
def get_default_heating_spec(region):
"""暖房設備機器等が設置されない場合の評価上想定される暖房設備機器等の種(表A.3)を取得する
Args:
region(int): 省エネルギー地域区分
Returns:
tuple(dict, dict, dict): 暖房設備機器等が設置されない場合の評価上想定される暖房設備機器等の種
"""
def to_spec(type):
"""
Args:
type(str):
Returns:
"""
if type == '温水暖房用パネルラジエーター':
return {
'type': type
}
elif type == 'FF暖房機':
return {
'type': type,
'e_rtd': 0.86
}
elif type == 'ルームエアコンディショナー':
return {
'type': type,
'e_class': 'ろ',
'dualcompressor': False
}
else:
raise ValueError(type)
if region != 8:
type_list = get_table_a_5()[region - 1]
if region in [1, 2]:
hw_spec = {
'type': '石油従来型温水暖房機',
'e_rtd': 0.83
}
return to_spec(type_list[0]), to_spec(type_list[1]), hw_spec
else:
return to_spec(type_list[0]), to_spec(type_list[1]), None
else:
return None, None, None
def get_default_heatsource(region):
"""温水暖房用熱源機を設置しない場合又はその他の温水暖房機を設置する場合に想定する温水暖房用熱源機の仕様を取得する
Args:
region(int): 省エネルギー地域区分
Returns:
dict: 温水暖房用熱源機を設置しない場合又はその他の温水暖房機を設置する場合に想定する温水暖房用熱源機の仕様
"""
if region == 8:
return None
hs_type = get_table_a_6()[region - 1]
e_rtd_table = {
'石油従来型温水暖房機': 0.830,
'ガス従来型温水暖房機': 0.825
}
e_rtd = e_rtd_table[hs_type]
return {
'type': hs_type,
'e_rtd_hs': e_rtd,
        # When no piping is installed, evaluate the piping insulation as "no insulation covering"
'pipe_insulation': False,
'underfloor_pipe_insulation': False
}
def get_table_a_5():
"""表 A.5 主たる居室若しくはその他の居室に暖房設備機器等を設置しない場合又は表 A.1 に掲げる 暖房設備機器等以外の暖房設備機器等を設置する場合の評価において想定する暖房設備機器等
Args:
Returns:
表 A.5 主たる居室若しくはその他の居室に暖房設備機器等を設置しない場合又は表 A.1 に掲げる 暖房設備機器等以外の暖房設備機器等を設置する場合の評価において想定する暖房設備機器等
"""
table_a_5 = [
('温水暖房用パネルラジエーター', '温水暖房用パネルラジエーター'),
('温水暖房用パネルラジエーター', '温水暖房用パネルラジエーター'),
('FF暖房機', 'FF暖房機'),
('FF暖房機', 'FF暖房機'),
('ルームエアコンディショナー', 'ルームエアコンディショナー'),
('ルームエアコンディショナー', 'ルームエアコンディショナー'),
('ルームエアコンディショナー', 'ルームエアコンディショナー'),
]
return table_a_5
def get_table_a_6():
"""表 A.6 温水暖房用熱源機を設置しない又はその他の温水暖房用熱源機を設置する場合の評価において想定する温水暖房用熱源機
Args:
Returns:
list: 表 A.6 温水暖房用熱源機を設置しない又はその他の温水暖房用熱源機を設置する場合の評価において想定する温水暖房用熱源機
"""
table_a_6 = [
'石油従来型温水暖房機',
'石油従来型温水暖房機',
'石油従来型温水暖房機',
'石油従来型温水暖房機',
'ガス従来型温水暖房機',
'ガス従来型温水暖房機',
'ガス従来型温水暖房機',
]
return table_a_6
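# A minimal usage sketch (not part of the original module; the region and equipment
# values below are illustrative assumptions): it asks for the heating mode of a main
# living room heated by a room air conditioner, with no equipment in the other rooms,
# and prints the defaults assumed by Tables A.5 and A.6 for the same region.
if __name__ == '__main__':
    example_H_MR = {'type': 'ルームエアコンディショナー'}
    mode_MR, mode_OR = calc_heating_mode(region=6, H_A=None, H_MR=example_H_MR, H_OR=None)
    print(mode_MR, mode_OR)  # expected: 'は' (intermittent operation) and None
    print(get_default_heating_spec(6))  # default equipment assumed when none is installed
    print(get_default_heatsource(6))    # default hot-water heat-source unit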
|
from django.apps import AppConfig
class SitespaceConfig(AppConfig):
name = 'SiteSpace'
|
StudentofFRI = ["Anton", "Budi", "Doni", "Huda"]
print("List of Student = ")
print(StudentofFRI[0])
print(StudentofFRI[1])
print(StudentofFRI[2])
print(StudentofFRI[3]) |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import allreducer as ar
import torch
import torch.nn as nn
import time
from mpi4py import MPI
from compression import NoneCompressor
from settings import logger
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue
else:
import queue as Queue
class _DistributedOptimizer(torch.optim.Optimizer):
def __init__(self, params, named_parameters, compressor, is_sparse=True, err_handler=None, layerwise_times=None, sigma_scale=2.5, density=0.01, norm_clip=None, writer=None):
super(self.__class__, self).__init__(params)
        self._compressor = compressor
self._sparse = is_sparse
self._layerwise_times = layerwise_times
self._msg_queue = Queue.Queue()
self._msg_queue2 = Queue.Queue()
if named_parameters is not None:
named_parameters = list(named_parameters)
else:
named_parameters = []
# make sure that named_parameters are tuples
if any([not isinstance(p, tuple) for p in named_parameters]):
raise ValueError('named_parameters should be a sequence of '
'tuples (name, parameter), usually produced by '
'model.named_parameters().')
if len(named_parameters) > 0:
self._parameter_names = {v: k for k, v
in sorted(named_parameters)}
else:
self._parameter_names = {v: 'allreduce.noname.%s' % i
for param_group in self.param_groups
for i, v in enumerate(param_group['params'])}
self._handles = {}
self._grad_accs = []
self._requires_update = set()
self._register_hooks()
self._lock = threading.Lock()
self._key_lock = threading.Lock()
self.momentum_correction = False
self._allreducer = ar.AllReducer(named_parameters, self._lock, self._key_lock, compressor, sparse=self._sparse, err_callback=err_handler, layerwise_times=layerwise_times, sigma_scale=sigma_scale, density=density, norm_clip=norm_clip, msg_queue=self._msg_queue, msg_queue2=self._msg_queue2, writer=writer)
self.allreducer_thread = threading.Thread(name='allreducer', target=self._allreducer.run)
self.allreducer_thread.start()
self.local = False
self._synced = False
def _register_hooks(self):
for param_group in self.param_groups:
for p in param_group['params']:
if p.requires_grad:
p.grad = p.data.new(p.size()).zero_()
self._requires_update.add(p)
p_tmp = p.expand_as(p)
grad_acc = p_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_hook(p))
self._grad_accs.append(grad_acc)
def _make_hook(self, p):
def hook(*ignore):
assert p not in self._handles
assert not p.grad.requires_grad
if not self.local:
name = self._parameter_names.get(p)
d_p = p.grad.data
if self.momentum_correction:
param_state = self.state[p]
momentum = 0.9
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p)
d_p = buf
self._handles[p] = self._allreducer.add_tensor(name, d_p)
torch.cuda.synchronize()
#if rank() == 0:
# logger.info('-->pushed time [%s]: %s, norm: %f', name, time.time(), p.grad.data.norm())
self._msg_queue.put(name)
return hook
def synchronize(self):
if not self._synced:
self._msg_queue2.get() # wait for allreducer
self._synced = True
for p, value in self._handles.items():
output = self._allreducer.get_result(value)
p.grad.data.set_(output.data)
self._handles.clear()
def _step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
name = self._parameter_names.get(p)
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
#if name.find('bias') >= 0 or name.find('bn') >= 0:
# print('batch norm or bias detected, continue, %s' % name)
if momentum != 0 and not self.momentum_correction:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-group['lr'], d_p)
return loss
def _step_with_mc(self, closure=None):
"""Performs a single optimization step with momemtum correction.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
name = self._parameter_names.get(p)
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-group['lr'], d_p)
return loss
def step(self, closure=None):
if not self.local:
self.synchronize()
ret = self._step(closure)
self._synced = False
return ret
def stop(self):
self._allreducer.stop()
self._msg_queue.put('STOP')
def add_train_epoch(self):
self._allreducer.train_epoch += 1
def get_current_density(self):
return self._allreducer.get_current_density()
def DistributedOptimizer(optimizer, named_parameters=None, compression=NoneCompressor, is_sparse=False, err_handler=None, layerwise_times=None, sigma_scale=2.5, density=0.1, norm_clip=None, writer=None):
cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
dict(_DistributedOptimizer.__dict__))
return cls(optimizer.param_groups, named_parameters, compression, is_sparse, err_handler, layerwise_times, sigma_scale=sigma_scale, density=density, norm_clip=norm_clip, writer=writer)
def rank():
return MPI.COMM_WORLD.rank
def size():
return MPI.COMM_WORLD.size
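# A minimal wiring sketch (not part of the original module; model size, learning rate and
# density are illustrative assumptions). It assumes the script is launched with an MPI
# runner (e.g. mpirun -np N python this_file.py), that CUDA is available since the
# gradient hook calls torch.cuda.synchronize(), and an older PyTorch matching the
# deprecated add_() call style used in _step().
if __name__ == '__main__':
    model = nn.Linear(10, 1).cuda()
    base_opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    opt = DistributedOptimizer(base_opt, named_parameters=model.named_parameters(),
                               compression=NoneCompressor, is_sparse=True, density=0.01)
    for _ in range(3):
        opt.zero_grad()
        loss = model(torch.randn(4, 10).cuda()).sum()
        loss.backward()  # hooks hand gradients to the background all-reducer
        opt.step()       # waits for reduced gradients, then applies the SGD update
    opt.stop()           # shut down the all-reducer thread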
|
#! /usr/bin/env python
## RFC5246 Data definition language parser
## Rich Salz, [email protected], June 2013.
## Copyright 2013-2015, Rich Salz
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os, StringIO, subprocess, sys
tests = (
(
"uint8 f; uint8 f;\n",
"INFILE:1: error near ``;'': Duplicate symbol ``f'' found\n"
),
(
"",
"INFILE:1: error near ``'': syntax error, unexpected $end\n"
),
(
"uint16 f<10..1>;\n",
"INFILE:1: error near ``;'': ``f'' range 10 less than 1\n"
),
(
"uint16 f[0];\n",
"INFILE:1: error near ``;'': ``f'' size (0) is not positive\n"
),
(
"uint16 f[2^33-1];\n",
"INFILE:1: error near ``2^33-1'': Exponent out of range\n"
"INFILE:1: error near ``;'': ``f'' size (0) is not positive\n"
),
(
"uint16 f[2^3-1];\n",
"INFILE:1: error near ``2^3-1'': Bad exponent\n"
"INFILE:1: error near ``;'': ``f'' size (0) is not positive\n"
),
(
"uint16 f[2^3-2];\n",
"INFILE:1: error near ``2^3-2'': Bad exponent\n"
"INFILE:1: error near ``;'': ``f'' size (0) is not positive\n"
),
(
"enum { a, a } b;",
"INFILE:1: error near ``a'': Duplicate ``a'' in enum\n"
),
(
"enum { c(1), a(1..3) } b;",
"INFILE:1: error near ``)'': Enum range for ``a'' for non-reserved name\n"
),
(
"enum { c(1), reserved(1..3) } b;",
""
),
(
"enum { a(1), b } c;",
"INFILE:1: error near ``}'': syntax error, unexpected '}', expecting '('\n"
),
(
"enum { a(1), b, } c;",
"INFILE:1: error near ``,'': syntax error, unexpected ',', expecting '('\n"
),
(
"enum { a(20), b(20), (12) } d; ",
"INFILE:1: error near ``;'': Value for ``a'' is too big (20 > 12)\n"
"INFILE:1: error near ``;'': Enum ``b'' duplicates value 20\n"
"INFILE:1: error near ``;'': Value for ``b'' is too big (20 > 12)\n"
),
(
'''extern f;
uint32 g[f];
uint32 gg[ff];''',
"INFILE:3: error near ``;'': Unknown size reference ``ff''\n"
),
(
"select (f) { case a: ; } f;",
"INFILE:1: error near ``{'': Unknown variant selector ``f''\n"
),
(
'''extern f;
select (f) { case a: ; } g;''',
""
),
(
'''enum { true, false } bool;
select (bool) {
case a: case a: ;
} g;''',
"INFILE:3: error near ``:'': Duplicate case ``a''\n"
),
(
'''extern f; extern extensions;
select (f) {
case a: case b: case c: digitally-signed extensions;
case d: ;
} s;''',
""
),
(
'''extern f; extern extensions;
select (f) {
case a: case b: case c: digitally-signed extensions;
case a: ;
} s;''',
"INFILE:5: error near ``;'': Duplicate case ``a'' in ``s''\n"
),
(
'''extern f; extern extensions;
select (f) {
case a: case b: case b: digitally-signed extensions;
} s;''',
"INFILE:3: error near ``:'': Duplicate case ``b''\n"
),
(
'''extern f; extern extensions;
select (f) {
case a: case b: case c: digitally-signed extensions ; uint8 spacer;
} s;''',
"INFILE:3: error near ``uint8'': syntax error, unexpected tUINT8, expecting tCASE or '}'\n"
),
(
'''extern f; extern extensions; extern d;
select (f) {
case a: case b: case c:
digitally-signed extensions;
d e;
} s;''',
""
),
(
'''extern f; extern extensions;
select (f) {
case a: digitally-signed extensions ;
case b: f;
} s;''',
""
),
(
'''extern f; extern extensions;
select (f) {
case a: digitally-signed extensions ;
case b: f field;
} s;''',
""
),
(
'''extern f; extern extensions;
select (f) {
case c: f2 field;
} s;''',
"INFILE:4: error near ``}'': Unknown member type ``f2''\n"
),
(
"uint8 f; extern f;",
""
),
(
"extern f; uint8 f;",
""
),
(
"struct { uint8 f; uint8 f; } s;",
"INFILE:1: error near ``;'': Duplicate item ``f'' in ``s''\n"
),
(
'''extern ZZ;
struct { uint8 f; ZZ g; } s;''',
""
),
(
"struct { uint8 f; mytype g; } s;",
"INFILE:1: error near ``;'': Unknown member type ``mytype''\n"
),
(
'''struct {
uint8 size;
opaque g[s.size];
} s;''',
"INFILE:3: error near ``;'': Unknown size reference ``s.size''\n"
"INFILE:3: error near ``;'': Note: cannot resolve dotted items.\n"
),
(
'''struct {
uint8 size;
} s;
opaque g[s.size];
opaque h[s.size];''',
"INFILE:4: error near ``;'': Unknown size reference ``s.size''\n"
"INFILE:4: error near ``;'': Note: cannot resolve dotted items.\n"
"INFILE:5: error near ``;'': Unknown size reference ``s.size''\n"
),
(
"struct { } empty;",
""
),
(
'''// comment
uint8 foo;
struct {foo bar;} baz;''',
""
),
(
"uint8 f; /* comment",
"INFILE:1: error near ``'': EOF in comment\n"
),
(
"uint8 f; // /* comment",
""
),
(
'''extern Extension; extern extensions_present;
uint8 ProtocolVersion; opaque Random[12]; opaque SessionID[2^8-1];
struct {
ProtocolVersion client_version;
Random random;
SessionID session_id;
select (extensions_present) {
case false:
;
case true:
Extension extensions<0..2^16-1>;
};
} ClientHello;''',
""
),
(
'''uint8 f; struct {} empty;
select (f) {
case false:
select (f) {
case false: ;
}
}''',
"INFILE:4: error near ``select'': syntax error, unexpected tSELECT\n"
),
(
'''struct {
struct {
uint16 x[2];
} n;
uint32 x;
} b;''',
""
),
(
'''struct { uint8 f[12]; } x;
struct {
struct {
x nested[2];
} n;
uint32 x;
} b;''',
""
),
(
'''struct {
struct {
foo nested[2];
} n;
uint32 x;
} b;''',
"INFILE:4: error near ``;'': Unknown member type ``foo''\n"
),
(
'''struct {
select (b.x) {
case foo: ;
} n;
uint32 x;
} b;''',
"INFILE:2: error near ``{'': Unknown variant selector ``b.x''\n"
"INFILE:2: error near ``{'': Note: cannot resolve dotted items.\n"
),
)
infile = "test-in"
outfile = "test-out"
passed = 0
failed = 0
argv = ( "./parser", infile )
for test,expected in tests:
open(infile, "w").write(test)
subprocess.Popen(argv, stdout=open(outfile, "w")).wait()
results = open(outfile).read()
expected = expected.replace("INFILE", infile)
if results == expected:
passed += 1
else:
failed += 1
print "---\nFail"
print " - Test", passed + failed + 1, "\n", test, "\n"
print " - Expected\n", expected, "\n"
print " - Got\n", results, "\n"
print "---"
os.unlink(infile)
os.unlink(outfile)
print "passed", passed, "failed", failed, "total", passed + failed
sys.exit(failed)
|
from flask import Flask
from flask_bcrypt import Bcrypt
from modules.secrets import secrets
app = Flask('bookmark')
app.secret_key = secrets.bookmark_secret
bcrypt = Bcrypt(app) |
from .list_builder import ListBuilder, ListRootBuilder
from moff.node import OrderedListNode, ListItemNode, node
class OrderedListRootBuilder (ListRootBuilder):
pass
class OrderedListBuilder (ListBuilder):
# override
def build_node(self):
lnode = OrderedListNode()
for anyone in self.get_collection():
inode = ListItemNode()
inode.add_node(node(anyone))
lnode.add_node(inode)
return lnode
|
# -*- coding: utf-8 -*-
"""
Enelvo
~~~~~~~~~~~~~~~~~~~
A flexible normalizer for user-generated content.
:copyright: (c) 2017-2019 by Thales Bertaglia
:licence: MIT, see LICENSE for more details
"""
import logging
try:
from importlib.metadata import version, PackageNotFoundError # type: ignore
except ImportError: # pragma: no cover
from importlib_metadata import version, PackageNotFoundError # type: ignore
try:
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
__prog__ = "enelvo"
__title__ = "Enelvo"
__summary__ = "A flexible normaliser for user-generated content."
__uri__ = "https://www.github.com/tfcbertaglia/enelvo"
__author__ = "Thales Bertaglia"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright 2017-2020 Thales Bertaglia"
# the user should dictate what happens when a logging event occurs
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
# Generated by Django 2.2.1 on 2019-07-03 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Alumno', '0004_alumno_token'),
]
operations = [
migrations.AlterField(
model_name='alumno',
name='token',
field=models.IntegerField(max_length=254, null=True),
),
]
|
from piston.commands.file import run_file # noqa: F401
from piston.commands.input import user_input # noqa: F401
from piston.commands.link import run_link # noqa: F401
from piston.commands.shell import Shell # noqa: F401
from piston.commands.theme_list import theme_list # noqa: F401
|
from sleap import Labels, Instance
from typing import List, Tuple
import numpy as np
def get_stable_node_pairs(
all_points_arrays: np.ndarray, node_names, min_dist: float = 0.0
):
"""Returns sorted list of node pairs with mean and standard dev distance."""
# Calculate distance from each point to each other point within each instance
intra_points = (
all_points_arrays[:, :, np.newaxis, :] - all_points_arrays[:, np.newaxis, :, :]
)
intra_dist = np.linalg.norm(intra_points, axis=-1)
# Find mean and standard deviation for distances between each pair of nodes
inter_std = np.nanstd(intra_dist, axis=0)
inter_mean = np.nanmean(intra_dist, axis=0)
# Clear pairs with too small mean distance
inter_std[inter_mean <= min_dist] = np.nan
# Ravel so that we can sort along single dimension
flat_inter_std = np.ravel(inter_std)
flat_inter_mean = np.ravel(inter_mean)
# Get indices for sort by standard deviation (asc)
sorted_flat_inds = np.argsort(flat_inter_std)
sorted_inds = np.stack(np.unravel_index(sorted_flat_inds, inter_std.shape), axis=1)
# Take every other, since we'll get A->B and B->A for each pair
sorted_inds = sorted_inds[::2]
sorted_flat_inds = sorted_flat_inds[::2]
# print(all_points_arrays.shape)
# print(intra_points.shape)
# print(intra_dist.shape)
# print(inter_std.shape)
# print(sorted_inds.shape)
# Make sorted list of data to return
results = []
for inds, flat_idx in zip(sorted_inds, sorted_flat_inds):
node_a, node_b = inds
std, mean = flat_inter_std[flat_idx], flat_inter_mean[flat_idx]
if mean <= min_dist:
break
results.append(dict(node_a=node_a, node_b=node_b, std=std, mean=mean))
return results
def get_most_stable_node_pair(
all_points_arrays: np.ndarray, min_dist: float = 0.0
) -> Tuple[int, int]:
"""Returns pair of nodes which are at stable distance (over min threshold)."""
    all_pairs = get_stable_node_pairs(all_points_arrays, node_names=None, min_dist=min_dist)
return all_pairs[0]["node_a"], all_pairs[0]["node_b"]
def align_instances(
all_points_arrays: np.ndarray,
node_a: int,
node_b: int,
rotate_on_node_a: bool = False,
) -> np.ndarray:
"""Rotates every instance so that line from node_a to node_b aligns."""
# For each instance, calculate the angle between nodes A and B
node_to_node_lines = (
all_points_arrays[:, node_a, :] - all_points_arrays[:, node_b, :]
)
theta = np.arctan2(node_to_node_lines[:, 1], node_to_node_lines[:, 0])
# Make rotation matrix for each instance based on this angle
R = np.ndarray((len(theta), 2, 2))
c, s = np.cos(theta), np.sin(theta)
R[:, 0, 0] = c
R[:, 1, 1] = c
R[:, 0, 1] = -s
R[:, 1, 0] = s
# Rotate each instance by taking dot product with its corresponding rotation
rotated = np.einsum("aij,ajk->aik", all_points_arrays, R)
if rotate_on_node_a:
# Shift so that rotation is "around" node A
        node_a_pos = all_points_arrays[:, node_a, :][:, np.newaxis, :]
else:
# Shift so node A is at fixed position for every instance
node_a_pos = rotated[:, node_a, :][:, np.newaxis, :]
# Do the shift
rotated -= node_a_pos
return rotated
def align_instances_on_most_stable(
all_points_arrays: np.ndarray, min_stable_dist: float = 4.0
) -> np.ndarray:
"""
Gets most stable pair of nodes and aligned instances along these nodes.
"""
node_a, node_b = get_most_stable_node_pair(
all_points_arrays, min_dist=min_stable_dist
)
aligned = align_instances(all_points_arrays, node_a, node_b, rotate_on_node_a=False)
return aligned
def get_mean_and_std_for_points(
aligned_points_arrays: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Returns mean and standard deviation for every node given aligned points.
"""
mean = np.nanmean(aligned_points_arrays, axis=0)
stdev = np.nanstd(aligned_points_arrays, axis=0)
return mean, stdev
def make_mean_instance(
aligned_points_arrays: List[np.ndarray], std_thresh: int = 0
) -> Instance:
mean, stdev = get_mean_and_std_for_points(aligned_points_arrays)
# Remove points with standard deviation higher than threshold
if std_thresh:
mean[stdev > std_thresh] = np.nan
    from sleap.instance import Point
    OFFSET = 0  # FIXME
    # Note: relies on the module-level `labels` object (loaded in __main__ below) for the skeleton.
    new_instance = Instance(
        skeleton=labels.skeletons[0],
points=[Point(p[0] + OFFSET, p[1] + OFFSET) for p in mean],
)
return new_instance
def align_instance_points(source_points_array, target_points_array):
"""Transforms source for best fit on to target."""
# Find (furthest) pair of points in target to use for alignment
pairwise_distances = np.linalg.norm(
target_points_array[:, np.newaxis, :] - target_points_array[np.newaxis, :, :],
axis=-1,
)
node_a, node_b = np.unravel_index(
np.nanargmax(pairwise_distances), shape=pairwise_distances.shape
)
# Align source to target
source_line = source_points_array[node_a] - source_points_array[node_b]
target_line = target_points_array[node_a] - target_points_array[node_b]
source_theta = np.arctan2(source_line[1], source_line[0])
target_theta = np.arctan2(target_line[1], target_line[0])
rotation_theta = source_theta - target_theta
c, s = np.cos(rotation_theta), np.sin(rotation_theta)
R = np.array([[c, -s], [s, c]])
rotated = source_points_array.dot(R)
# Shift source to minimize total point different from target
target_row_mask = ~np.isnan(target_points_array)[:, 0]
shift = np.mean(
rotated[target_row_mask] - target_points_array[target_row_mask], axis=0
)
rotated -= shift
return rotated
def get_instances_points(instances: List[Instance]) -> np.ndarray:
"""Returns single (instance, node, 2) matrix with points for all instances."""
return np.stack([inst.points_array for inst in instances])
def get_template_points_array(instances: List[Instance]) -> np.ndarray:
"""Returns mean of aligned points for instances."""
points = get_instances_points(instances)
node_a, node_b = get_most_stable_node_pair(points, min_dist=4.0)
aligned = align_instances(points, node_a=node_a, node_b=node_b)
points_mean, points_std = get_mean_and_std_for_points(aligned)
return points_mean
if __name__ == "__main__":
# filename = "tests/data/json_format_v2/centered_pair_predictions.json"
# filename = "/Volumes/fileset-mmurthy/shruthi/code/sleap_expts/preds/screen_all.5pts_tmp_augment_200122/191210_102108_18159112_rig3_2.preds.h5"
filename = "/Volumes/fileset-mmurthy/talmo/wt_gold_labeling/100919.sleap_wt_gold.13pt_init.n=288.junyu.h5"
labels = Labels.load_file(filename)
points = get_instances_points(labels.instances())
get_stable_node_pairs(points, np.array(labels.skeletons[0].node_names))
# import time
#
# t0 = time.time()
# labels.add_instance(
# frame=labels.find_first(video=labels.videos[0]),
# instance=make_mean_instance(align_instances(points, 12, 0))
# )
# print(labels.find_first(video=labels.videos[0]))
# print("time", time.time() - t0)
#
# Labels.save_file(labels, "mean.h5")
# R = np.array(((c, -s), (s, c)))
# a_rotated = a.dot(R)
# a_rotated += a[0] - a_rotated[0]
|
"""
Designing optimum quantizers for different probability distributions.
"""
import itertools
import numpy as np
import scipy.stats as stat
class OptimumQuantizer:
def __init__(self):
self._model = None
self._valid_levels = None
self._quantizer_bins = None
self._quantizer_centers = None
def initialize_quantizer(self, model, num_levels=(2, 4, 8), sparsity_thr=1e-4, x=None):
self._model = model
self._valid_levels = np.array(num_levels)
self._quantizer_bins = [None] * len(self._valid_levels)
self._quantizer_centers = [None] * len(self._valid_levels)
if model == 'normal' or model == 'n':
self._initialize_normal_quantizer()
elif model == 'sparse-normal' or model == 'sn':
self._initialize_sparse_normal_quantizer(sparsity_thr)
elif model == 'folded-normal' or model == 'fn':
self._initialize_folded_normal_quantizer()
elif model == 'sparse-folded-normal' or model == 'sfn':
self._initialize_sparse_folded_normal_quantizer(sparsity_thr)
elif model == 'uniform' or model == 'u':
self._initialize_uniform_quantizer()
elif model == 'sparse-uniform' or model == 'su':
self._initialize_sparse_uniform_quantizer(sparsity_thr)
elif model == 'empirical' or model == 'e':
self._initialize_empirical_quantizer(x)
else:
raise ValueError('Unknown data distribution model!')
def quantize(self, x, num_levels):
if num_levels not in self._valid_levels:
raise ValueError('Quantizer for the given number of levels has not been initialized.')
q_idx = np.where(self._valid_levels == num_levels)[0][0]
q = np.digitize(x, self._quantizer_bins[q_idx])
return q, self._quantizer_centers[q_idx]
def dequantize(self, q, num_levels):
if num_levels not in self._valid_levels:
raise ValueError('Quantizer for the given number of levels has not been initialized.')
q_idx = np.where(self._valid_levels == num_levels)[0][0]
x = self._quantizer_centers[q_idx][q]
return x
# =========================================================================
# using Lloyd-Max algorithm, find the optimum quantizer for different distributions
def _initialize_normal_quantizer(self):
s = np.sqrt(2*np.pi)
max_iterations = 1000
for n, num_levels in enumerate(self._valid_levels):
# initialize quantizer's thresholds and centers
bins = np.linspace(-1, 1, num_levels + 1)
centers = (bins[1:] + bins[:-1]) / 2
bins = bins[1:-1]
for _ in range(max_iterations):
old_centers = centers.copy()
cdf_x = stat.norm.cdf(bins)
exp_x = -np.exp(-bins**2 / 2) / s
# a- updating centers
centers[0] = exp_x[0] / cdf_x[0]
centers[1:-1] = (exp_x[1:] - exp_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])
centers[-1] = -exp_x[-1] / (1-cdf_x[-1])
# b- update bins
bins = (centers[:-1] + centers[1:]) / 2
# c- check for convergence
if np.max(np.abs(centers - old_centers)) < 1e-3:
break
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_sparse_normal_quantizer(self, thr):
s = np.sqrt(2*np.pi)
max_iterations = 1000
for n, num_levels in enumerate(self._valid_levels):
# initialize quantizer's thresholds and centers
K = 1 + num_levels // 2
            bins = np.concatenate((np.linspace(-1, -thr, K), np.linspace(thr, 1, K)))
centers = (bins[1:] + bins[:-1]) / 2
bins = bins[1:-1]
for _ in range(max_iterations):
old_centers = centers.copy()
cdf_x = stat.norm.cdf(bins)
exp_x = -np.exp(-bins**2 / 2) / s
# a- updating centers
centers[0] = exp_x[0] / cdf_x[0]
centers[1:-1] = (exp_x[1:] - exp_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])
centers[-1] = -exp_x[-1] / (1-cdf_x[-1])
# b- update bins
bins = (centers[:-1] + centers[1:]) / 2
bins[K - 2] = -thr
bins[K - 1] = thr
# c- check for convergence
if np.max(np.abs(centers - old_centers)) < 1e-3:
break
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_folded_normal_quantizer(self):
s = np.sqrt(2 / np.pi)
max_iterations = 1000
for n, num_levels in enumerate(self._valid_levels):
# initialize quantizer's thresholds and centers
bins = np.linspace(0, 1, num_levels + 1)
centers = (bins[1:] + bins[:-1]) / 2
bins = bins[1:-1]
for _ in range(max_iterations):
old_centers = centers.copy()
cdf_x = 2 * stat.norm.cdf(bins) - 1
mean_x = s * (1 - np.exp(-bins**2 / 2))
# a- updating centers
centers[0] = mean_x[0] / cdf_x[0]
centers[1:-1] = (mean_x[1:] - mean_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])
centers[-1] = (s - mean_x[-1]) / (1-cdf_x[-1])
# b- update bins
bins = (centers[:-1] + centers[1:]) / 2
# c- check for convergence
if np.max(np.abs(centers - old_centers)) < 1e-3:
break
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_sparse_folded_normal_quantizer(self, thr):
s = np.sqrt(2 / np.pi)
max_iterations = 1000
for n, num_levels in enumerate(self._valid_levels):
# initialize quantizer's thresholds and centers
bins = np.linspace(thr, 1, num_levels + 1)
centers = np.concatenate(([0], (bins[1:] + bins[:-1]) / 2))
bins = bins[:-1]
for _ in range(max_iterations):
old_centers = centers.copy()
cdf_x = 2 * stat.norm.cdf(bins) - 1
mean_x = s * (1 - np.exp(-bins**2 / 2))
# a- updating centers
centers[1:-1] = (mean_x[1:] - mean_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])
centers[-1] = (s - mean_x[-1]) / (1-cdf_x[-1])
# b- update bins
bins = (centers[:-1] + centers[1:]) / 2
bins[0] = thr
# c- check for convergence
if np.max(np.abs(centers - old_centers)) < 1e-3:
break
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_uniform_quantizer(self):
for n, num_levels in enumerate(self._valid_levels):
bins = np.linspace(0, 1, num_levels + 1)
centers = (bins[1:] + bins[:-1]) / 2
bins = bins[1:-1]
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_sparse_uniform_quantizer(self, thr):
for n, num_levels in enumerate(self._valid_levels):
bins = np.linspace(thr, 1, num_levels + 1)
bins = np.concatenate(([-thr], bins))
centers = (bins[1:] + bins[:-1]) / 2
bins = bins[1:-1]
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
def _initialize_empirical_quantizer(self, X):
x = np.reshape(X, newshape=-1)
min_x = np.min(x)
max_x = np.max(x)
for n, num_levels in enumerate(self._valid_levels):
# initialize bins
bins = np.linspace(min_x, max_x, num_levels + 1)
centers = (bins[:-1] + bins[1:]) / 2
bins = bins[1:-1]
for _ in range(1000):
centers_old = centers.copy()
# quantize input vector
q = np.digitize(x, bins)
_optimize_centers_average(x, q, centers, num_levels)
bins = (centers[1:] + centers[:-1]) / 2
if np.max(np.abs(centers - centers_old)) < 1e-3:
break
self._quantizer_bins[n] = bins
self._quantizer_centers[n] = centers
# =============================================================================
# optimize quantizer's reconstruction points by averaging the points in each bin
def _optimize_centers_average(w, q, center, num_levels):
for n in range(num_levels):
if n in q:
center[n] = np.mean(w[q == n])
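# A minimal usage sketch (not part of the original module; the sample size and seed are
# illustrative assumptions): quantize a standard-normal vector to 4 levels with the
# Lloyd-Max quantizer above and reconstruct it.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    x = rng.standard_normal(1000)
    quantizer = OptimumQuantizer()
    quantizer.initialize_quantizer('normal', num_levels=(2, 4, 8))
    codes, centers = quantizer.quantize(x, num_levels=4)  # bin indices + reconstruction points
    x_hat = quantizer.dequantize(codes, num_levels=4)     # nearest-center reconstruction
    print('MSE:', np.mean((x - x_hat) ** 2))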
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By:
import datetime
import factory
import random
from factory.base import BaseFactory, FactoryMetaClass, CREATE_STRATEGY
from factory.fuzzy import FuzzyChoice, FuzzyDate, FuzzyDateTime, FuzzyInteger
from factory.compat import UTC
from ggrc import models
from ggrc.models.reflection import AttributeInfo
def random_string(prefix=''):
return '{prefix}{suffix}'.format(
prefix=prefix,
suffix=random.randint(0,9999999999),
)
def random_string_attribute(prefix=''):
return factory.LazyAttribute(lambda m: random_string(prefix))
class FactoryAttributeGenerator(object):
"""Use the SQLAlchemy ORM model to generate factory attributes."""
@classmethod
def generate(cls, attrs, model_class, attr):
"""Generate a factory attribute for `attr` by inspecting the mapping
type of the attribute in `model_class`. Add the attribute to the
`attrs` dictionary.
"""
if (hasattr(attr, '__call__')):
attr_name = attr.attr_name
value = []
else:
attr_name = attr
class_attr = getattr(model_class, attr_name)
#look up the class method to use to generate the attribute
method = getattr(cls, class_attr.__class__.__name__)
value = method(attr_name, class_attr)
attrs[attr_name] = value
@classmethod
def InstrumentedAttribute(cls, attr_name, class_attr):
method = getattr(cls, class_attr.property.__class__.__name__)
return method(attr_name, class_attr)
@classmethod
def ColumnProperty(cls, attr_name, class_attr):
method = getattr(
cls,
class_attr.property.expression.type.__class__.__name__,
cls.default_column_handler)
return method(attr_name, class_attr)
@classmethod
def default_column_handler(cls, attr_name, class_attr):
return random_string_attribute(attr_name)
@classmethod
def DateTime(cls, attr_name, class_attr):
return FuzzyDateTime(
datetime.datetime(2013,1,1,tzinfo=UTC),
datetime.datetime.now(UTC) + datetime.timedelta(days=730),
)
@classmethod
def Date(cls, attr_name, class_attr):
return FuzzyDate(
datetime.date(2013,1,1),
datetime.date.today() + datetime.timedelta(days=730),
)
@classmethod
def Boolean(cls, attr_name, class_attr):
return FuzzyChoice([True, False])
@classmethod
def Integer(cls, attr_name, class_attr):
return FuzzyInteger(0,100000)
@classmethod
def RelationshipProperty(cls, attr_name, class_attr):
if class_attr.property.uselist:
return []
else:
return None
@classmethod
def AssociationProxy(cls, attr_name, class_attr):
return []
@classmethod
def property(cls, attr_name, class_atr):
return None
class ModelFactoryMetaClass(FactoryMetaClass):
def __new__(cls, class_name, bases, attrs, extra_attrs=None):
"""Use model reflection to build up the list of factory attributes.
The default attributes can be overridden by defining a subclass
        of `ModelFactory` and defining the attribute to be overridden.
"""
model_class = attrs.pop('MODEL', None)
if model_class:
attrs['FACTORY_FOR'] = dict
attribute_info = AttributeInfo(model_class)
for attr in attribute_info._create_attrs:
if hasattr(attr, '__call__'):
attr_name = attr.attr_name
else:
attr_name = attr
if not hasattr(cls, attr_name):
FactoryAttributeGenerator.generate(attrs, model_class, attr)
return super(ModelFactoryMetaClass, cls).__new__(
cls, class_name, bases, attrs)
ModelFactory = ModelFactoryMetaClass(
'ModelFactory', (BaseFactory,), {
'ABSTRACT_FACTORY': True,
'FACTORY_STRATEGY': CREATE_STRATEGY,
'__doc__': """ModelFactory base with build and create support.
    This class supports the SQLAlchemy ORM.
""",
})
def factory_for(model_class):
"""Get the factory for a model by name or by class.
If there is a factory defined for this model in globals() that factory
will be used. Otherwise, one will be created and added to globals().
"""
if type(model_class) is str or type(model_class) is unicode:
factory_name = model_class
import ggrc.models
model_class = getattr(ggrc.models, model_class)
else:
factory_name = model_class.__name__
factory_name = '{0}Factory'.format(factory_name)
factory = globals().get(factory_name, None)
if not factory:
class model_factory(ModelFactory):
MODEL = model_class
model_factory.__name__ = factory_name
globals()[factory_name] = model_factory
factory = model_factory
return factory
class ProgramFactory(ModelFactory):
MODEL = models.Program
kind = FuzzyChoice(['Directive', 'Company Controls']) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file Occupancy_scraper.py
@author Colin Laganier
@version V0.3
@date 2021-12-05
@brief This script scrapes the Imperial Occupancy platform to retrieve the occupancy of Level 2.
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import DesiredCapabilities
from time import sleep
from paho.mqtt import client as mqtt_client
import config
import random
from datetime import datetime
broker = config.IPAddress
port = 1883
topic = "smellStation/occupancy"
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 1000)}'
def ConnectMQTT():
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(client_id)
# client.username_pw_set(config.username, config.password)
client.on_connect = on_connect
client.connect(broker, port)
return client
def Publish(client):
current_occupancy = GetOccupancy()
result = client.publish(topic, current_occupancy)
status = result[0]
if status == 0:
print(f"Sent message - sleep for 60 seconds")
sleep(60)
print("wake")
else:
print(f"Failed to send message")
def PublishExpected():
current_time = datetime.now()
if (current_time.minute % 10 == 0):
return True
else:
return False
def MQTT():
client = ConnectMQTT()
if PublishExpected():
Publish(client)
def GetOccupancy():
attempt = 0
opts = webdriver.ChromeOptions()
d = DesiredCapabilities.CHROME
browser = webdriver.Chrome(desired_capabilities=d, options=opts)
for attempt in range(5):
try:
browser.get(config.url)
sleep(5)
# Login to platform
login_elem = browser.find_elements(
By.XPATH, "//*[@class='align-center background-green display-block clickable mb4 px2 py3 font-size-3 font-normal']")[0]
login_elem.click()
sleep(5)
browser.find_element(By.ID, "username").send_keys(config.username)
browser.find_element(
By.ID, "password_label").send_keys(config.password)
browser.find_element(By.ID, "submitButton").submit()
sleep(5)
occupancy = browser.find_element(By.CSS_SELECTOR,
"#root > div > div > main > div > div.flex__1.overflow-auto.mb3 > div > div:nth-child(4) > div > div > div > div > span").text
print(occupancy)
browser.quit()
return occupancy
        except Exception:
attempt += 1
if attempt == 5:
browser.quit()
print("Scraping failed too many times")
# SendWarningEmail()
return -1
if __name__ == '__main__':
while True:
MQTT()
|
from .instrument import instrument_factory
__VERSION__ = "1.0"
def get(resource_name, resource_manager=None, identity=None, timeout=10):
return instrument_factory(resource_name, resource_manager, identity, timeout)
|
#-*-coding: utf-8 -*-
"""
/dms/webquest/views_navigation_left.py
.. contains the view for changing the left navigation area
Django content Management System
Hans Rauch
[email protected]
The programs of the dms system may be used freely and adapted
to specific needs.
0.01 30.04.2008 start of work
"""
from django.http import HttpResponse, HttpResponseRedirect
from django import newforms as forms
from django.utils.translation import ugettext as _
from dms.roles import require_permission
from dms.queries import get_site_url
from dms.queries import get_min_max_menu_left
from dms.queries import delete_menuitem_navmenu_left
from dms.utils_navigation import save_menus_left
from dms.folder.utils import get_folder_content
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
def get_menu_left_webquest(item_containers, item_container):
""" .. liefert das Webquest-Menu """
start_name = u'webquest'
content = '0 | %s | %s | Webquest | Startseite | %s\n' % \
(start_name, item_container.get_absolute_url(),
'<b><i><span class="red">::</span></i></b>')
content += '999\n'
for i in item_containers:
if i.item.app.name in ['dmsDocument'] or i.item.app.is_folderish:
content += '1 | %s | %s | %s\n' % \
(i.item.name, i.get_absolute_url(), i.item.title)
return content
# -----------------------------------------------------
def create_new_menu_webquest(item_container):
item_containers, sections, d_sections = get_folder_content(item_container)
n_min, n_max = get_min_max_menu_left()
menu_left_id = 1 + max(abs(n_min), n_max)
text = get_menu_left_webquest(item_containers, item_container)
save_menus_left(menu_left_id, text)
item_container.container.menu_left_id = menu_left_id
item_container.container.nav_name_left = 'webquest|'
item_container.container.save()
for ic in item_containers:
if ic.item.app.is_folderish:
ic.container.menu_left_id = menu_left_id
ic.container.nav_name_left = _(u'webquest|') + ic.item.name
ic.container.save()
return menu_left_id
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def webquest_navigation_left(request, item_container):
""" Eigenschaften des Ordners aendern """
menu_id = item_container.container.menu_left_id
delete_menuitem_navmenu_left(menu_id)
#sub_menu = item_container.container.nav_name_left
menu_id_new = create_new_menu_webquest(item_container)
item_container.container.menu_id = menu_id_new
item_container.container.save()
return HttpResponseRedirect(get_site_url(item_container, 'index.html'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-15 22:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mentoring', '0020_auto_20161215_1455'),
]
operations = [
migrations.AlterField(
model_name='mentoreducation',
name='degree',
field=models.CharField(choices=[('ba', 'Bachelor of Arts'), ('bs', 'Bachelor of Sciences'), ('m', 'Masters'), ('d', 'Ph.D'), ('pd', 'MD Ph.D'), ('md', 'MD')], default='', max_length=3),
preserve_default=False,
),
migrations.AlterField(
model_name='mentoreducation',
name='graduation_year',
field=models.DateField(blank=True, null=True),
),
]
|
from celery.utils import get_cls_by_name
ALIASES = {
"processes": "celery.concurrency.processes.TaskPool",
"eventlet": "celery.concurrency.evlet.TaskPool",
"gevent": "celery.concurrency.evg.TaskPool",
}
def get_implementation(cls):
return get_cls_by_name(cls, ALIASES)
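# A minimal usage sketch (not part of the original module): resolve a pool alias to its
# implementation class without instantiating it; "processes" is one of the aliases above.
if __name__ == "__main__":
    pool_cls = get_implementation("processes")  # -> celery.concurrency.processes.TaskPool
    print(pool_cls)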
|
# Kenny Sprite Sheet Slicer
# Sprite.py
# Copyright Will Blankenship 2015
class Sprite:
    def reverse_y(self, image_height):
        # Flip the y coordinate from a top-left origin to a bottom-left origin
        self.y = str(image_height - int(self.y) - int(self.height))
def __init__(self, name, x, y, width, height):
self.name = name
self.x = x
self.y = y
self.width = width
self.height = height
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.alerting_config
import cohesity_management_sdk.models.environment_type_job_parameters
import cohesity_management_sdk.models.indexing_policy
class RpoPolicySettings(object):
"""Implementation of the 'RpoPolicySettings' model.
Specifies all the additional settings that are applicable only
to an RPO policy. This can include storage domain, settings of different
environments, etc.
Attributes:
alerting_config (AlertingConfig): Specifies optional settings for
alerting.
alerting_policy (list of AlertingPolicyEnum): Array of Job Events.
During Job Runs, the following Job Events are generated: 1) Job
succeeds 2) Job fails 3) Job violates the SLA These Job Events can
cause Alerts to be generated. 'kSuccess' means the Protection Job
succeeded. 'kFailure' means the Protection Job failed.
'kSlaViolation' means the Protection Job took longer than the time
period specified in the SLA.
environment_type_job_params (EnvironmentTypeJobParameters): Specifies
additional parameters that are common to all Protection Sources in
a Protection Job created for a particular environment type.
indexing_policy (IndexingPolicy): Specifies settings for indexing
files found in an Object (such as a VM) so these files can be
searched and recovered. This also specifies inclusion and
exclusion rules that determine the directories to index.
qos_type (QosTypeRpoPolicySettingsEnum): Specifies the QoS policy type
to use. 'kBackupHDD' indicates the Cohesity Cluster writes data
directly to the HDD tier for this Protection Job. This is the
recommended setting. 'kBackupSSD' indicates the Cohesity Cluster
writes data directly to the SSD tier for this Protection Job. Only
specify this policy if you need fast ingest speed for a small
number of Protection Jobs.
storage_domain_id (long|int): Specifies the Storage Domain to which
data will be written.
"""
# Create a mapping from Model property names to API property names
_names = {
"alerting_config":'alertingConfig',
"alerting_policy":'alertingPolicy',
"environment_type_job_params":'environmentTypeJobParams',
"indexing_policy":'indexingPolicy',
"qos_type":'qosType',
"storage_domain_id":'storageDomainId'
}
def __init__(self,
alerting_config=None,
alerting_policy=None,
environment_type_job_params=None,
indexing_policy=None,
qos_type=None,
storage_domain_id=None):
"""Constructor for the RpoPolicySettings class"""
# Initialize members of the class
self.alerting_config = alerting_config
self.alerting_policy = alerting_policy
self.environment_type_job_params = environment_type_job_params
self.indexing_policy = indexing_policy
self.qos_type = qos_type
self.storage_domain_id = storage_domain_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
alerting_config = cohesity_management_sdk.models.alerting_config.AlertingConfig.from_dictionary(dictionary.get('alertingConfig')) if dictionary.get('alertingConfig') else None
alerting_policy = dictionary.get('alertingPolicy')
environment_type_job_params = cohesity_management_sdk.models.environment_type_job_parameters.EnvironmentTypeJobParameters.from_dictionary(dictionary.get('environmentTypeJobParams')) if dictionary.get('environmentTypeJobParams') else None
indexing_policy = cohesity_management_sdk.models.indexing_policy.IndexingPolicy.from_dictionary(dictionary.get('indexingPolicy')) if dictionary.get('indexingPolicy') else None
qos_type = dictionary.get('qosType')
storage_domain_id = dictionary.get('storageDomainId')
# Return an object of this model
return cls(alerting_config,
alerting_policy,
environment_type_job_params,
indexing_policy,
qos_type,
storage_domain_id)
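# A minimal usage sketch (not part of the SDK file; the payload keys and values below are
# illustrative API-style fields): nested objects are omitted, so they deserialize to None.
if __name__ == '__main__':
    payload = {
        'qosType': 'kBackupHDD',
        'storageDomainId': 1234,
        'alertingPolicy': ['kFailure', 'kSlaViolation'],
    }
    settings = RpoPolicySettings.from_dictionary(payload)
    print(settings.qos_type, settings.storage_domain_id, settings.alerting_policy)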
|