filename | text
---|---|
the-stack_0_5046 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.platform import system_info
from py_utils import camel_case
class SystemInfoBackend(object):
def __init__(self, devtools_port):
self._port = devtools_port
def GetSystemInfo(self, timeout=10):
req = {'method': 'SystemInfo.getInfo'}
websocket = inspector_websocket.InspectorWebsocket()
try:
websocket.Connect('ws://127.0.0.1:%i/devtools/browser' % self._port,
timeout)
res = websocket.SyncRequest(req, timeout)
finally:
websocket.Disconnect()
if 'error' in res:
return None
return system_info.SystemInfo.FromDict(
camel_case.ToUnderscore(res['result']))
def Close(self):
pass
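# Hedged usage sketch (added example, not part of the original module). It assumes
# a Chrome/Chromium instance is already listening for DevTools connections on the
# port below; 9222 is only an illustrative value.
if __name__ == '__main__':
    backend = SystemInfoBackend(devtools_port=9222)
    info = backend.GetSystemInfo(timeout=10)
    # GetSystemInfo returns None on a DevTools error; otherwise the attribute
    # layout is defined by telemetry's system_info.SystemInfo.
    print(info)
    backend.Close()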
|
the-stack_0_5050 | import logging
from threading import Thread
from telemetry_f1_2021.listener import TelemetryListener
from kafka.kafka_admin import KafkaAdmin
class TelemetryManager(Thread):
"""Class for adding packets to the packet queue.
Derived from the Thread class, this is run as part of a multithreaded program.
The class initialises a TelemetryListener object and uses this to gather packets
from the UDP stream. These are then added to a separate packet queue by reference.
Methods:
run - called as part of the start method in Thread. Gets packets and adds them to the queue.
"""
def __init__(self, producer):
Thread.__init__(self)
self.producer = producer
self.daemon = True
self.telemetry_listener = TelemetryListener()
self.start()
def run(self):
admin = KafkaAdmin(self.producer.config)
while True:
packet = self.telemetry_listener.get()
topic_name = type(packet).__name__
admin.check_add_topic(topic_name)
self.producer.produce_data(topic_name, packet)
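# Hedged usage sketch (added example, not part of the original module). The
# producer object is an assumption: it must expose `.config` and
# `.produce_data(topic, packet)`, as used in run() above; the wrapper class name
# below is purely illustrative.
#
#   producer = KafkaProducerWrapper({"bootstrap.servers": "localhost:9092"})
#   manager = TelemetryManager(producer)  # __init__ calls start(), so listening begins immediately
#   manager.join()                        # keep the main thread alive while packets stream in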
|
the-stack_0_5051 | import logging
import multiprocessing
import os
from bootleg.utils import train_utils
def get_log_name(args, mode):
log_name = os.path.join(train_utils.get_save_folder(args.run_config), f"log_{mode}")
log_name += train_utils.get_file_suffix(args)
log_name += f'_gpu{args.run_config.gpu}'
return log_name
def create_logger(args, mode):
# Distributed and single-process runs share the same named logger.
logger = logging.getLogger("bootleg")
# set logging level
numeric_level = getattr(logging, args.run_config.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.run_config.loglevel.upper())
logger.setLevel(numeric_level)
# do not propagate messages to the root logger
logger.propagate = False
log_name = get_log_name(args, mode)
if not os.path.exists(log_name): os.system("touch " + log_name)
if not logger.hasHandlers():
formatter = logging.Formatter('%(asctime)s %(message)s')
fh = logging.FileHandler(log_name, mode='w' if mode == 'train' else 'a')
fh.setFormatter(formatter)
logger.addHandler(fh)
# only print the stream for the first GPU
if args.run_config.gpu == 0:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
else:
print('Something went wrong in the logger')
exit()
return logger
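# Hedged usage sketch (added example, not part of the original module). The exact
# layout of the bootleg `args` object is an assumption inferred from the attribute
# accesses above (args.run_config.distributed / .loglevel / .gpu); train_utils
# reads further fields, so the real config object is richer than this.
#
#   logger = create_logger(args, mode="train")
#   logger.info("training started")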
def get_logger(args):
# Distributed and single-process runs share the same named logger.
return logging.getLogger("bootleg") |
the-stack_0_5052 | # -*- coding: utf-8 -*-
# This class implements the AIML pattern-matching algorithm described by Dr. Richard Wallace at: http://www.alicebot.org/documentation/matching.html
from __future__ import print_function
import marshal
import pprint
import re
from .constants import *
class PatternMgr:
# special dictionary keys
_UNDERSCORE = 0
_STAR = 1
_TEMPLATE = 2
_THAT = 3
_TOPIC = 4
_BOT_NAME = 5
def __init__(self):
self._root = {}
self._templateCount = 0
self._botName = u"Nameless"
punctuation = "\"`~!@#$%^&*()-_=+[{]}\\|;:',<.>/?"
self._puncStripRE = re.compile("[" + re.escape(punctuation) + "]")
self._whitespaceRE = re.compile(r"\s+", re.UNICODE)
def numTemplates(self):
"""返回当前存储的模板数量。"""
return self._templateCount
def setBotName(self, name):
"""设置机器人的名称,用于匹配模式中的<bot name =“name”>标签。 名字必须是一个单词! """
# 将多个单词的名字合并为一个单词
self._botName = unicode( ' '.join(name.split()) )
def dump(self):
"""打印所有学习的模式,用于调试目的。"""
pprint.pprint(self._root)
def save(self, filename):
"""将当前模式转储到由filename指定的文件。 要稍后恢复,请使用restore(). """
try:
outFile = open(filename, "wb")
marshal.dump(self._templateCount, outFile)
marshal.dump(self._botName, outFile)
marshal.dump(self._root, outFile)
outFile.close()
except Exception as e:
print( "Error saving PatternMgr to file %s:" % filename )
raise
def restore(self, filename):
"""还原以前保存过的模式集合。"""
try:
inFile = open(filename, "rb")
self._templateCount = marshal.load(inFile)
self._botName = marshal.load(inFile)
self._root = marshal.load(inFile)
inFile.close()
except Exception as e:
print( "Error restoring PatternMgr from file %s:" % filename )
raise
def add(self, data, template):
"""将[pattern / that / topic]元组及其相应的模板添加到节点树中。 """
pattern,that,topic = data
# TODO: make sure words contains only legal characters (alphanumerics, *, _)
# Navigate the node tree to the template's location, adding nodes as necessary.
node = self._root
for word in pattern.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
elif key == u"BOT_NAME":
key = self._BOT_NAME
if key not in node:
node[key] = {}
node = node[key]
# If a non-empty "that" pattern was included, navigate further down
if len(that) > 0:
if self._THAT not in node:
node[self._THAT] = {}
node = node[self._THAT]
for word in that.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# If a non-empty "topic" string was included, navigate further down
if len(topic) > 0:
if self._TOPIC not in node:
node[self._TOPIC] = {}
node = node[self._TOPIC]
for word in topic.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# Add the template
if self._TEMPLATE not in node:
self._templateCount += 1
node[self._TEMPLATE] = template
def match(self, pattern, that, topic):
""" 返回最接近模式的模板。 'that'参数包含机器人以前的回应。 “topic”参数包含当前的对话主题。
如果没有找到模板,则返回None。 """
if len(pattern) == 0:
return None
# Mutilate the input: remove all punctuation and convert the text to all caps. [This is the key step!]
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
# Pass the input off to the recursive call
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
return template
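# Hedged usage sketch (added example, not part of the original module); the
# category data below is purely illustrative.
#
#   mgr = PatternMgr()
#   mgr.add((u"HELLO *", u"*", u"*"), u"Hi there!")
#   mgr.match(u"Hello, world!", u"", u"unknown")            # -> u"Hi there!"
#   mgr.star("star", u"Hello, world!", u"", u"unknown", 1)  # -> u"world!"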
def star(self, starType, pattern, that, topic, index):
"""返回一个字符串,即由*匹配的模式部分。
'starType'参数指定要找到哪种星型。 合法值是:
- “star”:匹配主要模式中的一个星号。
- “thatstar”:与that模式中的一个星号匹配。
- “topicstar”:与topic模式中的一个星号匹配。 """
# 破坏输入。 删除所有标点符号并将文本转换为全部大写。
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
input_ = re.sub(self._whitespaceRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
topicInput = re.sub(self._whitespaceRE, " ", topicInput)
# Pass the input off to the recursive pattern-matcher
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
if template == None:
return ""
# Extract the appropriate portion of the pattern, based on the starType argument.
words = None
if starType == 'star':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = input_.split()
elif starType == 'thatstar':
patMatch = patMatch[patMatch.index(self._THAT)+1 : patMatch.index(self._TOPIC)]
words = thatInput.split()
elif starType == 'topicstar':
patMatch = patMatch[patMatch.index(self._TOPIC)+1 :]
words = topicInput.split()
else:
# unknown value
raise ValueError( "starType must be in ['star', 'thatstar', 'topicstar']" )
# Compare the input string to the matched pattern, word by word. At the end of the loop,
# if foundTheRightStar is true, start and end will contain the start and end indices
# (in "words") of the substring matched by the desired star.
foundTheRightStar = False
start = end = j = numStars = k = 0
for i in range(len(words)):
# This condition is true after processing a star that is not the one we care about.
if i < k:
continue
# If we've reached the end of the pattern, we're done.
if j == len(patMatch):
break
if not foundTheRightStar:
if patMatch[j] in [self._STAR, self._UNDERSCORE]: #we got a star
numStars += 1
if numStars == index:
# This is the star we care about.
foundTheRightStar = True
start = i
# Iterate through the rest of the string.
for k in range (i, len(words)):
# If the star is at the end of the pattern, we know exactly where it ends.
if j+1 == len (patMatch):
end = len (words)
break
# If the words have begun matching the pattern again, the star has ended.
# ======== Unverified fix: for the pattern "* A B", the input "A C A B" will match, which is a bug
if patMatch[j+1] == words[k]:
end = k - 1
i = k
break
# If we just finished processing the star we care about, exit the loop early.
if foundTheRightStar:
break
# Move to the next element of the pattern.
j += 1
# Extract the star from the original, unmutilated input.
if foundTheRightStar:
#print( ' '.join(pattern.split()[start:end+1]) )
if starType == 'star': return ' '.join(pattern.split()[start:end+1])
elif starType == 'thatstar': return ' '.join(that.split()[start:end+1])
elif starType == 'topicstar': return ' '.join(topic.split()[start:end+1])
else: return u""
def _match(self, words, thatWords, topicWords, root):
"""返回一个元组(pat,tem),其中pat是节点列表,从根开始并导致匹配的模式,tem是匹配的模板。 """
# 基本情况:如果单词列表为空,则返回当前节点的模板。
if len(words) == 0:
# we're out of words.
pattern = []
template = None
if len(thatWords) > 0:
# If thatWords is not empty, recursively pattern-match on the _THAT node with thatWords as the words.
try:
pattern, template = self._match(thatWords, [], topicWords, root[self._THAT])
if pattern != None:
pattern = [self._THAT] + pattern
except KeyError:
pattern = []
template = None
elif len(topicWords) > 0:
# If thatWords is empty and topicWords is not, recursively pattern-match on the _TOPIC node with topicWords as the words.
try:
pattern, template = self._match(topicWords, [], [], root[self._TOPIC])
if pattern != None:
pattern = [self._TOPIC] + pattern
except KeyError:
pattern = []
template = None
if template == None:
# We're completely out of input. Grab the template at this node.
pattern = []
try: template = root[self._TEMPLATE]
except KeyError: template = None
return (pattern, template)
first = words[0]
suffix = words[1:]
# Check the underscore.
# Note: this is causing problems in the standard AIML set, and is currently disabled.
if self._UNDERSCORE in root:
# Must include the case where suf is [] in order to handle the case where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._UNDERSCORE])
if template is not None:
newPattern = [self._UNDERSCORE] + pattern
return (newPattern, template)
# Check first
if first in root:
pattern, template = self._match(suffix, thatWords, topicWords, root[first])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check bot name
if self._BOT_NAME in root and first == self._botName:
pattern, template = self._match(suffix, thatWords, topicWords, root[self._BOT_NAME])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check star
if self._STAR in root:
# Must include the case where suf is [] in order to handle the case where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._STAR])
if template is not None:
newPattern = [self._STAR] + pattern
return (newPattern, template)
# No match was found.
return (None, None) |
the-stack_0_5053 | import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import normalize, LabelEncoder
import sys
from process import load_names
from scanorama import *
NAMESPACE = 'hsc'
data_names = [
'data/hsc/hsc_mars',
'data/hsc/hsc_ss2',
]
# Computes the probability that the corrected SS2 dataset
# comes from the original SS2 distribution or from the same
# distribution as the corrected MARS-Seq dataset.
if __name__ == '__main__':
# Load data.
datasets, genes_list, n_cells = load_names(data_names, verbose=False)
datasets, genes = merge_datasets(datasets, genes_list, verbose=False)
datasets, genes = process_data(datasets, genes)
datasets = [ normalize(ds, axis=1) for ds in datasets ]
# Fit initial mixture models.
gm_ss2 = (GaussianMixture(n_components=3, n_init=3)
.fit(datasets[1]))
# Do batch correction.
datasets = assemble(
datasets,
verbose=False, knn=KNN, sigma=SIGMA, approx=APPROX
)
datasets = [ normalize(ds, axis=1) for ds in datasets ]
# Fit mixture models to other dataset.
gm_mars_corrected = (
GaussianMixture(n_components=3, n_init=3)
.fit(datasets[0])
)
# Natural log likelihoods.
ll_ss2 = gm_ss2.score(datasets[1])
ll_mars_corrected = gm_mars_corrected.score(datasets[1])
# Natural log of the likelihood ratio.
print(ll_ss2 - max(ll_ss2, ll_mars_corrected))
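    # Added note (not in the original script): sklearn's GaussianMixture.score()
    # returns the mean per-sample log-likelihood, so a printed value of, say, -2.3
    # corresponds to a per-sample likelihood ratio of exp(-2.3) ~= 0.10; the max()
    # above caps the printed value at 0.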
|
the-stack_0_5054 | from pynonymizer.database.provider import DatabaseProvider
from pynonymizer.database.provider import SEED_TABLE_NAME
from pynonymizer.strategy.update_column import UpdateColumnStrategyTypes
from pynonymizer.strategy.table import TableStrategyTypes
from pynonymizer.database.exceptions import (
UnsupportedColumnStrategyError,
UnsupportedTableStrategyError,
DependencyError,
)
from pynonymizer.fake import FakeDataType
import math
import logging
from tqdm import tqdm
from pathlib import PureWindowsPath, PurePosixPath
import re
_FAKE_COLUMN_TYPES = {
FakeDataType.STRING: "VARCHAR(MAX)",
FakeDataType.DATE: "DATE",
FakeDataType.DATETIME: "DATETIME",
FakeDataType.INT: "INT",
}
_LOCAL_SERVER = "127.0.0.1"
_DEFAULT_PORT = "1433"
def _extract_driver_version(driver):
try:
return int(re.findall(r"\d+", driver)[0])
except IndexError:
return 0
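# Worked example (added note): _extract_driver_version("ODBC Driver 17 for SQL Server")
# returns 17, while a driver name containing no digits falls back to 0.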
class MsSqlProvider(DatabaseProvider):
"""
A pyodbc-based MSSQL provider.
"""
logger = logging.getLogger(__name__)
# stats value for restore/backup command: Report progress every X percent
# A lower value means MORE resultssets / more frequent updates from the backup command.
# Values lower than 5 often yield unreliable results on smaller backups
__STATS = 5
def __init__(
self,
db_host,
db_user,
db_pass,
db_name,
db_port=None,
seed_rows=None,
backup_compression=False,
driver=None,
):
# import here for fast-failiness
import pyodbc
db_host = db_host or _LOCAL_SERVER
db_port = db_port or _DEFAULT_PORT
driver = driver or self.__detect_driver()
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.db_name = db_name
self.db_port = db_port
if seed_rows is None:
seed_rows = 150
self.seed_rows = int(seed_rows)
self.__conn = None
self.__db_conn = None
self.__backup_compression = backup_compression
self.__driver = driver
def __detect_driver(self):
import pyodbc
ms_drivers = [i for i in pyodbc.drivers() if "sql server" in i.lower()]
if len(ms_drivers) < 1:
raise DependencyError(
"odbc", "Failed to detect any ODBC drivers on this system."
)
if len(ms_drivers) > 1:
self.logger.debug("multiple drivers detected for mssql: %s", ms_drivers)
# Sort by the highest number (like, ODBC driver 14 for SQL server)
return sorted(ms_drivers, key=_extract_driver_version, reverse=True)[0]
def __require_local_server(self):
if self.db_host != _LOCAL_SERVER:
raise DependencyError(
"db_host",
"This operation does not support remote servers due to backup file "
"location requirements. You must omit db_host from your configuration "
"and run pynonymizer on the same server as the database.",
)
def __connection(self):
    """a lazy-evaluated connection"""
    import pyodbc
if self.__conn is None:
self.__conn = pyodbc.connect(
driver=f"{{{self.__driver}}}",
server=f"{self.db_host},{self.db_port}",
uid=self.db_user,
pwd=self.db_pass,
autocommit=True,
)
return self.__conn
def __db_connection(self):
    """a lazy-evaluated db-specific connection"""
    import pyodbc
if self.__db_conn is None:
self.__db_conn = pyodbc.connect(
driver=f"{{{self.__driver}}}",
database=self.db_name,
server=f"{self.db_host},{self.db_port}",
uid=self.db_user,
pwd=self.db_pass,
autocommit=True,
)
return self.__db_conn
def __execute(self, *args, **kwargs):
return self.__connection().execute(*args, **kwargs)
def __db_execute(self, *args, **kwargs):
return self.__db_connection().execute(*args, **kwargs)
def __get_path(self, filepath):
if "\\" in filepath:
return PureWindowsPath(filepath)
else:
return PurePosixPath(filepath)
def __get_default_datafolder(self):
"""
Locate the default data folder using the `model` database location
It's possible that the model database is not the currently set default, i.e if it's been changed after install
The solution to this would be to make a new database and then perform the below check on that instead.
See https://blogs.technet.microsoft.com/sqlman/2009/07/19/tsql-script-determining-default-database-file-log-path/
However, this seems like a heavyweight solution for what is essentially a tsql-writeable tempfolder, so
checking the model db seems like a good 'boring' solution
:return: Default data directory e.g. "C:\\DATA"
"""
datafile = self.__execute(
"""
SELECT physical_name
FROM sys.master_files mf
INNER JOIN sys.[databases] d
ON mf.[database_id] = d.[database_id]
WHERE d.[name] = 'model' AND type = 0
"""
).fetchone()[0]
return self.__get_path(datafile).parent
def __get_default_logfolder(self):
"""
Locate the default log folder using the `model` database location
__get_default_datafolder: see for more info
:return:
"""
logfile = self.__execute(
"""
SELECT physical_name
FROM sys.master_files mf
INNER JOIN sys.[databases] d
ON mf.[database_id] = d.[database_id]
WHERE d.[name] = 'model' AND type = 1
"""
).fetchone()[0]
return self.__get_path(logfile).parent
def __get_file_moves(self, input_path):
"""
Using RESTORE FILELISTONLY, get all the files in the backup that need to be moved to the local system for restore
:return: a dict of file name: new destination
"""
datadir = self.__get_default_datafolder()
logdir = self.__get_default_logfolder()
filelist = self.__execute(
f"RESTORE FILELISTONLY FROM DISK = ?;", input_path
).fetchall()
move_file_map = {}
for file in filelist:
name = file[0]
type = file[2].upper()
filepath = self.__get_path(file[1])
# log files can go into the default log directory, everything else can go into the data directory
if type == "L":
target_path = str(logdir.joinpath(f"{self.db_name}_{filepath.name}"))
else:
target_path = str(datadir.joinpath(f"{self.db_name}_{filepath.name}"))
move_file_map[name] = target_path
return move_file_map
def __async_operation_progress(self, desc, cursor):
# With STATS=x, we should recieve 100/x resultsets, provided the backup is slow enough.
# With some databases, it will jump from y% to 100, so we'll only get <x nextset calls.
# Even SSMS doesn't get informed (read: it's not my fault, blame microsoft)
with tqdm(desc=desc, total=math.floor(100 / self.__STATS)) as progressbar:
while cursor.nextset():
progressbar.update()
# finish the progress - less confusing than a dangling 40% progressbar
progressbar.update(progressbar.total - progressbar.n)
def __run_scripts(self, script_list, title=""):
import pyodbc
for i, script in enumerate(script_list):
self.logger.info(f'Running {title} script #{i} "{script[:50]}"')
cursor = self.__db_execute(script)
results = None
try:
results = cursor.fetchall()
except pyodbc.Error:
pass
self.logger.info(results)
def __create_seed_table(self, qualifier_map):
seed_column_lines = [
"[{}] {}".format(name, _FAKE_COLUMN_TYPES[col.data_type])
for name, col in qualifier_map.items()
]
create_statement = "CREATE TABLE [{}]({});".format(
SEED_TABLE_NAME, ",".join(seed_column_lines)
)
self.__db_execute(create_statement)
def __drop_seed_table(self):
self.__db_execute("DROP TABLE IF EXISTS [{}];".format(SEED_TABLE_NAME))
def __insert_seed_row(self, qualifier_map):
column_list = ",".join(
["[{}]".format(qualifier) for qualifier in qualifier_map]
)
substitution_list = ",".join(
[" ?".format(qualifier) for qualifier in qualifier_map]
)
value_list = [column.value for qualifier, column in qualifier_map.items()]
statement = "INSERT INTO [{}]({}) VALUES ({});".format(
SEED_TABLE_NAME, column_list, substitution_list
)
self.__db_execute(statement, value_list)
def __seed(self, qualifier_map):
for i in tqdm(
range(0, self.seed_rows), desc="Inserting seed data", unit="rows"
):
self.__insert_seed_row(qualifier_map)
def __get_column_subquery(self, column_strategy, table_name, column_name):
if column_strategy.strategy_type == UpdateColumnStrategyTypes.EMPTY:
return "('')"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_EMAIL:
return f"( SELECT CONCAT(NEWID(), '@', NEWID(), '.com') )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_LOGIN:
return f"( SELECT NEWID() )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.FAKE_UPDATE:
column = f"[{column_strategy.qualifier}]"
if column_strategy.sql_type:
column = f"CAST({column} AS {column_strategy.sql_type})"
# Add WHERE LIKE % OR NULL to make subquery correlated with outer table, therefore uncachable
return f"( SELECT TOP 1 {column} FROM [{SEED_TABLE_NAME}] WHERE [{table_name}].[{column_name}] LIKE '%' OR [{table_name}].[{column_name}] IS NULL ORDER BY NEWID())"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.LITERAL:
return column_strategy.value
else:
raise UnsupportedColumnStrategyError(column_strategy)
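# Added illustration (not part of the original module): for a FAKE_UPDATE column
# the subquery above takes roughly this shape, with [<SEED_TABLE_NAME>] standing
# in for the real seed table and the table/column names invented:
#
#   ( SELECT TOP 1 [user_name] FROM [<SEED_TABLE_NAME>]
#     WHERE [users].[name] LIKE '%' OR [users].[name] IS NULL ORDER BY NEWID() )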
def create_database(self):
self.logger.warning(
"MSSQL: create_database ignored, database will be created when restore_db is run"
)
def drop_database(self):
# force connection close so we can always drop the db: sometimes timing makes a normal drop impossible.
self.__execute(
f"ALTER DATABASE [{self.db_name}] SET SINGLE_USER WITH ROLLBACK IMMEDIATE;"
)
self.__execute(f"DROP DATABASE IF EXISTS [{self.db_name}];")
def anonymize_database(self, database_strategy):
qualifier_map = database_strategy.fake_update_qualifier_map
if len(qualifier_map) > 0:
self.logger.info("creating seed table with %d columns", len(qualifier_map))
self.__create_seed_table(qualifier_map)
self.logger.info("Inserting seed data")
self.__seed(qualifier_map)
self.__run_scripts(database_strategy.before_scripts, "before")
table_strategies = database_strategy.table_strategies
self.logger.info("Anonymizing %d tables", len(table_strategies))
anonymization_errors = []
with tqdm(
desc="Anonymizing database", total=len(table_strategies)
) as progressbar:
for table_strategy in table_strategies:
try:
table_name = table_strategy.table_name
schema_prefix = (
f"[{table_strategy.schema}]." if table_strategy.schema else ""
)
if table_strategy.strategy_type == TableStrategyTypes.TRUNCATE:
progressbar.set_description("Truncating {}".format(table_name))
self.__db_execute(
"TRUNCATE TABLE {}[{}];".format(schema_prefix, table_name)
)
elif table_strategy.strategy_type == TableStrategyTypes.DELETE:
progressbar.set_description("Deleting {}".format(table_name))
self.__db_execute(
"DELETE FROM {}[{}];".format(schema_prefix, table_name)
)
elif (
table_strategy.strategy_type
== TableStrategyTypes.UPDATE_COLUMNS
):
progressbar.set_description("Anonymizing {}".format(table_name))
where_grouping = table_strategy.group_by_where()
total_wheres = len(where_grouping)
for i, (where, column_map) in enumerate(where_grouping.items()):
column_assignments = ",".join(
[
"[{}] = {}".format(
name,
self.__get_column_subquery(
column, table_name, name
),
)
for name, column in column_map.items()
]
)
where_clause = f" WHERE {where}" if where else ""
progressbar.set_description(
"Anonymizing {}: w[{}/{}]".format(
table_name, i + 1, total_wheres
)
)
# Disable ANSI_WARNINGS to allow oversized fake data to be truncated without error
self.__db_execute(
"SET ANSI_WARNINGS off; UPDATE {}[{}] SET {}{}; SET ANSI_WARNINGS on;".format(
schema_prefix,
table_name,
column_assignments,
where_clause,
)
)
else:
raise UnsupportedTableStrategyError(table_strategy)
except Exception as e:
anonymization_errors.append(e)
self.logger.exception(
f"Error while anonymizing table {table_strategy.qualified_name}"
)
progressbar.update()
if len(anonymization_errors) > 0:
raise Exception("Error during anonymization")
self.__run_scripts(database_strategy.after_scripts, "after")
self.logger.info("Dropping seed table")
self.__drop_seed_table()
def restore_database(self, input_path):
self.__require_local_server()
move_files = self.__get_file_moves(input_path)
self.logger.info("Found %d files in %s", len(move_files), input_path)
self.logger.debug(move_files)
# get move statements and flatten pairs out so we can do the 2-param substitution
move_clauses = ", ".join(["MOVE ? TO ?"] * len(move_files))
move_clause_params = [item for pair in move_files.items() for item in pair]
restore_cursor = self.__execute(
f"RESTORE DATABASE ? FROM DISK = ? WITH {move_clauses}, STATS = ?;",
[self.db_name, input_path, *move_clause_params, self.__STATS],
)
self.__async_operation_progress("Restoring Database", restore_cursor)
def dump_database(self, output_path):
self.__require_local_server()
with_options = []
if self.__backup_compression:
with_options.append("COMPRESSION")
with_options_str = (
",".join(with_options) + ", " if len(with_options) > 0 else ""
)
dump_cursor = self.__execute(
f"BACKUP DATABASE ? TO DISK = ? WITH {with_options_str}STATS = ?;",
[self.db_name, output_path, self.__STATS],
)
self.__async_operation_progress("Dumping Database", dump_cursor)
|
the-stack_0_5055 | """Based on BertForTokenClassification, implemented here since it's not in transformers currently."""
from torch import nn
from transformers import AlbertModel, AlbertPreTrainedModel
class AlbertForTokenClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`,
`optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
(:class:`~transformers.AlbertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
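# Hedged usage sketch (added example, not part of the original module). It assumes
# the Hugging Face `transformers` and `torch` packages; the checkpoint name and
# label count are only illustrations.
#
#   from transformers import AlbertConfig, AlbertTokenizer
#   import torch
#
#   config = AlbertConfig.from_pretrained("albert-base-v2", num_labels=9)
#   model = AlbertForTokenClassification(config)
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   enc = tokenizer("Paris is in France", return_tensors="pt")
#   labels = torch.zeros_like(enc["input_ids"])
#   loss, scores = model(enc["input_ids"], attention_mask=enc["attention_mask"], labels=labels)[:2]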
|
the-stack_0_5057 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import calendar
import datetime
import json
import time
import unittest
import mock
import pytest
import six
from six.moves import urllib_parse
from . import _read_local_json
_SERVICE_ACCOUNT_JSON = _read_local_json("url_signer_v4_test_account.json")
_CONFORMANCE_TESTS = _read_local_json("url_signer_v4_test_data.json")["signingV4Tests"]
_BUCKET_TESTS = [
test for test in _CONFORMANCE_TESTS if "bucket" in test and not test.get("object")
]
_BLOB_TESTS = [
test for test in _CONFORMANCE_TESTS if "bucket" in test and test.get("object")
]
def _utc_seconds(when):
return int(calendar.timegm(when.timetuple()))
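# Worked example (added note): _utc_seconds(datetime.datetime(2004, 8, 19)) == 1092873600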
class Test_get_expiration_seconds_v2(unittest.TestCase):
@staticmethod
def _call_fut(expiration):
from google.cloud.storage._signing import get_expiration_seconds_v2
return get_expiration_seconds_v2(expiration)
def test_w_invalid_expiration_type(self):
with self.assertRaises(TypeError):
self._call_fut(object())
def test_w_expiration_none(self):
with self.assertRaises(TypeError):
self._call_fut(None)
def test_w_expiration_int(self):
self.assertEqual(self._call_fut(123), 123)
def test_w_expiration_long(self):
if not six.PY2:
raise unittest.SkipTest("No long on Python 3+")
self.assertEqual(self._call_fut(long(123)), 123) # noqa: F821
def test_w_expiration_naive_datetime(self):
expiration_no_tz = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(expiration_no_tz)
self.assertEqual(self._call_fut(expiration_no_tz), utc_seconds)
def test_w_expiration_utc_datetime(self):
from google.cloud._helpers import UTC
expiration_utc = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
utc_seconds = _utc_seconds(expiration_utc)
self.assertEqual(self._call_fut(expiration_utc), utc_seconds)
def test_w_expiration_other_zone_datetime(self):
from google.cloud._helpers import _UTC
class CET(_UTC):
_tzname = "CET"
_utcoffset = datetime.timedelta(hours=1)
zone = CET()
expiration_other = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, zone)
utc_seconds = _utc_seconds(expiration_other)
cet_seconds = utc_seconds - (60 * 60) # CET one hour earlier than UTC
self.assertEqual(self._call_fut(expiration_other), cet_seconds)
def test_w_expiration_timedelta_seconds(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(dummy_utcnow)
expiration_as_delta = datetime.timedelta(seconds=10)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, utc_seconds + 10)
utcnow.assert_called_once_with()
def test_w_expiration_timedelta_days(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(dummy_utcnow)
expiration_as_delta = datetime.timedelta(days=1)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, utc_seconds + 86400)
utcnow.assert_called_once_with()
class Test_get_expiration_seconds_v4(unittest.TestCase):
@staticmethod
def _call_fut(expiration):
from google.cloud.storage._signing import get_expiration_seconds_v4
return get_expiration_seconds_v4(expiration)
def test_w_invalid_expiration_type(self):
with self.assertRaises(TypeError):
self._call_fut(object())
def test_w_expiration_none(self):
with self.assertRaises(TypeError):
self._call_fut(None)
def test_w_expiration_int_gt_seven_days(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
delta = datetime.timedelta(days=10)
expiration_utc = dummy_utcnow + delta
expiration_seconds = _utc_seconds(expiration_utc)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
with self.assertRaises(ValueError):
self._call_fut(expiration_seconds)
utcnow.assert_called_once_with()
def test_w_expiration_int(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
expiration_seconds = 10
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_seconds)
self.assertEqual(result, expiration_seconds)
utcnow.assert_called_once_with()
def test_w_expiration_naive_datetime(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
delta = datetime.timedelta(seconds=10)
expiration_no_tz = dummy_utcnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_no_tz)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_utc_datetime(self):
from google.cloud._helpers import UTC
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
delta = datetime.timedelta(seconds=10)
expiration_utc = dummy_utcnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_utc)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_other_zone_datetime(self):
from google.cloud._helpers import UTC
from google.cloud._helpers import _UTC
class CET(_UTC):
_tzname = "CET"
_utcoffset = datetime.timedelta(hours=1)
zone = CET()
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
dummy_cetnow = dummy_utcnow.astimezone(zone)
delta = datetime.timedelta(seconds=10)
expiration_other = dummy_cetnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_other)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_timedelta(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
expiration_as_delta = datetime.timedelta(seconds=10)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, expiration_as_delta.total_seconds())
utcnow.assert_called_once_with()
class Test_get_signed_query_params_v2(unittest.TestCase):
@staticmethod
def _call_fut(credentials, expiration, string_to_sign):
from google.cloud.storage._signing import get_signed_query_params_v2
return get_signed_query_params_v2(credentials, expiration, string_to_sign)
def test_it(self):
sig_bytes = b"DEADBEEF"
account_name = mock.sentinel.service_account_email
credentials = _make_credentials(signer_email=account_name)
credentials.sign_bytes.return_value = sig_bytes
expiration = 100
string_to_sign = "dummy_signature"
result = self._call_fut(credentials, expiration, string_to_sign)
expected = {
"GoogleAccessId": account_name,
"Expires": expiration,
"Signature": base64.b64encode(sig_bytes),
}
self.assertEqual(result, expected)
credentials.sign_bytes.assert_called_once_with(string_to_sign)
class Test_get_canonical_headers(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import get_canonical_headers
return get_canonical_headers(*args, **kwargs)
def test_w_none(self):
headers = None
expected_canonical = []
expected_ordered = []
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_dict(self):
headers = {"foo": "Foo 1.2.3", "Bar": " baz,bam,qux "}
expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_list_and_multiples(self):
headers = [
("foo", "Foo 1.2.3"),
("Bar", " baz"),
("Bar", "bam"),
("Bar", "qux "),
]
expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_embedded_ws(self):
headers = {"foo": "Foo\n1.2.3", "Bar": " baz bam qux "}
expected_canonical = ["bar:baz bam qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
class Test_canonicalize_v2(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import canonicalize_v2
return canonicalize_v2(*args, **kwargs)
def test_wo_headers_or_query_parameters(self):
method = "GET"
resource = "/bucket/blob"
canonical = self._call_fut(method, resource, None, None)
self.assertEqual(canonical.method, method)
self.assertEqual(canonical.resource, resource)
self.assertEqual(canonical.query_parameters, [])
self.assertEqual(canonical.headers, [])
def test_w_headers_and_resumable(self):
method = "RESUMABLE"
resource = "/bucket/blob"
headers = [("x-goog-extension", "foobar")]
canonical = self._call_fut(method, resource, None, headers)
self.assertEqual(canonical.method, "POST")
self.assertEqual(canonical.resource, resource)
self.assertEqual(canonical.query_parameters, [])
self.assertEqual(
canonical.headers, ["x-goog-extension:foobar", "x-goog-resumable:start"]
)
def test_w_query_parameters(self):
method = "GET"
resource = "/bucket/blob"
query_parameters = {"foo": "bar", "baz": "qux"}
canonical = self._call_fut(method, resource, query_parameters, None)
self.assertEqual(canonical.method, method)
self.assertEqual(canonical.resource, "{}?baz=qux&foo=bar".format(resource))
self.assertEqual(canonical.query_parameters, [("baz", "qux"), ("foo", "bar")])
self.assertEqual(canonical.headers, [])
class Test_generate_signed_url_v2(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import generate_signed_url_v2
return generate_signed_url_v2(*args, **kwargs)
def _generate_helper(
self,
api_access_endpoint="",
method="GET",
content_md5=None,
content_type=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
):
from six.moves.urllib.parse import urlencode
resource = "/name/path"
credentials = _make_credentials(signer_email="[email protected]")
credentials.sign_bytes.return_value = b"DEADBEEF"
signed = base64.b64encode(credentials.sign_bytes.return_value)
signed = signed.decode("ascii")
expiration = 1000
url = self._call_fut(
credentials,
resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method,
content_md5=content_md5,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation,
headers=headers,
query_parameters=query_parameters,
service_account_email=None,
access_token=None,
)
# Check the mock was called.
method = method.upper()
if headers is None:
headers = []
elif isinstance(headers, dict):
headers = sorted(headers.items())
elements = []
expected_resource = resource
if method == "RESUMABLE":
elements.append("POST")
headers.append(("x-goog-resumable", "start"))
else:
elements.append(method)
if query_parameters is not None:
normalized_qp = {
key.lower(): value and value.strip() or ""
for key, value in query_parameters.items()
}
expected_qp = urlencode(sorted(normalized_qp.items()))
expected_resource = "{}?{}".format(resource, expected_qp)
elements.append(content_md5 or "")
elements.append(content_type or "")
elements.append(str(expiration))
elements.extend(["{}:{}".format(*header) for header in headers])
elements.append(expected_resource)
string_to_sign = "\n".join(elements)
credentials.sign_bytes.assert_called_once_with(string_to_sign)
scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
api_access_endpoint
)
self.assertEqual(scheme, expected_scheme)
self.assertEqual(netloc, expected_netloc)
self.assertEqual(path, resource)
self.assertEqual(frag, "")
# Check the URL parameters.
params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
self.assertEqual(params["GoogleAccessId"], credentials.signer_email)
self.assertEqual(params["Expires"], str(expiration))
self.assertEqual(params["Signature"], signed)
if response_type is not None:
self.assertEqual(params["response-content-type"], response_type)
if response_disposition is not None:
self.assertEqual(
params["response-content-disposition"], response_disposition
)
if generation is not None:
self.assertEqual(params["generation"], str(generation))
if query_parameters is not None:
for key, value in query_parameters.items():
value = value.strip() if value else ""
self.assertEqual(params[key].lower(), value)
def test_w_expiration_int(self):
self._generate_helper()
def test_w_endpoint(self):
api_access_endpoint = "https://api.example.com"
self._generate_helper(api_access_endpoint=api_access_endpoint)
def test_w_method(self):
method = "POST"
self._generate_helper(method=method)
def test_w_method_resumable(self):
method = "RESUMABLE"
self._generate_helper(method=method)
def test_w_response_type(self):
response_type = "text/plain"
self._generate_helper(response_type=response_type)
def test_w_response_disposition(self):
response_disposition = "attachment; filename=blob.png"
self._generate_helper(response_disposition=response_disposition)
def test_w_generation(self):
generation = "123"
self._generate_helper(generation=generation)
def test_w_custom_headers_dict(self):
self._generate_helper(headers={"x-goog-foo": "bar"})
def test_w_custom_headers_list(self):
self._generate_helper(headers=[("x-goog-foo", "bar")])
def test_w_custom_query_parameters_w_string_value(self):
self._generate_helper(query_parameters={"bar": "/"})
def test_w_custom_query_parameters_w_none_value(self):
self._generate_helper(query_parameters={"qux": None})
def test_with_google_credentials(self):
resource = "/name/path"
credentials = _make_credentials()
expiration = int(time.time() + 5)
with self.assertRaises(AttributeError):
self._call_fut(credentials, resource=resource, expiration=expiration)
def test_with_access_token(self):
resource = "/name/path"
credentials = _make_credentials()
expiration = int(time.time() + 5)
email = mock.sentinel.service_account_email
with mock.patch(
"google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF"
):
self._call_fut(
credentials,
resource=resource,
expiration=expiration,
service_account_email=email,
access_token="token",
)
class Test_generate_signed_url_v4(unittest.TestCase):
DEFAULT_EXPIRATION = 1000
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import generate_signed_url_v4
return generate_signed_url_v4(*args, **kwargs)
def _generate_helper(
self,
expiration=DEFAULT_EXPIRATION,
api_access_endpoint="",
method="GET",
content_type=None,
content_md5=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
):
now = datetime.datetime(2019, 2, 26, 19, 53, 27)
resource = "/name/path"
signer_email = "[email protected]"
credentials = _make_credentials(signer_email=signer_email)
credentials.sign_bytes.return_value = b"DEADBEEF"
with mock.patch("google.cloud.storage._signing.NOW", lambda: now):
url = self._call_fut(
credentials,
resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method,
content_type=content_type,
content_md5=content_md5,
response_type=response_type,
response_disposition=response_disposition,
generation=generation,
headers=headers,
query_parameters=query_parameters,
)
# Check the mock was called.
credentials.sign_bytes.assert_called_once()
scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
api_access_endpoint
)
self.assertEqual(scheme, expected_scheme)
self.assertEqual(netloc, expected_netloc)
self.assertEqual(path, resource)
self.assertEqual(frag, "")
# Check the URL parameters.
params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
self.assertEqual(params["X-Goog-Algorithm"], "GOOG4-RSA-SHA256")
now_date = now.date().strftime("%Y%m%d")
expected_cred = "{}/{}/auto/storage/goog4_request".format(
signer_email, now_date
)
self.assertEqual(params["X-Goog-Credential"], expected_cred)
now_stamp = now.strftime("%Y%m%dT%H%M%SZ")
self.assertEqual(params["X-Goog-Date"], now_stamp)
self.assertEqual(params["X-Goog-Expires"], str(self.DEFAULT_EXPIRATION))
signed = binascii.hexlify(credentials.sign_bytes.return_value).decode("ascii")
self.assertEqual(params["X-Goog-Signature"], signed)
if response_type is not None:
self.assertEqual(params["response-content-type"], response_type)
if response_disposition is not None:
self.assertEqual(
params["response-content-disposition"], response_disposition
)
if generation is not None:
self.assertEqual(params["generation"], str(generation))
if query_parameters is not None:
for key, value in query_parameters.items():
value = value.strip() if value else ""
self.assertEqual(params[key].lower(), value)
def test_w_expiration_too_long(self):
with self.assertRaises(ValueError):
self._generate_helper(expiration=datetime.timedelta(days=8))
def test_w_defaults(self):
self._generate_helper()
def test_w_api_access_endpoint(self):
self._generate_helper(api_access_endpoint="http://api.example.com")
def test_w_method(self):
self._generate_helper(method="PUT")
def test_w_method_resumable(self):
self._generate_helper(method="RESUMABLE")
def test_w_content_type(self):
self._generate_helper(content_type="text/plain")
def test_w_content_md5(self):
self._generate_helper(content_md5="FACEDACE")
def test_w_response_type(self):
self._generate_helper(response_type="application/octets")
def test_w_response_disposition(self):
self._generate_helper(response_disposition="attachment")
def test_w_generation(self):
self._generate_helper(generation=12345)
def test_w_custom_host_header(self):
self._generate_helper(headers={"Host": "api.example.com"})
def test_w_custom_headers(self):
self._generate_helper(headers={"x-goog-foo": "bar"})
def test_w_custom_payload_hash_goog(self):
self._generate_helper(headers={"x-goog-content-sha256": "DEADBEEF"})
def test_w_custom_query_parameters_w_string_value(self):
self._generate_helper(query_parameters={"bar": "/"})
def test_w_custom_query_parameters_w_none_value(self):
self._generate_helper(query_parameters={"qux": None})
def test_with_access_token(self):
resource = "/name/path"
signer_email = "[email protected]"
credentials = _make_credentials(signer_email=signer_email)
with mock.patch(
"google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF"
):
self._call_fut(
credentials,
resource=resource,
expiration=datetime.timedelta(days=5),
service_account_email=signer_email,
access_token="token",
)
class Test_sign_message(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import _sign_message
return _sign_message(*args, **kwargs)
def test_sign_bytes(self):
signature = "DEADBEEF"
data = {"signedBlob": signature}
request = make_request(200, data)
with mock.patch("google.auth.transport.requests.Request", return_value=request):
returned_signature = self._call_fut(
"123", service_account_email="[email protected]", access_token="token"
)
assert returned_signature == signature
def test_sign_bytes_failure(self):
from google.auth import exceptions
request = make_request(401)
with mock.patch("google.auth.transport.requests.Request", return_value=request):
with pytest.raises(exceptions.TransportError):
self._call_fut(
"123",
service_account_email="[email protected]",
access_token="token",
)
class TestCustomURLEncoding(unittest.TestCase):
def test_url_encode(self):
from google.cloud.storage._signing import _url_encode
# param1 includes safe symbol ~
# param# includes symbols, which must be encoded
query_params = {"param1": "value~1-2", "param#": "*value+value/"}
self.assertEqual(
_url_encode(query_params), "param%23=%2Avalue%2Bvalue%2F¶m1=value~1-2"
)
class TestQuoteParam(unittest.TestCase):
def test_ascii_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("param")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "param")
def test_quoted_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("!#$%&'()*+,/:;=?@[]")
self.assertIsInstance(encoded_param, str)
self.assertEqual(
encoded_param, "%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D"
)
def test_unquoted_symbols(self):
from google.cloud.storage._signing import _quote_param
import string
UNQUOTED = string.ascii_letters + string.digits + ".~_-"
encoded_param = _quote_param(UNQUOTED)
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, UNQUOTED)
def test_unicode_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("ЁЙЦЯЩЯЩ")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "%D0%81%D0%99%D0%A6%D0%AF%D0%A9%D0%AF%D0%A9")
def test_bytes(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param(b"bytes")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "bytes")
class TestV4Stamps(unittest.TestCase):
def test_get_v4_now_dtstamps(self):
import datetime
from google.cloud.storage._signing import get_v4_now_dtstamps
with mock.patch(
"google.cloud.storage._signing.NOW",
return_value=datetime.datetime(2020, 3, 12, 13, 14, 15),
) as now_mock:
timestamp, datestamp = get_v4_now_dtstamps()
now_mock.assert_called_once()
self.assertEqual(timestamp, "20200312T131415Z")
self.assertEqual(datestamp, "20200312")
_DUMMY_SERVICE_ACCOUNT = None
def dummy_service_account():
global _DUMMY_SERVICE_ACCOUNT
from google.oauth2.service_account import Credentials
if _DUMMY_SERVICE_ACCOUNT is None:
_DUMMY_SERVICE_ACCOUNT = Credentials.from_service_account_info(
_SERVICE_ACCOUNT_JSON
)
return _DUMMY_SERVICE_ACCOUNT
_API_ACCESS_ENDPOINT = "https://storage.googleapis.com"
def _run_conformance_test(
resource, test_data, api_access_endpoint=_API_ACCESS_ENDPOINT
):
credentials = dummy_service_account()
url = Test_generate_signed_url_v4._call_fut(
credentials,
resource,
expiration=test_data["expiration"],
api_access_endpoint=api_access_endpoint,
method=test_data["method"],
_request_timestamp=test_data["timestamp"],
headers=test_data.get("headers"),
query_parameters=test_data.get("queryParameters"),
)
assert url == test_data["expectedUrl"]
@pytest.mark.parametrize("test_data", _BUCKET_TESTS)
def test_conformance_bucket(test_data):
global _API_ACCESS_ENDPOINT
if "urlStyle" in test_data and test_data["urlStyle"] == "BUCKET_BOUND_HOSTNAME":
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_bound_hostname}".format(
scheme=test_data["scheme"],
bucket_bound_hostname=test_data["bucketBoundHostname"],
)
resource = "/"
_run_conformance_test(resource, test_data, _API_ACCESS_ENDPOINT)
else:
resource = "/{}".format(test_data["bucket"])
_run_conformance_test(resource, test_data)
@pytest.mark.parametrize("test_data", _BLOB_TESTS)
def test_conformance_blob(test_data):
global _API_ACCESS_ENDPOINT
if "urlStyle" in test_data:
if test_data["urlStyle"] == "BUCKET_BOUND_HOSTNAME":
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_bound_hostname}".format(
scheme=test_data["scheme"],
bucket_bound_hostname=test_data["bucketBoundHostname"],
)
# For the VIRTUAL_HOSTED_STYLE
else:
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_name}.storage.googleapis.com".format(
scheme=test_data["scheme"], bucket_name=test_data["bucket"]
)
resource = "/{}".format(test_data["object"])
_run_conformance_test(resource, test_data, _API_ACCESS_ENDPOINT)
else:
resource = "/{}/{}".format(test_data["bucket"], test_data["object"])
_run_conformance_test(resource, test_data)
def _make_credentials(signer_email=None):
import google.auth.credentials
if signer_email:
credentials = mock.Mock(spec=google.auth.credentials.Signing)
credentials.signer_email = signer_email
return credentials
else:
return mock.Mock(spec=google.auth.credentials.Credentials)
def make_request(status, data=None):
from google.auth import transport
response = mock.create_autospec(transport.Response, instance=True)
response.status = status
if data is not None:
response.data = json.dumps(data).encode("utf-8")
request = mock.create_autospec(transport.Request)
request.return_value = response
return request
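# Added note: to exercise just these signing tests with pytest, something like the
# following works (the module path assumes the upstream google-cloud-python layout
# and may differ in your checkout):
#
#   pytest tests/unit/test__signing.py -k "conformance or expiration"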
|
the-stack_0_5058 | #!/usr/bin/env python
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2015 Pavel Vomacka
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import os
import sys
import re
import subprocess, shlex
from threading import Timer
import sqlite3
manpage_groups = ("1", "8",)
# Name of output file.
db_file = "switch.sqlite3"
# Database path
db_path = "/tmp/switchTest/"
# Database schema
schema_file = "./schema.sql"
opened_db = None
def err_print(*args, **kwargs):
"""
Print to stderr.
"""
print(*args, file=sys.stderr, **kwargs)
def create_empty_db():
"""
Prepare empty database.
"""
global opened_db
database_file = os.path.join(db_path, db_file)
print("\tCreating new database file " + database_file)
if not os.path.exists(db_path):
os.makedirs(db_path)
with sqlite3.connect(os.path.join(db_path, db_file)) as opened_db:
print("\t\tImporting database schema...")
with open(schema_file, 'rt') as schema_f:
schema = schema_f.read()
# Aplly the schema.
opened_db.executescript(schema)
def open_db():
"""
Open DB file.
"""
global opened_db
database_file = os.path.join(db_path, db_file)
print("\tOpening DB file: " + database_file)
opened_db = sqlite3.connect(database_file)
curs = opened_db.cursor()
# Check whether correct tables exists in db
curs.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND ("
"name=? OR name=? OR name=?);", ('system', 'command', 'switch',))
table_count = curs.fetchone()[0]
if table_count != 3:
    raise RuntimeError("Database does not contain the expected 'system', 'command' and 'switch' tables.")
def add_system(sys_name):
"""
Add system record.
"""
curs = opened_db.cursor()
curs.execute("INSERT INTO system(name) VALUES(?)", (sys_name,))
opened_db.commit()
return curs.lastrowid
def find_system(sys_name):
"""
Find system id.
"""
curs = opened_db.cursor()
curs.execute("SELECT id FROM system WHERE name=?", (sys_name,))
return curs.fetchone()
def handle_system(sys_name):
"""
Handle system.
"""
system = find_system(sys_name)
if system is None:
system = add_system(sys_name)
else:
system = system[0]
return system
def add_command(manpage_name, command, group, sys_id):
"""
Add command record.
"""
curs = opened_db.cursor()
# Handle situation when we are finding record for command --help output.
if group is not None:
group = str(group)
curs.execute("INSERT INTO command(command, manpage_name, man_group, system_id) "
"VALUES(?,?,?,?)", (command, manpage_name, group, str(sys_id),))
opened_db.commit()
return curs.lastrowid
def find_command(command, group, os_id):
"""
Find command record for correct OS.
"""
curs = opened_db.cursor()
# Handle situation when we are finding record for command --help output.
if group is None:
curs.execute("SELECT id FROM command WHERE command=? AND system_id=?",
(command, os_id,))
else:
curs.execute("SELECT id FROM command WHERE command=? AND "
"man_group=? AND system_id=?",
(command, group, os_id,))
return curs.fetchone()
def handle_command(manpage_name, command, group, os_id):
"""
    Handle adding a command. If the command already exists, also remove all
    switches that are associated with it.
"""
command_id = find_command(command, group, os_id)
if command_id is None:
# Command is not in database. Add it and use the new ID
command_id = add_command(manpage_name, command, group, os_id)
else:
# Command already exists so use its record id and remove
# all associated switches.
command_id = command_id[0]
delete_associated_switches(command_id)
return command_id
def store_cmds_to_db(cmds, os_id):
"""
    Store all commands from the 'compgen -c' output in the database, even when
    we don't run --help for each command. This helps with testing the commands.
"""
for cmd in cmds:
handle_command(None, cmd, None, os_id)
def get_all_commands():
"""
Get all already inserted commands
"""
curs = opened_db.cursor()
curs.execute("SELECT command FROM command;")
return curs.fetchall()
def add_switch(switch, com_id):
"""
Add switch record.
"""
curs = opened_db.cursor()
curs.execute("INSERT INTO switch(switch, command_id) "
"VALUES(?,?)", (switch, str(com_id),))
opened_db.commit()
def delete_associated_switches(command_id):
"""
    Delete all switches associated with the particular command.
"""
curs = opened_db.cursor()
curs.execute("DELETE FROM switch WHERE command_id=?", (command_id,))
opened_db.commit()
def prepare_dir_regex():
"""
    Prepare a regex matching the man directories whose section numbers are
    defined by the manpage_groups global variable.
"""
regex_begin = r"^(?:"
regex_end = r")$"
regex = regex_begin
for group_num in manpage_groups:
regex = regex + r"(?:man" + group_num + ")|"
regex = re.sub(r'\|$', '', regex)
regex = regex + regex_end
return regex
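# With the default manpage_groups = ("1", "8") the function above returns the
# pattern r"^(?:(?:man1)|(?:man8))$", i.e. it matches exactly 'man1' or 'man8'.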
def get_directories():
"""
    Fetch all needed manpage directory names.
"""
directories = []
dir_regex = prepare_dir_regex()
# Load all directories and files in /usr/share/man.
for root, dirs, files in os.walk('/usr/share/man'):
# Go through all directory names
for directory in dirs:
            # Prepare a regexp which matches the man section directories (e.g. 'man1')
dirRegexp = re.compile(dir_regex)
if dirRegexp.match(directory) is None:
                # Skip all directories which do not match the regexp
continue
            # Concatenate each directory name that matches the regexp with the
            # path and save it into the list.
directories.append(os.path.join(root, directory))
# Do not go deeper into subdirectories.
break
# Return list with directories
return directories
def get_file_names(directories):
"""
    Get the names of all files in 'directories'.
"""
files = []
# Go through all directories
for directory in directories:
# Fetch all directories and files in current directory
for r, d, f in os.walk(directory):
# Go through all files.
for ccc in f:
# Add path to the file to the list
files.append(r + "/" + ccc)
# Return filled list.
return files
def parse_name(content):
"""
Finds the name of the man page.
"""
# Create regular expression
name_regex = re.compile(r"^([\w\.-]*)")
# Get name of manual page
just_name = name_regex.search(content)
name_str = ""
if just_name is not None:
name_str = just_name.group(1)
return name_str
def parse_manpage_number(path):
"""
Parse number of man page group.
"""
# Create regular expression
number_regex = re.compile(r".*/man(\d).*")
# Get number of manpage group
number = number_regex.search(path)
only_number = ""
if number is not None:
number = number.group(1)
return number
def parse_one_page(content):
"""
Parse flags from manpage which is in content parameter.
"""
# Create regular expression for getting flags from file \s{1,}
flag_regex = re.compile(r"(?:\n?(?:(?:[^\w\-])|(?:\[))((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]*)"
"(?:(?:,?\s((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]+))"
"|(?:.*?\s((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]+)))?)"
"|(?:[\[\{]((?:(?:\-{1,2})|(?:\+))[^ ]*?)[\|,\]\}]"
"(?:((?:(?:\-{1,2})|(?:\+))}[^ ]*?)[\]\}])?)+")
flag_list = flag_regex.findall(content)
# Prepare empty list.
parsed_flags = []
# Create regex for checking whether flag contains at least one letter
# or '#' or question mark.
check_regexp = re.compile(r"(?:.*?[\w#\?]+.*?)|(?:\-\-)")
# Go through all flags (flags can be in tuple.)
for flags in flag_list:
# Go through each tuple.
for flag in flags:
# Check flag.
if check_regexp.match(flag):
#Add flag into list.
#print(flag)
parsed_flags.append(flag)
# Remove duplicates
parsed_flags = list(set(parsed_flags))
# Return flag which was found.
return parsed_flags
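# Rough illustration of what parse_one_page() extracts (hypothetical input; the
# exact result depends on the regex above and, because of the set(), the order
# of the returned flags is not guaranteed):
#
#   parse_one_page("  -v, --verbose   explain what is being done\n")
#   # -> ['-v', '--verbose']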
def parse_bash_page(content, command_list, os_id):
"""
    Parse the bash manpage, which is special: it holds the switches for multiple builtin commands.
"""
#regex for SHELL BUILTIN COMMANDS
shell_builtins = re.compile(r"^SHELL BUILTIN COMMANDS$")
# subcommands has 7 spaces before its name.
builtin_reg = re.compile(r"^ {6,8}([a-zA-Z0-9_\-\+]+)")
# Match the end of section
section_end = re.compile(r"^[A-Z]")
man_group = 1
builtins = False
first_line = False
current_builtin = ""
bash_man = ""
mans = {}
for line in content.splitlines():
if not builtins:
if shell_builtins.match(line):
builtins = True
# add bash and so far concatenated manpage to table
mans['bash'] = bash_man
else:
bash_man = bash_man + line
else:
if builtin_reg.match(line):
# builtin command
first_word = builtin_reg.findall(line)[0]
if first_word in command_list:
# first word is correct command
current_builtin = first_word
mans[current_builtin] = first_word
continue
elif section_end.match(line):
                # next section reached; end the whole for loop
break
if current_builtin != "":
mans[current_builtin] = mans[current_builtin] + line
# parse mans
for command in mans:
flags = parse_one_page(mans[command])
put_manpage_into_db(os_id, None, command, man_group, flags)
def store_helps(os_id, helps):
"""
Store options from help outputs to DB.
"""
for command, manpage in helps.iteritems():
f_list = parse_one_page(manpage)
put_manpage_into_db(os_id, None, command, None, f_list)
def put_manpage_into_db(os_id, man_name, command, number, flags_list):
"""
Insert manpage into database.
"""
command_id = handle_command(man_name, command, number, os_id)
for flag in flags_list:
add_switch(flag, command_id)
def parse_man_pages(files, builtins, os_id):
"""
    Parse all manpages accessible via the paths in the 'files' parameter list.
"""
# Define variables with tools for reading files.
reader = "zcat "
zipped_files = "zcat "
not_zipped_files = "cat "
commands_stored = []
# Open /dev/null/ for output of groff
f_devnull = open(os.devnull, 'w')
# Check all files.
for file_path in files:
# clean vars
flags_list = None
man_name = None
command = None
number = None
""" zcat " + f + " | groff -mandoc -Tutf8
        SOME ERRORS OCCUR WHILE GROFF IS READING MANPAGES --- ADJUST LINE
^^ those errors are caused by mistakes in manpages
"""
# Check whether the file is zipped or not.
zipped = re.compile(r".*\.gz$")
if zipped.match(file_path):
reader = zipped_files
else:
reader = not_zipped_files
# Check whether there is redirection. If it is then parse name from the path.
file_name_changed = False
check_file = subprocess.Popen(shlex.split(reader + file_path), stdout=subprocess.PIPE).communicate()[0]
if re.match("\.so", check_file):
file_name_changed = True
# Create regex for getting name of file.
reg_name = re.compile(r".*/(.*?)\.\w{1,5}\.gz")
# Parse path.
parsed_path = reg_name.search(file_path)
# Variable for saving name.
man_name = None
# If there is at least one match then save it to the variable.
if parsed_path is not None:
man_name = parsed_path.group(1)
# Create regex which catch new file name.
new_file_regex = re.compile(r".* (.*)")
# Parse file.
n_f_search = new_file_regex.search(check_file)
# Prepare variable.
new_file = None
# If there is at least one match then save it to the prepared variable.
if n_f_search is not None:
new_file = n_f_search.group(1)
# Add .gz extension.
new_file = new_file + ".gz"
# Substitute old file name by new file name.
if re.match(r".*/.*", new_file):
file_path = re.sub(r"/[-\.\w]*/[-\.\w]*$", "/" + new_file, file_path)
elif re.match(r"[^/]*", new_file):
file_path = re.sub(r"/[-\.\w]*$", "/" + new_file, file_path)
p1 = subprocess.Popen(shlex.split(reader + file_path),
stdout=subprocess.PIPE,
universal_newlines=True)
# Run these two commands connected by pipe.
"""
Error output is redirected to /dev/null because of warnings from
        incorrectly formatted manpages
"""
output = subprocess.Popen(shlex.split("groff -E -c -mandoc -Tutf8"),
stdin=p1.stdout,
stdout=subprocess.PIPE,
stderr=f_devnull,
universal_newlines=True).communicate()[0]
number = parse_manpage_number(file_path)
# Parse name of manpage.
if not file_name_changed:
man_name = parse_name(output)
# \u001B is escape character - character which make colors in man pages
output = re.sub(u"\u001B\[[^-]*?;?[^-]*?m", "", output)
if man_name == 'BASH':
parse_bash_page(output, builtins, os_id)
continue # manpage is put into db directly in previous function
# Get list of flags for this page
flags_list = parse_one_page(output)
# Consider manpage name as the name of command.
command = man_name.lower()
put_manpage_into_db(os_id, man_name, command, number, flags_list)
commands_stored.append(command)
f_devnull.close()
return commands_stored
def get_os_commands(ctype=None):
"""
Get bash builtin functions
@param type string could be 'builtin'
"""
command = "compgen -c"
if (ctype == 'builtin'):
command = 'compgen -b'
p = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True
)
output = subprocess.Popen(["sort", "-u"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=p.stdout,
universal_newlines=True
).communicate()[0]
output = output.split('\n')
regex = re.compile(r'[a-zA-Z]')
for o in output:
if not regex.match(o):
output.remove(o)
return output
def remove_already_found_cmds(cmds, cmds_in_db):
"""
Remove commands which are already in database
"""
for cmd in cmds_in_db:
if cmd in cmds:
cmds.remove(cmd)
return cmds
def handle_helps(os_id, cmds):
"""
Call --help on each command which has not been processed yet
"""
help_cont = ''
timeout = 2
helps = {}
for cmd in cmds:
try:
p = subprocess.Popen([cmd, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True
)
kill_proc = lambda p: p.kill()
timer = Timer(timeout, kill_proc, [p])
try:
timer.start()
help_cont = p.communicate()[0]
finally:
timer.cancel()
except OSError:
err_print("ERROR in running '" + cmd + " --help'.")
continue
f_list = parse_one_page(help_cont)
put_manpage_into_db(os_id, None, cmd, None, f_list)
helps[cmd] = help_cont
return helps
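# The Timer above implements a common timeout idiom for Python 2, where
# Popen.communicate() has no timeout argument: a threading.Timer kills the
# child process after `timeout` seconds unless communicate() returns first.
# A minimal standalone sketch of the same idea (the command is hypothetical):
#
#   p = subprocess.Popen(["some-command", "--help"], stdout=subprocess.PIPE)
#   timer = Timer(2, lambda proc: proc.kill(), [p])
#   try:
#       timer.start()
#       out = p.communicate()[0]
#   finally:
#       timer.cancel()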
def parse_options():
"""
Parse options
"""
parser = argparse.ArgumentParser(description="Generate SQLite3 database "
"with all options and switches for all "
"installed commands.")
parser.add_argument("--from-help", help="WARNING: Use this parameter only on "
"virtual machine, which could be lost. Try to run all "
"found commands with '--help' parameter to fetch all "
"options from the output. Please use this only if you "
"know what you are doing. ",
action="store_true")
parser.add_argument("--os-name", help="Name of the OS. Whole name will be "
"created by concatenating OS name and OS version.",
required=True)
parser.add_argument("--os-version", help="Version of OS. Whole name will be "
"created by concatenating OS name and OS version.",
required=True)
parser.add_argument("--schema-file", default="./schema.sql",
help="File with database schema. Default file: "
"./schema.sql")
parser.add_argument("--db-file", default="switch.sqlite3",
help="The name of the database file.")
parser.add_argument("--output-db-dir", default="/tmp/switchTest",
help="Directory to write generated database to. "
"Default directory: /tmp/switchTest/")
prog_args = parser.parse_args()
# Name of schema file.
if prog_args.schema_file:
global schema_file
schema_file = prog_args.schema_file
# Name of database file.
if prog_args.output_db_dir:
global db_path
db_path = prog_args.output_db_dir
# DB path
if prog_args.db_file:
global db_file
db_file = prog_args.db_file
return prog_args
def main():
"""
    Main function.
"""
# Parse options
args = parse_options()
# Check Python version
if sys.version_info[0] != 2:
raise Exception("Must be using Python 2")
print("Preparing database file...")
# Create empty database in case that db file does not exists
if os.path.exists(os.path.join(db_path, db_file)):
open_db()
else:
create_empty_db()
print("Searching OS ID...")
current_os_id = handle_system(args.os_name + args.os_version)
print("Fetching directories with manual pages...")
# Get directories with manual pages
directories = get_directories()
# Get names of manpage files.
files = get_file_names(directories)
print("Fetching builtin commands...")
# Get bash builtin functions
builtins = get_os_commands('builtin')
    # Get all runnable commands
cmds = get_os_commands()
print("Parsing manual pages...")
# Parse man pages
handled_cmds = parse_man_pages(files, builtins, current_os_id)
# Compare list of commands found in OS with all already stored in DB.
# Then remove all commands which are already in DB from list of all commands.
remove_already_found_cmds(cmds, handled_cmds)
print("Storing commands from 'compgen -c' command...")
store_cmds_to_db(cmds, current_os_id)
# Call each command which is not in DB yet with '--help' param to gather
# further data.
if args.from_help:
print("Running commands with --help option...")
helps = handle_helps(current_os_id, cmds)
"""
Run main function.
"""
if __name__ == "__main__":
main()
|
the-stack_0_5059 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the fetch_builds module."""
import errno
import unittest
# The third-party mock module is expected to be available in PYTHONPATH.
import mock
import fetch_build
# The tests below test private functions (W0212).
# Some methods don't reference self because they use the mock module (R0201).
# pylint: disable=R0201,W0212
class FetchBuildTest(unittest.TestCase):
def setUp(self):
    # A mock of the cloud_storage module is used in the methods below.
cloud_storage_patcher = mock.patch('fetch_build.cloud_storage')
self.mock_cloud_storage = cloud_storage_patcher.start()
self.addCleanup(cloud_storage_patcher.stop)
@mock.patch('fetch_build.os.path.exists')
def test_FetchFromCloudStorage_FileFound(self, mock_os_path_exists):
self.mock_cloud_storage.Exists.return_value = True
mock_os_path_exists.return_value = True
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertEqual('local/foo.zip', local_path)
self.mock_cloud_storage.Get.assert_called_with(
'my_bucket', 'remote/foo.zip', 'local/foo.zip')
def test_FetchFromCloudStorage_FileNotFound(self):
self.mock_cloud_storage.Exists.return_value = False
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertIsNone(local_path)
self.assertFalse(self.mock_cloud_storage.Get.called)
class BuildArchiveTest(unittest.TestCase):
def test_CreatePerfBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.PERF_BUILDER)
self.assertEqual('chrome-perf', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.PerfBuildArchive))
def test_CreateFullBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.FULL_BUILDER)
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.FullBuildArchive))
def test_BuildArchive_NonExistentType(self):
self.assertRaises(
NotImplementedError, fetch_build.BuildArchive.Create, 'other')
def test_FullBuildArchive_Linux(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertEqual(
'chromium.linux/Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_FullBuildArchive_Android(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'android'
self.assertEqual('chromium-android', archive.BucketName())
self.assertEqual('android_main_rel/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_PerfBuildArchive_Linux(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_PerfBuildArchive_Android(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'android_perf_rel/full-build-linux_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_64BitWindows(self):
archive = fetch_build.PerfBuildArchive(target_arch='x64')
archive._platform = 'win64'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Win x64 Builder/full-build-win32_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_WithDepsPatchSha(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual(
'Linux Builder/full-build-linux_123456'
'_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.zip',
archive.FilePath(123456, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
class UnzipTest(unittest.TestCase):
def setUp(self):
# Mocks of the os and bisect_utils modules are used in the methods below.
os_patcher = mock.patch('fetch_build.os')
self.mock_os = os_patcher.start()
self.addCleanup(os_patcher.stop)
bisect_utils_patcher = mock.patch('fetch_build.bisect_utils')
self.mock_bisect_utils = bisect_utils_patcher.start()
self.addCleanup(bisect_utils_patcher.stop)
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingCommand')
def test_Unzip_Linux(self, mock_UnzipUsingCommand, mock_MakeDirectory):
self.mock_bisect_utils.IsLinuxHost.return_value = True
self.mock_bisect_utils.IsMacHost.return_value = False
self.mock_bisect_utils.IsWindowsHost.return_value = False
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingCommand.assert_called_with(
['unzip', '-o'], 'x.zip', 'out_dir')
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingZipFile')
def test_Unzip_Mac_LargeFile(
self, mock_UnzipUsingZipFile, mock_MakeDirectory):
# The zipfile module is used to unzip on mac when the file is > 4GB.
self.mock_bisect_utils.IsLinuxHost.return_value = False
self.mock_bisect_utils.IsMacHost.return_value = True
self.mock_bisect_utils.IsWindowsHost.return_value = False
self.mock_os.path.getsize.return_value = 2 ** 33 # 8GB
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingZipFile.assert_called_with('x.zip', 'out_dir', False)
def test_UnzipUsingCommand(self):
# The _UnzipUsingCommand function should move to the output
# directory and run the command with the file's absolute path.
self.mock_os.path.abspath.return_value = '/foo/some/path/x.zip'
self.mock_os.getcwd.return_value = 'curr_dir'
self.mock_bisect_utils.RunProcess.return_value = 0
fetch_build._UnzipUsingCommand(['unzip'], 'x.zip', 'out_dir')
self.mock_os.chdir.assert_has_calls(
[mock.call('out_dir'), mock.call('curr_dir')])
self.mock_bisect_utils.RunProcess.assert_called_with(
['unzip', '/foo/some/path/x.zip'])
def test_MakeDirectory(self):
# _MakeDirectory uses os.makedirs.
fetch_build._MakeDirectory('some/path')
self.mock_os.makedirs.assert_called_with('some/path')
def test_MakeDirectory_RaisesError(self):
self.mock_os.makedirs.side_effect = OSError()
self.assertRaises(OSError, fetch_build._MakeDirectory, 'some/path')
def test_MakeDirectory_NoErrorIfDirectoryAlreadyExists(self):
already_exists = OSError()
already_exists.errno = errno.EEXIST
self.mock_os.makedirs.side_effect = already_exists
fetch_build._MakeDirectory('some/path')
@mock.patch('fetch_build.shutil')
def test_RemoveDirectoryTree(self, mock_shutil):
# _RemoveDirectoryTree uses shutil.rmtree.
fetch_build._RemoveDirectoryTree('some/path')
mock_shutil.rmtree.assert_called_with('some/path')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5060 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we don't leak txs to inbound peers that we haven't yet announced to"""
from test_framework.messages import msg_getdata, CInv, MSG_TX
from test_framework.p2p import p2p_lock, P2PDataStore
from test_framework.test_framework import RuvchainTestFramework
from test_framework.util import (
assert_equal,
)
from test_framework.wallet import MiniWallet
class P2PNode(P2PDataStore):
def on_inv(self, msg):
pass
class P2PLeakTxTest(RuvchainTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
gen_node = self.nodes[0] # The block and tx generating node
miniwallet = MiniWallet(gen_node)
# Add enough mature utxos to the wallet, so that all txs spend confirmed coins
miniwallet.generate(1)
gen_node.generate(100)
inbound_peer = self.nodes[0].add_p2p_connection(P2PNode()) # An "attacking" inbound peer
MAX_REPEATS = 100
self.log.info("Running test up to {} times.".format(MAX_REPEATS))
for i in range(MAX_REPEATS):
self.log.info('Run repeat {}'.format(i + 1))
txid = miniwallet.send_self_transfer(from_node=gen_node)['wtxid']
want_tx = msg_getdata()
want_tx.inv.append(CInv(t=MSG_TX, h=int(txid, 16)))
with p2p_lock:
inbound_peer.last_message.pop('notfound', None)
inbound_peer.send_and_ping(want_tx)
if inbound_peer.last_message.get('notfound'):
self.log.debug('tx {} was not yet announced to us.'.format(txid))
self.log.debug("node has responded with a notfound message. End test.")
assert_equal(inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16))
with p2p_lock:
inbound_peer.last_message.pop('notfound')
break
else:
self.log.debug('tx {} was already announced to us. Try test again.'.format(txid))
assert int(txid, 16) in [inv.hash for inv in inbound_peer.last_message['inv'].inv]
if __name__ == '__main__':
P2PLeakTxTest().main()
|
the-stack_0_5062 | #!/usr/bin/env python
from Bio import SeqIO
import argparse as ap
import sys
def read_params():
p = ap.ArgumentParser(description = 'fastq2fasta.py Parameters\n')
p.add_argument('--ifn', required = False, default = None, type = str)
p.add_argument('--ofn', required = False, default = None, type = str)
return vars(p.parse_args())
if __name__ == '__main__':
args = read_params()
if args['ifn'] == None:
ifile = sys.stdin
else:
ifile = open(args['ifn'], 'r')
if args['ofn'] == None:
ofile = sys.stdout
else:
ofile = open(args['ofn'], 'w')
for r in SeqIO.parse(ifile, "fastq"):
SeqIO.write(r, ofile, "fasta")
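# Example invocations (file names are hypothetical); without --ifn/--ofn the
# script reads from stdin and writes to stdout:
#   python fastq2fasta.py --ifn reads.fastq --ofn reads.fasta
#   cat reads.fastq | python fastq2fasta.py > reads.fasta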
|
the-stack_0_5064 | """
# lex-ler
Understand the motivation and mechanisms of lexical analysis.
* Split a source file into tokens and lexemes.
* Identify the different kinds of lexemes.
* Identify lexemes in real programming languages such as Python or C.
----
Attention! This is not a programming exercise, but rather one about understanding
the concepts related to lexical analysis. Even so, the answer is graded
automatically.
Consider the ruspy code below:
// Fatorial
fn fat(n: int) {
    r = n
    for i in 1..n {
        r *= i
    }
    r
}
Split this program into lexemes and save them as a list of strings in the variable:
FAT_LEXEMAS = ["fn", "fat", ...]
Comments are removed from the list of lexemes, since they are of no interest to
semantic analysis. You can build this list manually, automatically or
semi-automatically. The only important part is getting the correct value in the end.
In the second part, classify each lexeme into its category and save the result
as
FAT_TOKENS = ["fn FN", "fat ID", ...]
that is, each string contains the lexeme and the non-terminal category separated
by a space. Consider the following categories:
ID - identifiers
INT - integers
OP - binary operators
LBRACE/RBRACE - braces (open/close)
LPAR/RPAR - parentheses (open/close)
Each reserved word has its own separate category, such as FN, IF, etc.
"""
import pytest
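# Illustrative sketch only -- not the graded answer (which is checked by hash in
# the test below). One semi-automatic way to build FAT_LEXEMAS is a small regex
# tokenizer over the ruspy source that drops the '//' comments; multi-character
# operators such as '..' and '*=' need their own alternatives. SRC and LEXEME_RE
# are hypothetical names used only in this sketch:
#
#   import re
#   LEXEME_RE = re.compile(r"//[^\n]*|\*=|\.\.|\w+|[{}():=]")
#   FAT_LEXEMAS = [t for t in LEXEME_RE.findall(SRC) if not t.startswith("//")]
#
# FAT_TOKENS can then be produced by mapping each lexeme to its category
# (reserved words -> FN/FOR/IN, digits -> INT, '{'/'}' -> LBRACE/RBRACE, etc.).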
def test_verificações_básicas(var, check_value, fn):
size_hint = 24
lexemas = var("FAT_LEXEMAS")
tokens = var("FAT_TOKENS")
assert len(lexemas) == len(tokens)
tok_class = {tk.split()[1] for tk in tokens}
assert tok_class == {"FN", "FOR", "IN", "ID", "INT", "OP", "LBRACE", "RBRACE", "LPAR", "RPAR"}
check_value(
lexemas,
name="FAT_LEXEMAS",
type=list,
hash="MjQ6AD8NORcHa9YKPMlnYAmp6A==",
check=[fn.size(size_hint)],
)
check_value(
tokens,
name="FAT_TOKENS",
type=list,
hash="MjQ6eDGqAGldx9jyZm2FqVyIJg==",
check=[fn.size(size_hint)],
)
|
the-stack_0_5065 | import argparse
import random
import math
from dali.utils import (
set_device_from_args,
add_device_args,
unpickle_as_dict,
)
from dali.data.utils import split_punctuation
from translation import TranslationModel
def parse_args():
parser = argparse.ArgumentParser()
add_device_args(parser)
parser.add_argument("--path", type=str, required='True', help="Path to saved model")
parser.add_argument("--beam_width", type=int, default=5, help="Beam width used when prediction")
parser.add_argument("--max_output_length", type=int, default=40, help="Maximum number of words in the translation")
parser.add_argument("--show_beams", action='store_true', default=False,
help="If true shows all the beams and probabilities")
return parser.parse_args()
def show_reconstructions(model, example_pair, vocabs, max_sentence_length):
from_words, to_words = example_pair
from_vocab, to_vocab = vocabs
from_with_unk = ' '.join(from_vocab.decode(from_vocab.encode(from_words)))
to_with_unk = ' '.join(to_vocab.decode(to_vocab.encode(to_words)))
print('TRANSLATING: %s' % from_with_unk)
print('REFERENCE: %s' % to_with_unk)
print('')
def main(args):
set_device_from_args(args)
RELEVANT_VARIABLES = ["model", "vocabs"]
loaded = unpickle_as_dict(args.path, RELEVANT_VARIABLES)
model = loaded["model"]
from_vocab, to_vocab = loaded["vocabs"]
while True:
from_sentence = split_punctuation(input()).split(' ')
encoded = from_vocab.encode(list(reversed(from_sentence)), add_eos=False)
beams = model.predict(encoded,
eos_symbol=to_vocab.eos,
max_sequence_length=args.max_output_length + 1,
beam_width=args.beam_width)
if args.show_beams:
for solution, score, _ in beams:
score = math.exp(score.w[0])
# reveal the unks
solution = ' '.join(to_vocab.decode(solution, strip_eos=True))
                print('%f => %s' % (score, solution))
else:
print(' '.join(to_vocab.decode(beams[0].solution, strip_eos=True)))
if __name__ == '__main__':
main(parse_args())
|
the-stack_0_5071 | import os
from pathlib import Path
import pytest
from aqt.archives import QtArchives, SrcDocExamplesArchives
from aqt.helper import Settings
@pytest.fixture(autouse=True)
def setup():
Settings.load_settings(os.path.join(os.path.dirname(__file__), "data", "settings.ini"))
@pytest.mark.parametrize(
"os_name, version, flavor, datafile",
(
("windows", "5.15.2", "doc", "windows-5152-src-doc-example-update.xml"),
("windows", "5.15.2", "src", "windows-5152-src-doc-example-update.xml"),
("windows", "5.15.2", "examples", "windows-5152-src-doc-example-update.xml"),
),
)
def test_parse_update_xml(monkeypatch, os_name, version, flavor, datafile):
def _mock(self, url):
return (Path(__file__).parent / "data" / datafile).read_text("utf-8")
monkeypatch.setattr(QtArchives, "_download_update_xml", _mock)
qt_archives = SrcDocExamplesArchives(flavor, os_name, "desktop", version, Settings.baseurl)
assert qt_archives.archives is not None
# Get packages with all extra modules
qt_archives_all_modules = SrcDocExamplesArchives(
flavor,
os_name,
"desktop",
version,
Settings.baseurl,
all_extra=True,
)
assert qt_archives_all_modules.archives is not None
# Extract all urls
url_list = [item.archive_path for item in qt_archives.archives]
url_all_modules_list = [item.archive_path for item in qt_archives_all_modules.archives]
# Check the difference list contains only extra modules urls for target specified
list_diff = [item for item in url_all_modules_list if item not in url_list]
unwanted_targets = [item for item in list_diff if flavor not in item]
# Assert if list_diff contains urls without target specified
assert unwanted_targets == []
|
the-stack_0_5073 | import kfp
from kfp import components
from kfp import dsl
sagemaker_hpo_op = components.load_component_from_file(
"../../hyperparameter_tuning/component.yaml"
)
@dsl.pipeline(
name="SageMaker HyperParameter Tuning", description="SageMaker HPO job test"
)
def hpo_pipeline(
region="",
job_name="",
algorithm_name="",
training_input_mode="",
static_parameters="",
integer_parameters="",
channels="",
categorical_parameters="",
early_stopping_type="",
max_parallel_jobs="",
max_num_jobs="",
metric_name="",
metric_type="",
hpo_strategy="",
instance_type="",
instance_count="",
volume_size="",
max_run_time="",
output_location="",
network_isolation="",
max_wait_time="",
role="",
):
sagemaker_hpo_op(
region=region,
job_name=job_name,
algorithm_name=algorithm_name,
training_input_mode=training_input_mode,
static_parameters=static_parameters,
integer_parameters=integer_parameters,
channels=channels,
categorical_parameters=categorical_parameters,
early_stopping_type=early_stopping_type,
max_parallel_jobs=max_parallel_jobs,
max_num_jobs=max_num_jobs,
metric_name=metric_name,
metric_type=metric_type,
strategy=hpo_strategy,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
max_run_time=max_run_time,
output_location=output_location,
network_isolation=network_isolation,
max_wait_time=max_wait_time,
role=role,
)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(
hpo_pipeline, "SageMaker_hyperparameter_tuning_pipeline" + ".yaml"
)
|
the-stack_0_5075 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
def find_shortest_symref(repo_path, sha1):
"""
Find the shortest symbolic reference (branch/tag) to a Git SHA1
:param repo_path: the path of a valid git repository
:type repo_path: str
    :param sha1: the SHA1 of a commit to look up the reference for
:type sha1: str
Returns None if nothing points to the requested SHA1
"""
repo_path = os.path.expanduser(repo_path)
possibles = []
# Can't use git for-each-ref --points-at because it only came in in Git 2.7
# which is not in Ubuntu 14.04 - check by hand instead.
branches = subprocess.check_output(
"git for-each-ref --sort=-committerdate "
"--format='%(objectname:short) %(refname:short)' "
"refs/heads/ refs/remotes/ refs/tags",
universal_newlines=True,
cwd=repo_path, shell=True)
for line in branches.splitlines():
try:
sha1_out, name = line.strip().split()
except ValueError:
continue
if sha1_out[:7] == sha1[:7]:
possibles.append(name)
if not possibles:
return None
return min(possibles, key=len)
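# Example usage (repository path and SHA1 are hypothetical):
#
#   ref = find_shortest_symref("~/src/linux", "1a2b3c4d")
#   if ref:
#       print("shortest symbolic ref: %s" % ref)   # e.g. "v4.4" or "master"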
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
the-stack_0_5077 | # Create a program that generates five random numbers and puts them into a tuple. Then show the # list of the generated numbers and also indicate the smallest and largest values in the tuple
from random import randint
n1=randint(0,10)
n2=randint(0,10)
n3=randint(0,10)
n4=randint(0,10)
n5=randint(0,10)
maior=menor=0
sorteio=(n1,n2,n3,n4,n5)
print(sorteio)
print(f"O maior valor é: {max(sorteio)}.")
print(f"O menor valor é: {min(sorteio)}.") |
the-stack_0_5078 | #
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath
import io
import os
import sys
import struct
MAXBLOCK = 65536
SAFEBLOCK = 1024*1024
LOAD_TRUNCATED_IMAGES = False
ERRORS = {
-1: "image buffer overrun error",
-2: "decoding error",
-3: "unknown error",
-8: "bad configuration",
-9: "out of memory error"
}
def raise_ioerror(error):
try:
message = Image.core.getcodecstatus(error)
except AttributeError:
message = ERRORS.get(error)
if not message:
message = "decoder error %d" % error
raise IOError(message + " when reading image file")
#
# --------------------------------------------------------------------
# Helpers
def _tilesort(t):
# sort on offset
return t[2]
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
"Base class for image file format handlers."
def __init__(self, fp=None, filename=None):
Image.Image.__init__(self)
self._min_frame = 0
self.tile = None
self.readonly = 1 # until we know better
self.decoderconfig = ()
self.decodermaxblock = MAXBLOCK
if isPath(fp):
# filename
self.fp = open(fp, "rb")
self.filename = fp
self._exclusive_fp = True
else:
# stream
self.fp = fp
self.filename = filename
# can be overridden
self._exclusive_fp = None
try:
self._open()
except (IndexError, # end of data
TypeError, # end of data (ord)
KeyError, # unsupported mode
EOFError, # got header but not the first frame
struct.error) as v:
# close the file only if we have opened it this constructor
if self._exclusive_fp:
self.fp.close()
raise SyntaxError(v)
if not self.mode or self.size[0] <= 0:
raise SyntaxError("not identified by this driver")
def draft(self, mode, size):
"Set draft mode"
pass
def verify(self):
"Check file integrity"
# raise exception if something's wrong. must be called
# directly after open, and closes file when finished.
if self._exclusive_fp:
self.fp.close()
self.fp = None
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info')
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
decoder_name, extents, offset, args = self.tile[0]
if decoder_name == "raw" and len(args) >= 3 and args[0] == self.mode \
and args[0] in Image._MAPMODES:
try:
if hasattr(Image.core, "map"):
# use built-in mapper WIN32 only
self.map = Image.core.map(self.filename)
self.map.seek(offset)
self.im = self.map.readimage(
self.mode, self.size, args[1], args[2]
)
else:
# use mmap, if possible
import mmap
fp = open(self.filename, "r")
size = os.path.getsize(self.filename)
self.map = mmap.mmap(fp.fileno(), size, access=mmap.ACCESS_READ)
self.im = Image.core.map_buffer(
self.map, self.size, decoder_name, extents, offset, args
)
readonly = 1
# After trashing self.im, we might need to reload the palette data.
if self.palette:
self.palette.dirty = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
err_code = -3 # initialize to unknown error
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
for decoder_name, extents, offset, args in self.tile:
decoder = Image._getdecoder(self.mode, decoder_name,
args, self.decoderconfig)
try:
seek(offset)
decoder.setimage(self.im, extents)
if decoder.pulls_fd:
decoder.setfd(self.fp)
status, err_code = decoder.decode(b"")
else:
b = prefix
while True:
try:
s = read(self.decodermaxblock)
except (IndexError, struct.error): # truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated")
if not s: # truncated jpeg
if LOAD_TRUNCATED_IMAGES:
break
else:
self.tile = []
raise IOError("image file is truncated "
"(%d bytes not processed)" % len(b))
b = b + s
n, err_code = decoder.decode(b)
if n < 0:
break
b = b[n:]
finally:
# Need to cleanup here to prevent leaks
decoder.cleanup()
self.tile = []
self.readonly = readonly
self.load_end()
if self._exclusive_fp and self._close_exclusive_fp_after_loading:
self.fp.close()
self.fp = None
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
# still raised if decoder fails to return anything
raise_ioerror(err_code)
return Image.Image.load(self)
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.new(self.mode, self.size)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def load_end(self):
# may be overridden
pass
# may be defined for contained formats
# def load_seek(self, pos):
# pass
# may be defined for blocked formats (e.g. PNG)
# def load_read(self, bytes):
# pass
def _seek_check(self, frame):
if (frame < self._min_frame or
# Only check upper limit on frames if additional seek operations
# are not required to do so
(not (hasattr(self, "_n_frames") and self._n_frames is None) and
frame >= self.n_frames+self._min_frame)):
raise EOFError("attempt to seek outside sequence")
return self.tell() != frame
class StubImageFile(ImageFile):
"""
Base class for stub image loaders.
A stub loader is an image loader that can identify files of a
certain format, but relies on external code to load the file.
"""
def _open(self):
raise NotImplementedError(
"StubImageFile subclass must implement _open"
)
def load(self):
loader = self._load()
if loader is None:
raise IOError("cannot find loader for this %s file" % self.format)
image = loader.load(self)
assert image is not None
# become the other object (!)
self.__class__ = image.__class__
self.__dict__ = image.__dict__
def _load(self):
"(Hook) Find actual image loader."
raise NotImplementedError(
"StubImageFile subclass must implement _load"
)
class Parser(object):
"""
Incremental image parser. This class implements the standard
feed/close consumer interface.
"""
incremental = None
image = None
data = None
decoder = None
offset = 0
finished = 0
def reset(self):
"""
(Consumer) Reset the parser. Note that you can only call this
method immediately after you've created a parser; parser
instances cannot be reused.
"""
assert self.data is None, "cannot reuse parsers"
def feed(self, data):
"""
(Consumer) Feed data to the parser.
:param data: A string buffer.
:exception IOError: If the parser failed to parse the image file.
"""
# collect data
if self.finished:
return
if self.data is None:
self.data = data
else:
self.data = self.data + data
# parse what we have
if self.decoder:
if self.offset > 0:
# skip header
skip = min(len(self.data), self.offset)
self.data = self.data[skip:]
self.offset = self.offset - skip
if self.offset > 0 or not self.data:
return
n, e = self.decoder.decode(self.data)
if n < 0:
# end of stream
self.data = None
self.finished = 1
if e < 0:
# decoding error
self.image = None
raise_ioerror(e)
else:
# end of image
return
self.data = self.data[n:]
elif self.image:
# if we end up here with no decoder, this file cannot
# be incrementally parsed. wait until we've gotten all
# available data
pass
else:
# attempt to open this file
try:
with io.BytesIO(self.data) as fp:
im = Image.open(fp)
except IOError:
# traceback.print_exc()
pass # not enough data
else:
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
if flag or len(im.tile) != 1:
# custom load code, or multiple tiles
self.decode = None
else:
# initialize decoder
im.load_prepare()
d, e, o, a = im.tile[0]
im.tile = []
self.decoder = Image._getdecoder(
im.mode, d, a, im.decoderconfig
)
self.decoder.setimage(im.im, e)
# calculate decoder offset
self.offset = o
if self.offset <= len(self.data):
self.data = self.data[self.offset:]
self.offset = 0
self.image = im
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
"""
# finish decoding
if self.decoder:
# get rid of what's left in the buffers
self.feed(b"")
self.data = self.decoder = None
if not self.finished:
raise IOError("image was incomplete")
if not self.image:
raise IOError("cannot parse this image")
if self.data:
# incremental parsing not possible; reopen the file
            # now that we have all data
with io.BytesIO(self.data) as fp:
try:
self.image = Image.open(fp)
finally:
self.image.load()
return self.image
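# A minimal usage sketch of the incremental parser (the file name is
# hypothetical): feed() accepts arbitrarily sized chunks and close() returns
# the parsed Image object.
#
#   parser = Parser()
#   with open("frame.jpg", "rb") as fp:
#       while True:
#           chunk = fp.read(1024)
#           if not chunk:
#               break
#           parser.feed(chunk)
#   image = parser.close()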
# --------------------------------------------------------------------
def _save(im, fp, tile, bufsize=0):
"""Helper to save image based on tile list
:param im: Image object.
:param fp: File object.
:param tile: Tile list.
:param bufsize: Optional buffer size
"""
im.load()
if not hasattr(im, "encoderconfig"):
im.encoderconfig = ()
tile.sort(key=_tilesort)
# FIXME: make MAXBLOCK a configuration parameter
# It would be great if we could have the encoder specify what it needs
# But, it would need at least the image size in most cases. RawEncode is
# a tricky case.
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
if fp == sys.stdout:
fp.flush()
return
try:
fh = fp.fileno()
fp.flush()
except (AttributeError, io.UnsupportedOperation):
# compress to Python file-compatible object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
while True:
l, s, d = e.encode(bufsize)
fp.write(d)
if s:
break
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
else:
# slight speedup: compress to real file object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
s = e.encode_to_file(fh, bufsize)
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
if hasattr(fp, "flush"):
fp.flush()
def _safe_read(fp, size):
"""
Reads large blocks in a safe way. Unlike fp.read(n), this function
doesn't trust the user. If the requested size is larger than
SAFEBLOCK, the file is read block by block.
:param fp: File handle. Must implement a <b>read</b> method.
:param size: Number of bytes to read.
:returns: A string containing up to <i>size</i> bytes of data.
"""
if size <= 0:
return b""
if size <= SAFEBLOCK:
return fp.read(size)
data = []
while size > 0:
block = fp.read(min(size, SAFEBLOCK))
if not block:
break
data.append(block)
size -= len(block)
return b"".join(data)
class PyCodecState(object):
def __init__(self):
self.xsize = 0
self.ysize = 0
self.xoff = 0
self.yoff = 0
def extents(self):
return (self.xoff, self.yoff,
self.xoff+self.xsize, self.yoff+self.ysize)
class PyDecoder(object):
"""
Python implementation of a format decoder. Override this class and
add the decoding logic in the `decode` method.
See :ref:`Writing Your Own File Decoder in Python<file-decoders-py>`
"""
_pulls_fd = False
def __init__(self, mode, *args):
self.im = None
self.state = PyCodecState()
self.fd = None
self.mode = mode
self.init(args)
def init(self, args):
"""
Override to perform decoder specific initialization
:param args: Array of args items from the tile entry
:returns: None
"""
self.args = args
@property
def pulls_fd(self):
return self._pulls_fd
def decode(self, buffer):
"""
Override to perform the decoding process.
:param buffer: A bytes object with the data to be decoded. If `handles_eof`
is set, then `buffer` will be empty and `self.fd` will be set.
:returns: A tuple of (bytes consumed, errcode). If finished with decoding
return <0 for the bytes consumed. Err codes are from `ERRORS`
"""
raise NotImplementedError()
def cleanup(self):
"""
Override to perform decoder specific cleanup
:returns: None
"""
pass
def setfd(self, fd):
"""
Called from ImageFile to set the python file-like object
:param fd: A python file-like object
:returns: None
"""
self.fd = fd
def setimage(self, im, extents=None):
"""
Called from ImageFile to set the core output image for the decoder
:param im: A core image object
:param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
for this tile
:returns: None
"""
# following c code
self.im = im
if extents:
(x0, y0, x1, y1) = extents
else:
(x0, y0, x1, y1) = (0, 0, 0, 0)
if x0 == 0 and x1 == 0:
self.state.xsize, self.state.ysize = self.im.size
else:
self.state.xoff = x0
self.state.yoff = y0
self.state.xsize = x1 - x0
self.state.ysize = y1 - y0
if self.state.xsize <= 0 or self.state.ysize <= 0:
raise ValueError("Size cannot be negative")
if (self.state.xsize + self.state.xoff > self.im.size[0] or
self.state.ysize + self.state.yoff > self.im.size[1]):
raise ValueError("Tile cannot extend outside image")
def set_as_raw(self, data, rawmode=None):
"""
Convenience method to set the internal image from a stream of raw data
:param data: Bytes to be set
:param rawmode: The rawmode to be used for the decoder. If not specified,
it will default to the mode of the image
:returns: None
"""
if not rawmode:
rawmode = self.mode
d = Image._getdecoder(self.mode, 'raw', (rawmode))
d.setimage(self.im, self.state.extents())
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
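# A rough sketch of a custom decoder built on PyDecoder (the format name and
# rawmode are hypothetical; see "Writing Your Own File Decoder in Python" in
# the Pillow docs for the full contract):
#
#   class MyRawDecoder(PyDecoder):
#       _pulls_fd = True              # read directly from self.fd in decode()
#
#       def decode(self, buffer):
#           data = self.fd.read()          # read the whole tile at once
#           self.set_as_raw(data, "RGB")   # hand the bytes to the raw codec
#           return -1, 0                   # done, no error
#
#   Image.register_decoder("MYRAW", MyRawDecoder)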
|
the-stack_0_5079 | import os
import copy
import re
import yaml
from fabric.colors import yellow as _yellow
from ghost_log import log
from .provisioner import FeaturesProvisioner
SALT_PILLAR_TOP = {'base': {'*': ['features']}}
class FeaturesProvisionerSalt(FeaturesProvisioner):
""" Build features with SaltStack """
def __init__(self, log_file, unique_id, options, config, global_config):
FeaturesProvisioner.__init__(self, log_file, 'salt', unique_id, options, config, global_config)
self._salt_state_tree = os.path.join(self.local_repo_path, 'salt')
self._salt_pillar_roots = os.path.join(self.local_repo_path, 'pillar')
self._provisioner_log_level = self.global_config.get('provisioner_log_level', 'info')
self._salt_state_top_path = os.path.join(self._salt_state_tree, 'top.sls')
self._salt_pillar_top_path = os.path.join(self._salt_pillar_roots, 'top.sls')
self._salt_pillar_features_path = os.path.join(self._salt_pillar_roots, 'features.sls')
self._salt_additional_pillar = config.get('salt_additional_pillar', '')
def build_packer_provisioner_config(self, features_config):
features = self._format_provisioner_features(features_config)
provisioner_params = self._format_provisioner_params(features_config)
enabled_packer_salt_config = self._test_not_empty_salt_features(features)
if enabled_packer_salt_config:
self.build_provisioner_features_files(features, provisioner_params)
_provisionner_config = {
'type': 'salt-masterless',
'local_state_tree': self._salt_state_tree,
'local_pillar_roots': self._salt_pillar_roots,
'skip_bootstrap': self._options[0],
'log_level': self._provisioner_log_level,
}
else:
return None
return [_provisionner_config]
    def build_provisioner_features_files(self, features, provisioner_params):
        """ Build the salt files only if there are features using the salt provisioner """
self._build_salt_top(features)
self._build_salt_pillar(provisioner_params)
def build_packer_provisioner_cleanup(self):
return {
'type': 'shell',
'inline': [
"sudo rm -rf /srv/salt || echo 'Salt - no cleanup salt'",
"sudo rm -rf /srv/pillar || echo 'Salt - no cleanup pillar'"
]
}
    def _test_not_empty_salt_features(self, features):
        """ Test whether the features list is non-empty
>>> features = []
>>> import pprint
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._test_not_empty_salt_features(features))
False
>>> features = ['pkg']
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._test_not_empty_salt_features(features))
True
"""
return features != []
def _build_salt_top(self, params):
""" Build salt salt/top.sls file from features """
with open(self._salt_state_top_path, "w") as stream:
log("Salt - Writing Top state to: {0}".format(self._salt_state_top_path), self._log_file)
if os.path.exists(os.path.join(self._salt_state_tree, 'common')):
data = {'base': {'*': ['common'] + params}}
else:
data = {'base': {'*': params}}
log('Salt - state: top.sls: {0}'.format(data), self._log_file)
yaml.dump(data, stream, default_flow_style=False)
def _build_salt_pillar(self, features):
""" Build salt pillar/top.sls and pillar/features.sls """
data_top = copy.deepcopy(SALT_PILLAR_TOP)
with open(self._salt_pillar_top_path, "w") as stream_top:
if self._salt_additional_pillar != '':
data_top['base']['*'].append(self._salt_additional_pillar)
else:
log('Salt - No additional pillar to add', self._log_file)
log('Salt - pillar: top.sls: {0}'.format(data_top), self._log_file)
yaml.dump(data_top, stream_top, default_flow_style=False)
with open(self._salt_pillar_features_path, "w") as stream_features:
log(_yellow('Salt - pillar: features.sls: {0}'.format(features)), self._log_file)
yaml.dump(features, stream_features, default_flow_style=False)
    def _format_provisioner_features(self, features):
        """ Generates the list of salt formula names for all required features
>>> features = [{'name': 'pkg', 'version': 'git_vim'}, {'name': 'pkg', 'version': 'package=lsof'}, {'name': 'pkg', 'version': 'package=curl'}]
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
['pkg']
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
['pkg']
>>> features = []
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
[]
"""
top = []
for i in features:
if i.get('provisioner', self._default_provisioner) != self.name:
continue
if re.search('^(php|php5)-(.*)', i['name']):
continue
if re.search('^zabbix-(.*)', i['name']):
continue
if re.search('^gem-(.*)', i['name']):
continue
if not i['name'].encode('utf-8') in top:
top.append(i['name'].encode('utf-8'))
return top
    def _format_provisioner_params(self, features):
        """ Generates the pillar dictionary object with all required features and their options
>>> features = [{'name': 'pkg', 'version': 'git_vim'}, {'name': 'pkg', 'version': 'package=lsof'}, {'name': 'pkg', 'version': 'package=curl'}]
>>> import pprint
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl'], 'version': 'git_vim'})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl'], 'version': 'git_vim'})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl']})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'ansible'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[]
>>> features = []
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[]
"""
pillar = {}
for ft in features:
if ft.get('provisioner', self._default_provisioner) != self.name:
continue
values = ft.get('version', '').split('=', 1) # Split only one time
feature_name = ft['name'].encode('utf-8')
if not feature_name in pillar:
pillar[feature_name] = {}
if len(values) == 2:
ft_param_key = values[0].encode('utf-8')
ft_param_val = values[1].encode('utf-8')
if not ft_param_key in pillar[feature_name]:
pillar[feature_name][ft_param_key] = []
pillar[feature_name][ft_param_key].append(ft_param_val)
else:
pillar[feature_name]['version'] = ft.get('version', '').encode('utf-8')
return pillar
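# For reference, a feature list like
#   [{'name': 'pkg', 'version': 'git_vim'}, {'name': 'pkg', 'version': 'package=lsof'}]
# yields (per the doctests above) the pillar {'pkg': {'version': 'git_vim', 'package': ['lsof']}},
# which _build_salt_pillar dumps into pillar/features.sls roughly as:
#
#   pkg:
#     package:
#     - lsof
#     version: git_vim
#
# while _build_salt_top writes salt/top.sls from {'base': {'*': ['pkg']}}.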
|
the-stack_0_5084 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hello world v2 engine pipeline."""
from __future__ import annotations
import unittest
from pprint import pprint
import kfp
import kfp_server_api
from .producer_consumer_param import producer_consumer_param_pipeline
from ..test.util import KfpTask, TaskInputs, TaskOutputs, run_pipeline_func, TestCase, KfpMlmdClient
from ml_metadata.proto import Execution
def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
t = unittest.TestCase()
t.maxDiff = None # we always want to see full diff
t.assertEqual(run.status, 'Succeeded')
client = KfpMlmdClient(mlmd_connection_config=mlmd_connection_config)
tasks = client.get_tasks(run_id=run.id)
pprint(tasks)
t.assertEqual({
'consumer':
KfpTask(
name='consumer',
type='system.ContainerExecution',
state=Execution.State.COMPLETE,
inputs=TaskInputs(
parameters={
'input_value':
'Hello world, this is an output parameter\n'
},
artifacts=[]
),
outputs=TaskOutputs(parameters={}, artifacts=[])
),
'producer':
KfpTask(
name='producer',
type='system.ContainerExecution',
state=Execution.State.COMPLETE,
inputs=TaskInputs(
parameters={'input_text': 'Hello world'}, artifacts=[]
),
outputs=TaskOutputs(
parameters={
'output_value':
'Hello world, this is an output parameter\n'
},
artifacts=[]
)
)
}, tasks)
if __name__ == '__main__':
run_pipeline_func([
TestCase(
pipeline_func=producer_consumer_param_pipeline,
verify_func=verify,
mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
),
])
|
the-stack_0_5085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# Given a sentence and an incident id, check their similarity
# Usage:
# sen_incident.py _input_sentence_ -i incident id -v [optional]
# Need w2v for word2vec, sif for SIF, vec for cached vecs
# if -v is used, need inc_ids.json and inc_sen.json
import argparse
import numpy as np
from fn_machine_learning_nlp.lib.file_manage import FileManage
from fn_machine_learning_nlp.lib.nlp.res_sen2vec import ResSen2Vec
from fn_machine_learning_nlp.lib.nlp.res_sif import ResSIF
from fn_machine_learning_nlp.lib.nlp.res_nlp import ResNLP
from fn_machine_learning_nlp.lib.nlp.word_sentence_utils import WordSentenceUtils
import json
from nltk.corpus import words
setofwords = set(words.words())
SIF_A = 10e-3
parser = argparse.ArgumentParser(description="Find similarity between given sentence and incident")
parser.add_argument("sentence",
help="input sentence")
parser.add_argument("-i", "--incident",
help="incident id")
parser.add_argument("-s", "--sif",
help="SIF file",
default=FileManage.DEFAULT_SIF_FILE)
parser.add_argument("-w", "--w2v",
help="trained word2vec model",
default=FileManage.DEFAULT_NLP_FILE)
parser.add_argument("-v", "--vec",
help="saved vectors for incidents",
default=FileManage.DEFAULT_VEC_FILE)
parser.add_argument("-d", "--debug",
help="print extra debug information",
action="store_true")
parser.add_argument("-a", "--all_ids",
help="json file of list of all incident ids",
default="inc_ids.json")
parser.add_argument("-e", "--inc_sen",
help="json file of list of words for incidents",
default="inc_sen.json")
args, unknow_args = parser.parse_known_args()
sentence = args.sentence
inc_id = int(args.incident)
sif_file = args.sif
w2v_file = args.w2v
vec_file = args.vec
debug = args.debug
all_ids_file = args.all_ids
inc_sen_file = args.inc_sen
s_util = WordSentenceUtils()
sif = ResSIF()
sif.load_sif(sif_file)
w2v = ResNLP()
w2v.load_model(w2v_file)
vec = ResSen2Vec(w2v.word2vec, sif)
vec.load_s2v(vec_file)
inc_vec = vec.get_incident_vec(str(inc_id))
sen_vec = vec.get_vec_for_sentence(sentence)
u = []
with open("vec_en_new_pcs.json", "r") as infile:
u = json.load(infile)
u = np.multiply(u, np.transpose(u))
sub = np.multiply(u, sen_vec)
sen_vec = np.subtract(sen_vec, sub)
inc_vec_norm = np.linalg.norm(inc_vec)
sen_vec_norm = np.linalg.norm(sen_vec)
sim = np.dot(inc_vec, sen_vec) / (inc_vec_norm * sen_vec_norm)
print("Similarity between input incident and sentence:")
print("\t\t%-30s %s"%("similarity:", sim))
if debug:
print("Debug information:")
with open(inc_sen_file, "r") as infile:
sentences = json.load(infile)
with open(all_ids_file, "r") as infile:
ids = json.load(infile)
inc_id_index = None
for i in range(len(ids)):
if ids[i] == inc_id:
inc_id_index = i
break
if inc_id_index is not None:
words = sentences[inc_id_index]
inc_v = np.zeros(w2v.word2vec.vector_size)
inv_v_count = 0
for w in words:
if w in setofwords:
wc = sif.get_word_count(w)
if wc < 300:
wc = 300 - (300 - wc) / 3
a_value = SIF_A / (SIF_A + wc)
try:
w_v = w2v.get_word_vec(w)
inc_v += np.multiply(a_value, w_v)
inv_v_count += 1
except:
pass
if inv_v_count > 0:
inc_v /= inv_v_count
#inc_v = vec.get_vec_for_words(sentences[inc_id_index])
sub = np.multiply(u, inc_v)
inc_v = np.subtract(inc_v, sub)
sim1 = np.dot(inc_vec, inc_v)/(np.linalg.norm(inc_vec) * np.linalg.norm(inc_v))
print("\trecompute incident vec, and check with cached one. Sim shall be close to 1:")
print("\t\t%-30s %s" % ("recom sim:", sim1))
wrd1 = None
wrd2 = None
sim_max = 0
words_1 = s_util.get_words(sentence)
words_2 = sentences[inc_id_index]
for w1 in words_1:
for w2 in words_2:
try:
v1 = w2v.get_word_vec(w1)
v2 = w2v.get_word_vec(w2)
v1_norm = np.linalg.norm(v1)
v2_norm = np.linalg.norm(v2)
if v1_norm > 0 and v2_norm > 0:
sim1 = np.dot(v1, v2) / (v1_norm * v2_norm)
if abs(sim1) > sim_max:
sim_max = abs(sim1)
wrd1 = w1
wrd2 = w2
except:
pass
if sim_max != 0:
print("\ttop matching words:")
print("\t\t%-30s %s" % ("sentence:", wrd1))
print("\t\t%-30s %s" % ("incident:", wrd2))
print("\t\t%-30s %s" % ("similarity:", sim_max))
print("\tsentence top 5 word count:")
s_count = [(w, sif.get_word_count(w)) for w in words_1]
s_count.sort(key=lambda u: u[1])
for i in range(min(5, len(s_count))):
print("\t\t%-30s %s" % (s_count[i][0] + ':', s_count[i][1]))
print("\tincident top 5 word count:")
s_count = [(w, sif.get_word_count(w)) for w in words_2]
s_count.sort(key=lambda u: u[1])
s_tmp = [s for s in s_count if s[1] > 0]
for i in range(min(5, len(s_tmp))):
print("\t\t%-30s %s" % (s_tmp[i][0] + ':', s_tmp[i][1]))
count_threshold = 10
v2_high = np.zeros(w2v.word2vec.vector_size)
v2_low = np.zeros(w2v.word2vec.vector_size)
high_count = 0
low_count = 0
v2_all = np.zeros(w2v.word2vec.vector_size)
v2_all_count = 0
sen_vec_norm = np.linalg.norm(sen_vec)
total_wc = 0
for w2 in words_2:
total_wc += sif.get_word_count(w2)
res = []
for w2 in words_2:
wc = sif.get_word_count(w2)
a_value = SIF_A/(SIF_A + wc)
try:
w_v = w2v.get_word_vec(w2)
w_sim = np.dot(w_v, sen_vec)/(np.linalg.norm(w_v) * sen_vec_norm)
res.append((w2, wc, w_sim))
#if wc/total_wc < 0.005:
#if wc < 500 and w_sim < 0.50:
if wc > 10:
v2_high += np.multiply(a_value, w2v.get_word_vec(w2))
high_count += 1
if wc > 100:
v2_low += np.multiply(a_value, w2v.get_word_vec(w2))
low_count += 1
v2_all += np.multiply(a_value, w2v.get_word_vec(w2))
v2_all_count += 1
except:
pass
if high_count > 0:
v2_high /= high_count
if low_count > 0:
v2_low /= low_count
if v2_all_count > 0:
v2_all /= v2_all_count
sim_high = np.dot(v2_high, sen_vec)/(np.linalg.norm(v2_high) * sen_vec_norm)
sim_low = np.dot(v2_low, sen_vec)/(np.linalg.norm(v2_low) * sen_vec_norm)
sim_all = np.dot(v2_all, sen_vec) / (np.linalg.norm(v2_all) * sen_vec_norm)
print("\tLow sim: {}".format(sim_low))
print("\tHigh sim: {}".format(sim_high))
print("\tAll sim: {}".format(sim_all))
res.sort(key=lambda u:u[2])
for w in res:
print("%-20s, %-8s, %s"%(w[0], str(w[1]), str(w[2]))) |
the-stack_0_5089 | #!/usr/bin/env python3
import os
from pathlib import Path
import shutil
import argparse
import json
from pprint import pprint
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from vfat import Vfat
# ---------------------------------------------------------------
## CONSTANTS
floppy_size = 1536 * 1024 # REVIEW: 1536 or 1440?
# ---------------------------------------------------------------
## ARGS
parser = argparse.ArgumentParser("Extract Akai MPC 2000 floppy files")
parser.add_argument("--src", help="path to disk image file or device (/dev/sd?)", required=True)
parser.add_argument("--floppy", help="virtual floppy id(s), list and ranges supported", required=False)
parser.add_argument("--dest", help="folder to write to", required=False)
parser.add_argument("--out-format", help="output format for listing files", choices=['txt', 'json'], required=False)
parser.add_argument("-v", "--verbose", action = "store_true")
args = parser.parse_args()
sudo_user = ''
if 'SUDO_USER' in os.environ:
sudo_user = os.environ["SUDO_USER"]
if args.src.startswith("~/"):
args.src = args.src.replace("~/", "~"+sudo_user+"/")
args.src = os.path.expanduser(args.src)
if args.dest and args.dest.startswith("~/"):
args.dest = args.dest.replace("~/", "~"+sudo_user+"/")
args.dest = os.path.expanduser(args.dest)
if not args.dest:
args.verbose = True
if not args.out_format:
# NB: default option doesn't seem to work / choices
args.out_format = 'txt'
# print(args.out_format)
floppy_list = []
if args.floppy:
floppy_ranges = args.floppy.split(',')
for frange in floppy_ranges:
split_frange = frange.split('-')
if len(split_frange) == 2:
f, t = split_frange
floppy_list.extend(range(int(f), int(t)+1))
else:
floppy_list.append(int(frange))
floppy_list = list(set(floppy_list))
if args.src.startswith("/dev/sd"):
if not floppy_list:
parser.error("When targeting a Gotek-formated USB drive, please precise `--floppy`, i.e. which virtual floppy to extract.")
## ------------------------------------------------------------------------
## FUNCTIONS: GENERIC
def is_printable_ascii_char(c):
return c >= 0x20 and c <= 0x7e
def bytes_to_ascii(byte_arr):
filtered_arr = bytearray()
for b in byte_arr:
if is_printable_ascii_char(b):
filtered_arr.append(b)
return filtered_arr.decode(u"ASCII")
## ------------------------------------------------------------------------
## FUNCTIONS: FIELD PARSING
def parse_vfat_lfn(r):
lfn_arr = bytearray()
for i in [1, 3, 5, 7, 9]:
lfn_arr.append(r.file_name[i])
for i in [2, 4, 6, 8]:
lfn_arr.append(r.reserved[i])
r_time = r.time.to_bytes(2, 'little')
for i in [0]:
lfn_arr.append(r_time[i])
r_date = r.date.to_bytes(2, 'little')
for i in [0]:
lfn_arr.append(r_date[i])
r_size = r.file_size.to_bytes(4, 'little')
for i in [0, 2]:
lfn_arr.append(r_size[i])
return bytes_to_ascii(lfn_arr)
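# Note: VFAT long-file-name records spread UTF-16 characters across fixed
# offsets of the 32-byte directory entry; the indices picked above collect the
# low bytes of those characters (sufficient for ASCII names), which is why the
# reserved/time/date/size fields are mined for name data here.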
def parse_mpc_lfn_ext(reserved):
return bytes_to_ascii(reserved[:-2]).replace("[Q", "").rstrip()
## ------------------------------------------------------------------------
## FUNCTIONS: FLOPPY PARSING
def get_floppy_file_list(floppy_bytes, vfloppy_offest=0):
data = Vfat.from_bytes(floppy_bytes)
# those might always the same for FAT12 but whatever...
bytes_per_ls = data.boot_sector.bpb.bytes_per_ls
ls_per_clus = data.boot_sector.bpb.ls_per_clus
clus_size = bytes_per_ls * ls_per_clus
data_start_clus = 33
# cf https://www.eit.lth.se/fileadmin/eit/courses/eitn50/Literature/fat12_description.pdf
start_clus_offset = None
parsed_files = []
if data.boot_sector.is_fat32:
floppy_name = data.boot_sector.ebpb_fat32.partition_volume_label
else:
floppy_name = data.boot_sector.ebpb_fat16.partition_volume_label
current_vfat_lfn = ""
for r in data.root_dir.records:
# NB: the records index is at 0x2600
if r.attribute in [8, 0]: # current dir, empty slot
continue
if r.attribute == 15: # vFAT LFN
current_vfat_lfn = parse_vfat_lfn(r)
continue
if r.file_size == 0: # empty file
if current_vfat_lfn:
current_vfat_lfn = ""
continue
sfn_no_ext = bytes_to_ascii(r.file_name[:-3]).rstrip()
ext = r.file_name[-3:].decode(u"ASCII")
# NB: MPC implementation of LFN uses reserved bytes of a record instead of separate record
mpc_lfn_part = parse_mpc_lfn_ext(r.reserved)
mpc_fn = sfn_no_ext + mpc_lfn_part + "." + ext
if mpc_lfn_part:
fn = mpc_fn
elif current_vfat_lfn:
fn = current_vfat_lfn
else:
fn = mpc_fn
if args.verbose and args.out_format == "txt":
fn_text = mpc_fn
if current_vfat_lfn:
fn_text += " (" + current_vfat_lfn + ")"
print("- " + fn_text)
print(" start cluster: #" + str(r.start_clus))
print(" size: " + str(r.file_size))
if start_clus_offset is None:
start_bytes = data_start_clus * clus_size
start_clus_offset = r.start_clus
else:
start_bytes = (data_start_clus - start_clus_offset + r.start_clus) * clus_size
current_vfat_lfn = ""
if args.verbose and args.out_format == "txt":
print(" start pos in floppy: " + str(start_bytes))
if vfloppy_offest:
print(" start pos in img: " + str(vfloppy_offest + start_bytes))
parsed_files.append({
'name': fn,
'start': vfloppy_offest + start_bytes,
'size': r.file_size,
})
return (floppy_name, parsed_files)
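# Return shape sketch (values are illustrative, not real data):
#   ('MPC2000 ', [{'name': 'SOUND1.SND', 'start': 16896, 'size': 123456}, ...])
# 'start' is an absolute byte offset into the source image, including any
# virtual-floppy offset passed in.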
def extract_parsed_files(parsed_files, floppy_id=None):
dest_dir = args.dest
if floppy_id:
dest_dir = args.dest.rstrip("/") + "/" + str(floppy_id) + "/"
Path(dest_dir).mkdir(parents=True, exist_ok=True)
if sudo_user:
shutil.chown(dest_dir, sudo_user, sudo_user)
with open(args.src, 'rb') as f:
for props in parsed_files:
f.seek(props['start'], 0)
file_bytes = f.read(props['size'])
with open(dest_dir + props['name'], "wb") as out_f:
out_f.write(file_bytes)
if sudo_user:
shutil.chown(dest_dir + props['name'], sudo_user, sudo_user)
## ------------------------------------------------------------------------
## PARSE FLOPPY IMAGES
vfloppy_offset = 0
file_bytes = None
f = open(args.src, 'rb')
if floppy_list:
parsed_files = []
for floppy in floppy_list:
if args.verbose and args.out_format == "txt":
print("-"*35)
print("FLOPPY #" + str(floppy))
vfloppy_offset = floppy * 1536 * 1024
f.seek(vfloppy_offset, 0)
file_bytes = f.read(floppy_size)
(name, files) = get_floppy_file_list(file_bytes, vfloppy_offset)
parsed_files.append({
'name': name,
'files': files,
})
else:
file_bytes = f.read(floppy_size)
(name, parsed_files) = get_floppy_file_list(file_bytes, vfloppy_offset)
f.close()
## ------------------------------------------------------------------------
## EXTRACT FILES
if not args.dest:
if args.out_format == "json":
print(json.dumps(parsed_files))
exit(0)
if floppy_list:
    # parsed_files entries were appended in the same order as floppy_list,
    # so pair them up and extract each floppy into its own sub-folder.
    for f_id, props in zip(floppy_list, parsed_files):
        files = props['files']
        if files:
            extract_parsed_files(files, f_id)
else:
extract_parsed_files(parsed_files)
print("Extraction complete!")
|
the-stack_0_5094 | # File name: exercise3.py
# Author: Steve Hommy
# Description: Sorting list in ascending order
# Asking user for range of items that will be on list
number_of_elements = int(input("Enter number of elements in list: "))
# Creating lists
number_list = []
word_list = []
# Appending integers and strings to the lists
for i in range(number_of_elements):
number = int(input("Enter number: "))
number_list.append(number)
for i in range(number_of_elements):
word = input("Type anything: ")
word_list.append(word)
# Sorting lists in ascending order
number_list.sort()
word_list.sort()
# Prints out lists
print(number_list)
print(word_list)
|
the-stack_0_5096 | from typing import List, Optional
from spacy.language import Language
from spacy.tokens import Doc, Span, Token
from edsnlp.pipelines.qualifiers.base import Qualifier
from edsnlp.pipelines.terminations import termination
from edsnlp.utils.filter import consume_spans, filter_spans, get_spans
from edsnlp.utils.inclusion import check_inclusion
from edsnlp.utils.resources import get_verbs
from .patterns import following, preceding, pseudo, verbs_eds, verbs_hyp
class Hypothesis(Qualifier):
"""
Hypothesis detection with spaCy.
The component looks for five kinds of expressions in the text :
- preceding hypothesis, ie cues that precede a hypothetic expression
- following hypothesis, ie cues that follow a hypothetic expression
- pseudo hypothesis : contain a hypothesis cue, but are not hypothesis
(eg "pas de doute"/"no doubt")
- hypothetic verbs : verbs indicating hypothesis (eg "douter")
- classic verbs conjugated to the conditional, thus indicating hypothesis
Parameters
----------
nlp : Language
spaCy nlp pipeline to use for matching.
pseudo : Optional[List[str]]
List of pseudo hypothesis cues.
preceding : Optional[List[str]]
List of preceding hypothesis cues
following : Optional[List[str]]
List of following hypothesis cues.
verbs_hyp : Optional[List[str]]
List of hypothetic verbs.
verbs_eds : Optional[List[str]]
List of mainstream verbs.
filter_matches : bool
Whether to filter out overlapping matches.
attr : str
spaCy's attribute to use:
a string with the value "TEXT" or "NORM", or a dict with the key 'term_attr'
we can also add a key for each regex.
on_ents_only : bool
Whether to look for matches around detected entities only.
Useful for faster inference in downstream tasks.
within_ents : bool
Whether to consider cues within entities.
explain : bool
Whether to keep track of cues for each entity.
regex : Optional[Dict[str, Union[List[str], str]]]
        A dictionary of regex patterns.
"""
defaults = dict(
following=following,
preceding=preceding,
pseudo=pseudo,
termination=termination,
verbs_eds=verbs_eds,
verbs_hyp=verbs_hyp,
)
def __init__(
self,
nlp: Language,
attr: str,
pseudo: Optional[List[str]],
preceding: Optional[List[str]],
following: Optional[List[str]],
termination: Optional[List[str]],
verbs_eds: Optional[List[str]],
verbs_hyp: Optional[List[str]],
on_ents_only: bool,
within_ents: bool,
explain: bool,
):
terms = self.get_defaults(
pseudo=pseudo,
preceding=preceding,
following=following,
termination=termination,
verbs_eds=verbs_eds,
verbs_hyp=verbs_hyp,
)
terms["verbs"] = self.load_verbs(
verbs_hyp=terms.pop("verbs_hyp"),
verbs_eds=terms.pop("verbs_eds"),
)
super().__init__(
nlp=nlp,
attr=attr,
on_ents_only=on_ents_only,
explain=explain,
**terms,
)
self.within_ents = within_ents
self.set_extensions()
@staticmethod
def set_extensions() -> None:
if not Token.has_extension("hypothesis"):
Token.set_extension("hypothesis", default=False)
if not Token.has_extension("hypothesis_"):
Token.set_extension(
"hypothesis_",
getter=lambda token: "HYP" if token._.hypothesis else "CERT",
)
if not Span.has_extension("hypothesis"):
Span.set_extension("hypothesis", default=False)
if not Span.has_extension("hypothesis_"):
Span.set_extension(
"hypothesis_",
getter=lambda span: "HYP" if span._.hypothesis else "CERT",
)
if not Span.has_extension("hypothesis_cues"):
Span.set_extension("hypothesis_cues", default=[])
if not Doc.has_extension("hypothesis"):
Doc.set_extension("hypothesis", default=[])
def load_verbs(
self,
verbs_hyp: List[str],
verbs_eds: List[str],
) -> List[str]:
"""
Conjugate "classic" verbs to conditional, and add hypothesis
verbs conjugated to all tenses.
Parameters
----------
verbs_hyp: List of verbs that specifically imply an hypothesis.
verbs_eds: List of general verbs.
Returns
-------
list of hypothesis verbs conjugated at all tenses and classic
verbs conjugated to conditional.
"""
classic_verbs = get_verbs(verbs_eds)
classic_verbs = classic_verbs.loc[classic_verbs["mode"] == "Conditionnel"]
list_classic_verbs = list(classic_verbs["term"].unique())
hypo_verbs = get_verbs(verbs_hyp)
list_hypo_verbs = list(hypo_verbs["term"].unique())
return list_hypo_verbs + list_classic_verbs
def process(self, doc: Doc) -> Doc:
"""
Finds entities related to hypothesis.
Parameters
----------
doc: spaCy Doc object
Returns
-------
doc: spaCy Doc object, annotated for hypothesis
"""
matches = self.get_matches(doc)
terminations = get_spans(matches, "termination")
boundaries = self._boundaries(doc, terminations)
# Removes duplicate matches and pseudo-expressions in one statement
matches = filter_spans(matches, label_to_remove="pseudo")
entities = list(doc.ents) + list(doc.spans.get("discarded", []))
ents = None
for start, end in boundaries:
ents, entities = consume_spans(
entities,
filter=lambda s: check_inclusion(s, start, end),
second_chance=ents,
)
sub_matches, matches = consume_spans(
matches, lambda s: start <= s.start < end
)
if self.on_ents_only and not ents:
continue
sub_preceding = get_spans(sub_matches, "preceding")
sub_following = get_spans(sub_matches, "following")
sub_verbs = get_spans(sub_matches, "verbs")
if not sub_preceding + sub_following + sub_verbs:
continue
if not self.on_ents_only:
for token in doc[start:end]:
token._.hypothesis = any(
m.end <= token.i for m in sub_preceding + sub_verbs
) or any(m.start > token.i for m in sub_following)
for ent in ents:
if self.within_ents:
cues = [m for m in sub_preceding + sub_verbs if m.end <= ent.end]
cues += [m for m in sub_following if m.start >= ent.start]
else:
cues = [m for m in sub_preceding + sub_verbs if m.end <= ent.start]
cues += [m for m in sub_following if m.start >= ent.end]
hypothesis = ent._.hypothesis or bool(cues)
ent._.hypothesis = hypothesis
if self.explain and hypothesis:
ent._.hypothesis_cues += cues
if not self.on_ents_only and hypothesis:
for token in ent:
token._.hypothesis = True
return doc
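# Pipeline sketch (the registration name is an assumption and may differ
# between edsnlp versions): once this component runs inside an nlp pipeline,
# each entity exposes ent._.hypothesis and ent._.hypothesis_cues, and tokens
# carry token._.hypothesis, exactly as set in process() above.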
|
the-stack_0_5097 | import os
import re
import datetime
def benchmarks_Z3(input_path, output_path, option):
if option == "linear":
save_path_QF_LRA = output_path + "/linear/QF_LRA"
save_path_QF_LIA = output_path + "/linear/QF_LIA"
save_path_QF_BV = output_path + "/linear/QF_BV"
if option == "nonlinear":
save_path_QF_NRA = output_path + "/nonlinear/QF_NRA"
save_path_QF_NIA = output_path + "/nonlinear/QF_NIA"
save_path_QF_BV = output_path + "/nonlinear/QF_BV"
if option == "linear":
input_path = input_path + "/linear"
if option == "nonlinear":
input_path = input_path + "/nonlinear"
for root, dirs, files in os.walk(input_path, topdown=False):
for name in files:
if "output_SMT_Solver_Z3_" in os.path.join(root, name):
if "SMT2" in os.path.join(root, name):
if "-Sol" not in os.path.join(root, name):
aux = re.search('output_SMT_Solver_Z3_(.*)SMT2', os.path.join(root, name))
type_theory = (aux.group(1))
type_theory = type_theory[:-1]
partial_name = name.replace('.smt2', '')
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_LRA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_LIA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "BV") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_BV, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_NRA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_NIA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "BV") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_BV, partial_name + "_" + type_theory + ".smt2")
file = open(completeName_Path, "w")
file.write("(set-info :smt-lib-version 2.6)\n")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "linear":
file.write("(set-logic QF_LRA)\n")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "linear":
file.write("(set-logic QF_LIA)\n")
if (type_theory == "BV") and option == "linear":
file.write("(set-logic QF_BV)\n")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "nonlinear":
file.write("(set-logic QF_NRA)\n")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "nonlinear":
file.write("(set-logic QF_NIA)\n")
if (type_theory == "BV") and option == "nonlinear":
file.write("(set-logic QF_BV)\n")
file.write("(set-info :source |\n")
file.write("Generated by: Mădălina Erașcu, Răzvan Meteș \n")
file.write("Generated on: " + datetime.date.today().strftime("%Y-%m-%d") + "\n")
file.write("Application: " + name.split('-')[0] + "\n")
file.write("Target solver: Z3\n")
file.write("|)\n")
file.write("(set-info :license \"https://creativecommons.org/licenses/by/4.0/\")\n")
file.write("(set-info :category \"industrial\")\n")
file.write("(set-info :minimum")
aux_path = os.path.join(root, name).split('/SMT2')[0] + "/csv/" + name.split('.smt2')[0] + ".csv"
print(aux_path)
if os.path.isfile(aux_path) == False or os.stat(aux_path).st_size == 0:
file.write(" unknown)\n")
file.write("\n")
else:
with open(aux_path) as fin:
next(fin)
for line in fin:
min_price = line.split(None, 1)[0]
file.write(" " + min_price.split(',')[0] + ")\n")
file.write("\n")
with open(os.path.join(root, name)) as f:
lines = f.readlines()
file.writelines(lines)
file.close()
if __name__ == "__main__":
benchmarks_Z3("/Users/razvanmetes/Optimization-Modulo-Theory/experimentalResults", "/Users/razvanmetes/Optimization-Modulo-Theory/benchmarks/output_Z3", "linear") |
the-stack_0_5099 | #!/usr/bin/env python3
# Build and install fmt
import sys
import logging
from pathlib import Path
from subprocess import run, CalledProcessError
import multiprocessing
# Version check
if sys.version_info.minor < 6:
print("Python version is %s, 3.6+ is required." % sys.version)
sys.exit(1)
def build_fmt(fmt_path: Path, fmt_build_path: Path, fmt_install_path: Path):
"""Build fmt from source path into build path"""
# create directory with any intermediate parents, if needed
# similar to Unix: mkdir -p
Path(fmt_build_path).mkdir(parents=True, exist_ok=True)
# We want:
# - build for: Release (default)
# - install (default)
# - no doc with FMT_DOC:BOOL=OFF
# - no test with FMT_TEST=OFF
options = [
"-G", "Unix Makefiles",
f"-DCMAKE_INSTALL_PREFIX={fmt_install_path}",
f"-DFMT_DOC:BOOL=OFF",
f"-DFMT_TEST=OFF",
]
try:
run(["cmake", *options, fmt_path], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"cmake command failed")
sys.exit(e.returncode)
cpu_count = 1
try:
cpu_count = multiprocessing.cpu_count()
except NotImplementedError as e:
logging.error(f"multiprocessing.cpu_count() not implemented, defaulting to -j1")
try:
run(["make", f"-j{cpu_count}"], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"make command failed")
sys.exit(e.returncode)
try:
run(["make", "install"], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"make install command failed")
sys.exit(e.returncode)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# get third-party dir from this file's path
third_party_path = Path(__file__).parent.absolute()
fmt_path = third_party_path.joinpath("fmt")
fmt_build_path = third_party_path.joinpath("build/fmt")
fmt_install_path = third_party_path.joinpath("install/fmt")
# build fmt in target path
logging.info(f"Building fmt from {fmt_path} in {fmt_build_path} and installing to {fmt_install_path}...")
build_fmt(fmt_path, fmt_build_path, fmt_install_path)
logging.info("Done.")
|
the-stack_0_5101 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from TermTk.TTkCore.log import TTkLog
from TermTk.TTkCore.cfg import TTkCfg
from TermTk.TTkCore.constant import TTkK
from TermTk.TTkCore.helper import TTkHelper
# Ansi Escape Codes:
# https://conemu.github.io/en/AnsiEscapeCodes.html
# From http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html
# Code: Client: Meaning:
# [0m -- reset; clears all colors and styles (to white on black)
# [1m -- bold on (see below)
# [3m -- italics on
# [4m -- underline on
# [7m 2.50 inverse on; reverses foreground & background colors
# [9m 2.50 strikethrough on
# [22m 2.50 bold off (see below)
# [23m 2.50 italics off
# [24m 2.50 underline off
# [27m 2.50 inverse off
# [29m 2.50 strikethrough off
# [30m -- set foreground color to black
# [31m -- set foreground color to red
# [32m -- set foreground color to green
# [33m -- set foreground color to yellow
# [34m -- set foreground color to blue
# [35m -- set foreground color to magenta (purple)
# [36m -- set foreground color to cyan
# [37m -- set foreground color to white
# [39m 2.53 set foreground color to default (white)
# [40m -- set background color to black
# [41m -- set background color to red
# [42m -- set background color to green
# [43m -- set background color to yellow
# [44m -- set background color to blue
# [45m -- set background color to magenta (purple)
# [46m -- set background color to cyan
# [47m -- set background color to white
# [49m 2.53 set background color to default (black)
class _TTkColor:
__slots__ = ("_fg", "_bg", "_mod", "_colorMod")
_fg: str
_bg: str
_mod: str
def __init__(self, fg: str = "", bg: str = "", mod: str = "", colorMod=None):
self._fg = fg
self._bg = bg
self._mod = mod
self._colorMod = colorMod
def colorType(self):
return (
(TTkK.Foreground if self._fg != "" else TTkK.NONE)
| (TTkK.Background if self._bg != "" else TTkK.NONE)
| (TTkK.Modifier if self._mod != "" else TTkK.NONE)
)
def getHex(self, ctype):
if ctype == TTkK.Foreground:
r, g, b = self.fgToRGB()
else:
r, g, b = self.bgToRGB()
return "#{:06x}".format(r << 16 | g << 8 | b)
def fgToRGB(self):
if self._fg == "":
return 0xFF, 0xFF, 0xFF
cc = self._fg.split(";")
r = int(cc[2])
g = int(cc[3])
b = int(cc[4][:-1])
return r, g, b
def bgToRGB(self):
if self._bg == "":
return 0, 0, 0
cc = self._bg.split(";")
r = int(cc[2])
g = int(cc[3])
b = int(cc[4][:-1])
return r, g, b
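    # Example (assuming TTkHelper emits standard 24-bit ";"-separated SGR
    # sequences): TTkColor.fg('#ff8800').getHex(TTkK.Foreground) == '#ff8800'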
def __str__(self):
return self._fg + self._bg + self._mod
def __eq__(self, other):
if other is None:
return False
return (
self._fg == other._fg and self._bg == other._bg and self._mod == other._mod
)
def __add__(self, other):
# TTkLog.debug("__add__")
if isinstance(other, str):
return str(self) + other
else:
fg: str = other._fg or self._fg
bg: str = other._bg or self._bg
mod: str = self._mod + other._mod
colorMod = other._colorMod or self._colorMod
return TTkColor(fg, bg, mod, colorMod)
def __radd__(self, other):
# TTkLog.debug("__radd__")
if isinstance(other, str):
return other + str(self)
else:
fg: str = other._fg or self._fg
bg: str = other._bg or self._bg
            mod: str = self._mod + other._mod
colorMod = other._colorMod or self._colorMod
return TTkColor(fg, bg, mod, colorMod)
def __sub__(self, other):
# TTkLog.debug("__sub__")
# if other is None: return str(self)
if (
"" == self._bg != other._bg
or "" == self._fg != other._fg
or "" == self._mod != other._mod
):
return "\033[0m" + self
return str(self)
def modParam(self, *args, **kwargs):
if self._colorMod is None:
return self
ret = self.copy()
ret._colorMod.setParam(*args, **kwargs)
return ret
def mod(self, x, y):
if self._colorMod is None:
return self
return self._colorMod.exec(x, y, self)
def copy(self, modifier=True):
ret = _TTkColor()
ret._fg = self._fg
ret._bg = self._bg
ret._mod = self._mod
if modifier:
ret._colorMod = self._colorMod.copy()
return ret
class _TTkColorModifier:
def __init__(self, *args, **kwargs):
pass
def setParam(self, *args, **kwargs):
pass
def copy(self):
return self
class TTkColorGradient(_TTkColorModifier):
__slots__ = ("_increment", "_val", "_buffer")
_increment: int
_val: int
def __init__(self, *args, **kwargs):
_TTkColorModifier.__init__(self, *args, **kwargs)
self._increment = kwargs.get("increment", 0)
self._val = 0
self._buffer = {}
def setParam(self, *args, **kwargs):
self._val = kwargs.get("val", 0)
def exec(self, x, y, color):
def _applyGradient(c):
if c == "":
return c
multiplier = abs(self._val + y)
cc = c.split(";")
# TTkLog.debug("Eugenio "+c.replace('\033','<ESC>'))
r = int(cc[2]) + self._increment * multiplier
g = int(cc[3]) + self._increment * multiplier
b = int(cc[4][:-1]) + self._increment * multiplier
r = max(min(255, r), 0)
g = max(min(255, g), 0)
b = max(min(255, b), 0)
return f"{cc[0]};{cc[1]};{r};{g};{b}m"
bname = str(color)
# I made a buffer to keep all the gradient values to speed up the paint process
if bname not in self._buffer:
self._buffer[bname] = [None] * (256 * 2)
id = self._val + y - 256
if self._buffer[bname][id] is not None:
return self._buffer[bname][id]
copy = color.copy(modifier=False)
copy._fg = _applyGradient(color._fg)
copy._bg = _applyGradient(color._bg)
self._buffer[bname][id] = copy
return self._buffer[bname][id]
def copy(self):
return self
# ret = TTkColorGradient()
# ret._increment = self._increment
# ret._val = self._val
# return ret
class TTkColor(_TTkColor):
"""TermTk Color helper
.. role:: strike
:class: strike
.. role:: underline
:class: underline
The TTkColor constructor creates the color based on HEX values.
Example:
.. code:: python
# Foreground only colors:
color_fg_red = TTkColor.fg('#FF0000')
color_fg_green = TTkColor.fg('#00FF00')
color_fg_blue = TTkColor.fg('#0000FF')
# Background only colors:
color_bg_red = TTkColor.bg('#FF0000')
color_bg_green = TTkColor.bg('#00FF00')
color_bg_blue = TTkColor.bg('#0000FF')
# Combine
color_1 = color_fg_red + color_bg_blue
color_2 = color_fg_red + TTkColor.bg('#FFFF00')
color_3 = color_2 + TTkColor.UNDERLINE + TTkColor.BOLD
"""
RST = _TTkColor(fg="\033[0m")
"""Reset to the default terminal color and modifiers"""
# Modifiers:
BOLD = _TTkColor(mod="\033[1m")
"""**Bold** modifier"""
ITALIC = _TTkColor(mod="\033[3m")
"""*Italic* modifier"""
UNDERLINE = _TTkColor(mod="\033[4m")
""":underline:`Underline` modifier"""
STRIKETROUGH = _TTkColor(mod="\033[9m")
""":strike:`Striketrough` modifier"""
@staticmethod
def fg(*args, **kwargs):
"""Helper to generate a Foreground color
Example:
.. code:: python
color_1 = TTkColor.fg('#FF0000')
color_2 = TTkColor.fg(color='#00FF00')
color_3 = TTkColor.fg('#0000FF', modifier=TTkColorGradient(increment=6))
:param str color: the color representation in (str)HEX
:type color: str
:param str modifier: (experimental) the color modifier to be used to improve the **kinkiness**
:type modifier: TTkColorModifier, optional
"""
mod = kwargs.get("modifier", None)
if len(args) > 0:
color = args[0]
else:
color = kwargs.get("color", "")
return TTkColor(fg=TTkHelper.Color.fg(color), colorMod=mod)
@staticmethod
def bg(*args, **kwargs):
"""Helper to generate a Background color
Example:
.. code:: python
color_1 = TTkColor.bg('#FF0000')
color_2 = TTkColor.bg(color='#00FF00')
color_3 = TTkColor.bg('#0000FF', modifier=TTkColorGradient(increment=6))
:param str color: the color representation in (str)HEX
:type color: str
:param str modifier: (experimental) the color modifier to be used to improve the **kinkiness**
:type modifier: TTkColorModifier, optional
"""
mod = kwargs.get("modifier", None)
if len(args) > 0:
color = args[0]
else:
color = kwargs.get("color", "")
return TTkColor(bg=TTkHelper.Color.bg(color), colorMod=mod)
|
the-stack_0_5102 | import cv2 as cv
import argparse
import numpy as np
import sys
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
parser = argparse.ArgumentParser(description='Use this script to run semantic segmentation deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True,
help='Path to a binary file of model contains trained weights. '
'It could be a file with extensions .caffemodel (Caffe), '
'.pb (TensorFlow), .t7 or .net (Torch), .weights (Darknet)')
parser.add_argument('--config',
help='Path to a text file of model contains network configuration. '
'It could be a file with extensions .prototxt (Caffe), .pbtxt (TensorFlow), .cfg (Darknet)')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'],
help='Optional name of an origin framework of the model. '
'Detect it automatically if it does not set.')
parser.add_argument('--classes', help='Optional path to a text file with names of classes.')
parser.add_argument('--colors', help='Optional path to a text file with colors for an every class. '
'An every color is represented with three values from 0 to 255 in BGR channels order.')
parser.add_argument('--mean', nargs='+', type=float, default=[0, 0, 0],
help='Preprocess input image by subtracting mean values. '
'Mean values should be in BGR order.')
parser.add_argument('--scale', type=float, default=1.0,
help='Preprocess input image by multiplying on a scale factor.')
parser.add_argument('--width', type=int, required=True,
help='Preprocess input image by resizing to a specific width.')
parser.add_argument('--height', type=int, required=True,
help='Preprocess input image by resizing to a specific height.')
parser.add_argument('--rgb', action='store_true',
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.seek.intel.com/deep-learning-deployment)" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
args = parser.parse_args()
np.random.seed(324)
# Load names of classes
classes = None
if args.classes:
with open(args.classes, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Load colors
colors = None
if args.colors:
with open(args.colors, 'rt') as f:
colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]
legend = None
def showLegend(classes):
global legend
if not classes is None and legend is None:
blockHeight = 30
assert(len(classes) == len(colors))
legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
for i in range(len(classes)):
block = legend[i * blockHeight:(i + 1) * blockHeight]
block[:,:] = colors[i]
            cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
cv.namedWindow('Legend', cv.WINDOW_NORMAL)
cv.imshow('Legend', legend)
classes = None
# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
cap = cv.VideoCapture(args.input if args.input else 0)
legend = None
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()
if not hasFrame:
cv.waitKey()
break
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, args.scale, (args.width, args.height), args.mean, args.rgb, crop=False)
# Run a model
net.setInput(blob)
score = net.forward()
numClasses = score.shape[1]
height = score.shape[2]
width = score.shape[3]
# Draw segmentation
if not colors:
# Generate colors
colors = [np.array([0, 0, 0], np.uint8)]
for i in range(1, numClasses):
colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2)
classIds = np.argmax(score[0], axis=0)
segm = np.stack([colors[idx] for idx in classIds.flatten()])
segm = segm.reshape(height, width, 3)
segm = cv.resize(segm, (frame.shape[1], frame.shape[0]), interpolation=cv.INTER_NEAREST)
frame = (0.1 * frame + 0.9 * segm).astype(np.uint8)
# Put efficiency information.
t, _ = net.getPerfProfile()
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
showLegend(classes)
cv.imshow(winName, frame)
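# Example invocation (sketch; the script and model file names are assumptions,
# not shipped with this code):
#   python3 segmentation.py --model fcn8s-heavy-pascal.caffemodel \
#       --config fcn8s-heavy-pascal.prototxt --classes pascal-classes.txt \
#       --width 500 --height 500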
|
the-stack_0_5103 | import attr
import json
from ._core import Enum
class GuestStatus(Enum):
INVITED = 1
GOING = 2
DECLINED = 3
@attr.s(cmp=False)
class Plan:
"""Represents a plan."""
#: ID of the plan
uid = attr.ib(None, init=False)
#: Plan time (timestamp), only precise down to the minute
time = attr.ib(converter=int)
#: Plan title
title = attr.ib()
#: Plan location name
location = attr.ib(None, converter=lambda x: x or "")
#: Plan location ID
location_id = attr.ib(None, converter=lambda x: x or "")
#: ID of the plan creator
author_id = attr.ib(None, init=False)
#: Dictionary of `User` IDs mapped to their `GuestStatus`
guests = attr.ib(None, init=False)
@property
def going(self):
"""List of the `User` IDs who will take part in the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.GOING
]
@property
def declined(self):
"""List of the `User` IDs who won't take part in the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.DECLINED
]
@property
def invited(self):
"""List of the `User` IDs who are invited to the plan."""
return [
id_
for id_, status in (self.guests or {}).items()
if status is GuestStatus.INVITED
]
@classmethod
def _from_pull(cls, data):
rtn = cls(
time=data.get("event_time"),
title=data.get("event_title"),
location=data.get("event_location_name"),
location_id=data.get("event_location_id"),
)
rtn.uid = data.get("event_id")
rtn.author_id = data.get("event_creator_id")
rtn.guests = {
x["node"]["id"]: GuestStatus[x["guest_list_state"]]
for x in json.loads(data["guest_state_list"])
}
return rtn
@classmethod
def _from_fetch(cls, data):
rtn = cls(
time=data.get("event_time"),
title=data.get("title"),
location=data.get("location_name"),
location_id=str(data["location_id"]) if data.get("location_id") else None,
)
rtn.uid = data.get("oid")
rtn.author_id = data.get("creator_id")
rtn.guests = {id_: GuestStatus[s] for id_, s in data["event_members"].items()}
return rtn
@classmethod
def _from_graphql(cls, data):
rtn = cls(
time=data.get("time"),
title=data.get("event_title"),
location=data.get("location_name"),
)
rtn.uid = data.get("id")
rtn.author_id = data["lightweight_event_creator"].get("id")
rtn.guests = {
x["node"]["id"]: GuestStatus[x["guest_list_state"]]
for x in data["event_reminder_members"]["edges"]
}
return rtn
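# Construction sketch: Plan(time=1500000000, title="Lunch", location="Cafe")
# creates an unsaved plan; uid, author_id and guests are init=False and only
# get filled in by the _from_* parsers above.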
|
the-stack_0_5104 | from sklearn.cluster import DBSCAN
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df_energy = pd.read_csv('results/SklearnScaledEnergy.csv')
df_energy = df_energy.drop(['id', '1', '2', '3', '4', '5','6','7','8','9'], axis=1)
df_energy.columns = ['energy']
df_perplex = pd.read_csv('results/SklearnScaledPPL.csv')
df_perplex = df_perplex.drop(['id','1','2','3','4','5','6','7','8','9'], axis=1)
df_perplex.columns = ['perplexity']
#df = pd.read_csv('data/actualbase.csv')
#df = df.drop(['id', 'vocab_size', 'hidden_size','num_hidden_layers', 'num_attention_heads', 'intermediate_size', 'actual_hidden_size', 'hidden_act', 'hidden_dropout_prob',
#'attention_probs_dropout_prog', 'max_position_embeddings', 'type_vocab_size', 'initializer_range', 'layer_norm_eps', 'gradient_checkpointing', 'position_embedding_type',
#'use_cache', 'energy_loss'], axis=1)
df = pd.concat([df_perplex, df_energy], axis=1)
#print(df)
#df = df[['perplexity', 'energy_consumption']]
#df['energy_consumption'] = df['energy_consumption'] * 142.3439911
#df['perplexity'] = df.apply(lambda x: np.log2(x))
X = df.to_numpy()
clustering = DBSCAN(eps=0.4, min_samples=5).fit(X)
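# eps is the neighbourhood radius in the (already scaled) feature space and
# min_samples the minimum neighbourhood size for a core point; points that fit
# no cluster are labelled -1 (noise) by scikit-learn's DBSCAN.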
labels = clustering.labels_
print(labels)
core_samples_mask = np.zeros_like(labels, dtype=bool)
core_samples_mask[clustering.core_sample_indices_] = True
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=10)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=10)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.xlabel('Perplexity, translated and scaled')
plt.ylabel('Energy Consumption (kWh), translated and scaled')
df['clusters'] = labels
#print(df)
df.to_csv('out.csv')
plt.show() |
the-stack_0_5105 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
DO_COMPOUND = False
phrasesCompound = [
# use a dialetc with restricted grammar
# 1. all functions are on their own line
# 2. all functions only use data thru their params, or created in the function
# "a=1; a=2; function(x){x=a;a=3}",
# "a=r.hex; function(x){x=a;a=3;nrow(x)*a}(a)",
# "function(x){y=x*2; y+1}(2)",
# "mean=function(x){apply(x,1,sum)/nrow(x)};mean(r.hex)",
]
badPhrases = [
"&&",
"||",
"%*%",
"ifelse",
"cbind",
"print",
"apply",
"sapply",
"ddply",
"var",
"Reduce",
"cut",
"findInterval",
"runif",
"scale",
"t",
"seq_len",
"seq",
"rep_len",
"c",
"table",
"unique",
"factor",
]
phrases = [
"func1",
"func2",
"func3",
"func4",
"func5",
# "func6",
"nrow",
"ncol",
"length",
"is.factor",
"any.factor",
"any.na",
"isTRUE",
"min.na.rm",
"max.na.rm",
"min",
"max",
"xorsum",
]
if DO_COMPOUND:
phrases += phrasesCompound
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1, java_heap_GB=12)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_apply_phrases(self):
h2o.beta_features = True
bucket = 'home-0xdiag-datasets'
# csvPathname = 'standard/covtype.data'
csvPathname = "standard/covtype.shuffled.10pct.data"
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', hex_key=hexKey)
for col in [1]:
initList = [
('r.hex', 'r.hex=i.hex'),
(None, "func1=function(x){max(x[,%s])}" % col),
(None, "func2=function(x){a=3;nrow(x[,%s])*a}" % col),
(None, "func3=function(x){apply(x[,%s],2,sum)/nrow(x[,%s])}" % (col, col) ),
# (None, "function(x) { cbind( mean(x[,1]), mean(x[,%s]) ) }" % col),
(None, "func4=function(x) { mean( x[,%s]) }" % col),
(None, "func5=function(x) { sd( x[,%s]) }" % col),
(None, "func6=function(x) { quantile(x[,%s] , c(0.9) ) }" % col),
]
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=60)
for p in phrases:
# execExpr = "apply(r.hex, c(2), " + p + ")"
execExpr = "apply(r.hex, 2, " + p + ")"
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=60)
if __name__ == '__main__':
h2o.unit_main()
|
the-stack_0_5109 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2017-2020 Airinnova AB and the PyTornado authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Authors:
# * Alessandro Gastaldi
# * Aaron Dettmann
"""
Visualisation of the VLM downwash matrix
Developed at Airinnova AB, Stockholm, Sweden.
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
import pytornado.plot.plottools as pt
logger = logging.getLogger(__name__)
def view_downwash(vlmdata, plt_settings):
"""
Visualise matrix of downwash factors
Args:
:vlmdata: (object) data structure for VLM analysis data
:plt_settings: general plot settings
"""
logger.info("Generating downwash plot...")
if not isinstance(vlmdata.matrix_downwash, np.ndarray):
err_msg = "Downwash factor matrix is not a numpy array"
logger.error(err_msg)
raise TypeError(err_msg)
figure = plt.figure(figsize=(9, 9))
axes = figure.add_subplot(111)
axes.set_aspect('equal')
axes.matshow(vlmdata.matrix_downwash, cmap=pt.C.COLORMAP)
axes.set_xlabel('i')
axes.set_ylabel('j')
axes.set_title("Downwash factor matrix")
pt.show_and_save(plt_settings, (figure, 'downwash'))
plt.close('all')
|
the-stack_0_5110 | # Originally contributed by Stefan Schukat as part of this arbitrary-sized
# arrays patch.
from win32com.client import gencache
from win32com.test import util
import unittest
ZeroD = 0
OneDEmpty = []
OneD = [1, 2, 3]
TwoD = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
TwoD1 = [[[1, 2, 3, 5], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3], [1, 2, 3]]]
OneD1 = [[[1, 2, 3], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]]
OneD2 = [
[1, 2, 3],
[1, 2, 3, 4, 5],
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]],
]
ThreeD = [[[1, 2, 3], [1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3], [1, 2, 3]]]
FourD = [
[
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
],
[
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
],
]
LargeD = [
[[list(range(10))] * 10],
] * 512
def _normalize_array(a):
if type(a) != type(()):
return a
ret = []
for i in a:
ret.append(_normalize_array(i))
return ret
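# e.g. _normalize_array((1, (2, 3))) -> [1, [2, 3]]; non-tuple values are
# returned unchanged.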
class ArrayTest(util.TestCase):
def setUp(self):
self.arr = gencache.EnsureDispatch("PyCOMTest.ArrayTest")
def tearDown(self):
self.arr = None
def _doTest(self, array):
self.arr.Array = array
self.assertEqual(_normalize_array(self.arr.Array), array)
def testZeroD(self):
self._doTest(ZeroD)
def testOneDEmpty(self):
self._doTest(OneDEmpty)
def testOneD(self):
self._doTest(OneD)
def testTwoD(self):
self._doTest(TwoD)
def testThreeD(self):
self._doTest(ThreeD)
def testFourD(self):
self._doTest(FourD)
def testTwoD1(self):
self._doTest(TwoD1)
def testOneD1(self):
self._doTest(OneD1)
def testOneD2(self):
self._doTest(OneD2)
def testLargeD(self):
self._doTest(LargeD)
if __name__ == "__main__":
try:
util.testmain()
except SystemExit as rc:
if not rc:
raise
|
the-stack_0_5112 | """Package variables module.
Package-scoped configuration variable definitions.
"""
PKG_DEBUG_OPT = select({":enable_debug": ["-g"], "//conditions:default": []})
PKG_VERBOSE_OPT = select({":enable_verbose": ["-verbose"], "//conditions:default": []})
PKG_OPTS = PKG_DEBUG_OPT + PKG_VERBOSE_OPT
PKG_PPX_EXECUTABLE_OPTS = PKG_OPTS
PKG_PPX_MODULE_OPTS = PKG_OPTS
PKG_PPX_ARCHIVE_OPTS = PKG_OPTS
PKG_NS_MODULE_OPTS = PKG_OPTS
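# Consumption sketch (target names are assumptions): downstream rules forward
# these, e.g. `opts = PKG_PPX_MODULE_OPTS`, so whichever build setting matches
# the ":enable_debug" / ":enable_verbose" config_setting adds -g / -verbose
# package-wide.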
|
the-stack_0_5113 | # -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""sktime window forecaster base class."""
__author__ = ["@mloning", "@big-o"]
__all__ = ["_BaseWindowForecaster"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.model_selection import CutoffSplitter
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.utils.datetime import _shift
from sktime.utils.validation.forecasting import check_cv
class _BaseWindowForecaster(BaseForecaster):
"""Base class for forecasters that use sliding windows."""
def __init__(self, window_length=None):
super(_BaseWindowForecaster, self).__init__()
self.window_length = window_length
self.window_length_ = None
def update_predict(
self,
y,
cv=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Make and update predictions iteratively over the test set.
Parameters
----------
y : pd.Series
cv : temporal cross-validation generator, optional (default=None)
X : pd.DataFrame, optional (default=None)
update_params : bool, optional (default=True)
return_pred_int : bool, optional (default=False)
alpha : int or list of ints, optional (default=None)
Returns
-------
y_pred : pd.Series or pd.DataFrame
"""
if cv is not None:
cv = check_cv(cv)
else:
cv = SlidingWindowSplitter(
self.fh.to_relative(self.cutoff),
window_length=self.window_length_,
start_with_window=False,
)
return self._predict_moving_cutoff(
y,
cv,
X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Predict core logic."""
if return_pred_int:
raise NotImplementedError()
kwargs = {"X": X, "return_pred_int": return_pred_int, "alpha": alpha}
# all values are out-of-sample
if fh.is_all_out_of_sample(self.cutoff):
return self._predict_fixed_cutoff(
fh.to_out_of_sample(self.cutoff), **kwargs
)
# all values are in-sample
elif fh.is_all_in_sample(self.cutoff):
return self._predict_in_sample(fh.to_in_sample(self.cutoff), **kwargs)
# both in-sample and out-of-sample values
else:
y_ins = self._predict_in_sample(fh.to_in_sample(self.cutoff), **kwargs)
y_oos = self._predict_fixed_cutoff(
fh.to_out_of_sample(self.cutoff), **kwargs
)
return y_ins.append(y_oos)
def _predict_fixed_cutoff(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make single-step or multi-step fixed cutoff predictions.
Parameters
----------
fh : np.array
all positive (> 0)
X : pd.DataFrame
return_pred_int : bool
alpha : float or array-like
Returns
-------
y_pred = pd.Series
"""
# assert all(fh > 0)
y_pred = self._predict_last_window(
fh, X, return_pred_int=return_pred_int, alpha=alpha
)
index = fh.to_absolute(self.cutoff)
return pd.Series(y_pred, index=index)
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make in-sample prediction using single-step moving-cutoff predictions.
Parameters
----------
fh : np.array
all non-positive (<= 0)
X : pd.DataFrame
return_pred_int : bool
alpha : float or array-like
Returns
-------
y_pred : pd.DataFrame or pd.Series
"""
y_train = self._y
# generate cutoffs from forecasting horizon, note that cutoffs are
# still based on integer indexes, so that they can be used with .iloc
cutoffs = fh.to_relative(self.cutoff) + len(y_train) - 2
cv = CutoffSplitter(cutoffs, fh=1, window_length=self.window_length_)
return self._predict_moving_cutoff(
y_train,
cv,
X,
update_params=False,
return_pred_int=return_pred_int,
alpha=alpha,
)
def _predict_last_window(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Predict core logic.
Parameters
----------
fh : np.array
X : pd.DataFrame
return_pred_int : bool
alpha : float or list of floats
Returns
-------
y_pred : np.array
"""
raise NotImplementedError("abstract method")
def _get_last_window(self):
"""Select last window."""
# Get the start and end points of the last window.
cutoff = self.cutoff
start = _shift(cutoff, by=-self.window_length_ + 1)
# Get the last window of the endogenous variable.
y = self._y.loc[start:cutoff].to_numpy()
# If X is given, also get the last window of the exogenous variables.
X = self._X.loc[start:cutoff].to_numpy() if self._X is not None else None
return y, X
@staticmethod
def _predict_nan(fh):
"""Predict nan if predictions are not possible."""
return np.full(len(fh), np.nan)
def _update_predict_single(
self,
y,
fh,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Update and make forecasts, core logic..
Implements default behaviour of calling update and predict
sequentially, but can be overwritten by subclasses
to implement more efficient updating algorithms when available.
Parameters
----------
y
fh
X
update_params
return_pred_int
alpha
Returns
-------
predictions
"""
if X is not None:
raise NotImplementedError()
self.update(y, X, update_params=update_params)
return self._predict(fh, X, return_pred_int=return_pred_int, alpha=alpha)
def _format_moving_cutoff_predictions(y_preds, cutoffs):
"""Format moving-cutoff predictions."""
if not isinstance(y_preds, list):
raise ValueError(f"`y_preds` must be a list, but found: {type(y_preds)}")
if len(y_preds[0]) == 1:
# return series for single step ahead predictions
return pd.concat(y_preds)
else:
# return data frame when we predict multiple steps ahead
y_pred = pd.DataFrame(y_preds).T
y_pred.columns = cutoffs
if y_pred.shape[1] == 1:
return y_pred.iloc[:, 0]
return y_pred
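# Shape sketch: for single-step predictions the per-cutoff series are simply
# concatenated into one pd.Series; for multi-step predictions each cutoff
# becomes a column of the returned DataFrame (collapsed back to a Series when
# there is only one cutoff).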
|
the-stack_0_5116 | import logging
import yaml
from .dict_util import deep_dict_merge
from .loader import IncludeLoader
logger = logging.getLogger(__name__)
def load_global_config(global_cfg_paths):
"""Given a list of file paths to global config files, load each of them and
return the joined dictionary.
This does a deep dict merge.
Args:
global_cfg_paths (list(str)): List of filenames to load from
Returns:
dict: joined global configs
"""
global_cfg = {}
if global_cfg_paths:
logger.debug("Loading global config from %s", global_cfg_paths)
for filename in global_cfg_paths:
with open(filename, "r") as gfileobj:
contents = yaml.load(gfileobj, Loader=IncludeLoader)
global_cfg = deep_dict_merge(global_cfg, contents)
return global_cfg
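# Usage sketch (file names are hypothetical):
#   cfg = load_global_config(["common.yaml", "integration.yaml"])
# Assuming deep_dict_merge lets its second argument win, overlapping keys take
# their value from the file listed last.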
|
the-stack_0_5117 | """Media Player component to integrate TVs exposing the Joint Space API."""
from __future__ import annotations
from haphilipsjs import ConnectionFailure
from homeassistant.components.media_player import (
BrowseMedia,
MediaPlayerDeviceClass,
MediaPlayerEntity,
)
from homeassistant.components.media_player.const import (
MEDIA_CLASS_APP,
MEDIA_CLASS_CHANNEL,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_APP,
MEDIA_TYPE_APPS,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_CHANNELS,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import LOGGER as _LOGGER, PhilipsTVDataUpdateCoordinator
from .const import DOMAIN
SUPPORT_PHILIPS_JS = (
SUPPORT_TURN_OFF
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_BROWSE_MEDIA
| SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_STOP
)
CONF_ON_ACTION = "turn_on_action"
def _inverted(data):
return {v: k for k, v in data.items()}
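# Illustrative example (not in the original source):
#   _inverted({"hdmi1": "HDMI 1"}) == {"HDMI 1": "hdmi1"}
# This lets a source name chosen in the UI be mapped back to its source id.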
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the configuration entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
PhilipsTVMediaPlayer(
coordinator,
)
]
)
class PhilipsTVMediaPlayer(CoordinatorEntity, MediaPlayerEntity):
"""Representation of a Philips TV exposing the JointSpace API."""
_coordinator: PhilipsTVDataUpdateCoordinator
_attr_device_class = MediaPlayerDeviceClass.TV
def __init__(
self,
coordinator: PhilipsTVDataUpdateCoordinator,
) -> None:
"""Initialize the Philips TV."""
self._tv = coordinator.api
self._coordinator = coordinator
self._sources = {}
self._channels = {}
self._supports = SUPPORT_PHILIPS_JS
self._system = coordinator.system
self._attr_name = coordinator.system["name"]
self._attr_unique_id = coordinator.unique_id
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, coordinator.unique_id),
},
manufacturer="Philips",
model=coordinator.system.get("model"),
sw_version=coordinator.system.get("softwareversion"),
name=coordinator.system["name"],
)
self._state = STATE_OFF
self._media_content_type: str | None = None
self._media_content_id: str | None = None
self._media_title: str | None = None
self._media_channel: str | None = None
super().__init__(coordinator)
self._update_from_coordinator()
async def _async_update_soon(self):
"""Reschedule update task."""
self.async_write_ha_state()
await self.coordinator.async_request_refresh()
@property
def supported_features(self):
"""Flag media player features that are supported."""
supports = self._supports
if self._coordinator.turn_on or (
self._tv.on and self._tv.powerstate is not None
):
supports |= SUPPORT_TURN_ON
return supports
@property
def state(self):
"""Get the device state. An exception means OFF state."""
if self._tv.on and (self._tv.powerstate == "On" or self._tv.powerstate is None):
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return the current input source."""
return self._sources.get(self._tv.source_id)
@property
def source_list(self):
"""List of available input sources."""
return list(self._sources.values())
async def async_select_source(self, source):
"""Set the input source."""
if source_id := _inverted(self._sources).get(source):
await self._tv.setSource(source_id)
await self._async_update_soon()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._tv.volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._tv.muted
async def async_turn_on(self):
"""Turn on the device."""
if self._tv.on and self._tv.powerstate:
await self._tv.setPowerState("On")
self._state = STATE_ON
else:
await self._coordinator.turn_on.async_run(self.hass, self._context)
await self._async_update_soon()
async def async_turn_off(self):
"""Turn off the device."""
if self._state == STATE_ON:
await self._tv.sendKey("Standby")
self._state = STATE_OFF
await self._async_update_soon()
else:
_LOGGER.debug("Ignoring turn off when already in expected state")
async def async_volume_up(self):
"""Send volume up command."""
await self._tv.sendKey("VolumeUp")
await self._async_update_soon()
async def async_volume_down(self):
"""Send volume down command."""
await self._tv.sendKey("VolumeDown")
await self._async_update_soon()
async def async_mute_volume(self, mute):
"""Send mute command."""
if self._tv.muted != mute:
await self._tv.sendKey("Mute")
await self._async_update_soon()
else:
_LOGGER.debug("Ignoring request when already in expected state")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._tv.setVolume(volume, self._tv.muted)
await self._async_update_soon()
async def async_media_previous_track(self):
"""Send rewind command."""
await self._tv.sendKey("Previous")
await self._async_update_soon()
async def async_media_next_track(self):
"""Send fast forward command."""
await self._tv.sendKey("Next")
await self._async_update_soon()
async def async_media_play_pause(self):
"""Send pause command to media player."""
if self._tv.quirk_playpause_spacebar:
await self._tv.sendUnicode(" ")
else:
await self._tv.sendKey("PlayPause")
await self._async_update_soon()
async def async_media_play(self):
"""Send pause command to media player."""
await self._tv.sendKey("Play")
await self._async_update_soon()
async def async_media_pause(self):
"""Send play command to media player."""
await self._tv.sendKey("Pause")
await self._async_update_soon()
async def async_media_stop(self):
"""Send play command to media player."""
await self._tv.sendKey("Stop")
await self._async_update_soon()
@property
def media_channel(self):
"""Get current channel if it's a channel."""
return self._media_channel
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def media_content_type(self):
"""Return content type of playing media."""
return self._media_content_type
@property
def media_content_id(self):
"""Content type of current playing media."""
return self._media_content_id
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._media_content_id and self._media_content_type in (
MEDIA_TYPE_APP,
MEDIA_TYPE_CHANNEL,
):
return self.get_browse_image_url(
self._media_content_type, self._media_content_id, media_image_id=None
)
return None
@property
def app_id(self):
"""ID of the current running app."""
return self._tv.application_id
@property
def app_name(self):
"""Name of the current running app."""
if app := self._tv.applications.get(self._tv.application_id):
return app.get("label")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Call play media type <%s>, Id <%s>", media_type, media_id)
if media_type == MEDIA_TYPE_CHANNEL:
list_id, _, channel_id = media_id.partition("/")
if channel_id:
await self._tv.setChannel(channel_id, list_id)
await self._async_update_soon()
else:
_LOGGER.error("Unable to find channel <%s>", media_id)
elif media_type == MEDIA_TYPE_APP:
if app := self._tv.applications.get(media_id):
await self._tv.setApplication(app["intent"])
await self._async_update_soon()
else:
_LOGGER.error("Unable to find application <%s>", media_id)
else:
_LOGGER.error("Unsupported media type <%s>", media_type)
async def async_browse_media_channels(self, expanded):
"""Return channel media objects."""
if expanded:
children = [
BrowseMedia(
title=channel.get("name", f"Channel: {channel_id}"),
media_class=MEDIA_CLASS_CHANNEL,
media_content_id=f"alltv/{channel_id}",
media_content_type=MEDIA_TYPE_CHANNEL,
can_play=True,
can_expand=False,
)
for channel_id, channel in self._tv.channels.items()
]
else:
children = None
return BrowseMedia(
title="Channels",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="channels",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_favorites(self, list_id, expanded):
"""Return channel media objects."""
if expanded:
favorites = await self._tv.getFavoriteList(list_id)
if favorites:
def get_name(channel):
channel_data = self._tv.channels.get(str(channel["ccid"]))
if channel_data:
return channel_data["name"]
return f"Channel: {channel['ccid']}"
children = [
BrowseMedia(
title=get_name(channel),
media_class=MEDIA_CLASS_CHANNEL,
media_content_id=f"{list_id}/{channel['ccid']}",
media_content_type=MEDIA_TYPE_CHANNEL,
can_play=True,
can_expand=False,
)
for channel in favorites
]
else:
children = None
else:
children = None
favorite = self._tv.favorite_lists[list_id]
return BrowseMedia(
title=favorite.get("name", f"Favorites {list_id}"),
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=f"favorites/{list_id}",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_applications(self, expanded):
"""Return application media objects."""
if expanded:
children = [
BrowseMedia(
title=application["label"],
media_class=MEDIA_CLASS_APP,
media_content_id=application_id,
media_content_type=MEDIA_TYPE_APP,
can_play=True,
can_expand=False,
thumbnail=self.get_browse_image_url(
MEDIA_TYPE_APP, application_id, media_image_id=None
),
)
for application_id, application in self._tv.applications.items()
]
else:
children = None
return BrowseMedia(
title="Applications",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="applications",
media_content_type=MEDIA_TYPE_APPS,
children_media_class=MEDIA_CLASS_APP,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_favorite_lists(self, expanded):
"""Return favorite media objects."""
if self._tv.favorite_lists and expanded:
children = [
await self.async_browse_media_favorites(list_id, False)
for list_id in self._tv.favorite_lists
]
else:
children = None
return BrowseMedia(
title="Favorites",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="favorite_lists",
media_content_type=MEDIA_TYPE_CHANNELS,
children_media_class=MEDIA_CLASS_CHANNEL,
can_play=False,
can_expand=True,
children=children,
)
async def async_browse_media_root(self):
"""Return root media objects."""
return BrowseMedia(
title="Library",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="",
media_content_type="",
can_play=False,
can_expand=True,
children=[
await self.async_browse_media_channels(False),
await self.async_browse_media_applications(False),
await self.async_browse_media_favorite_lists(False),
],
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if not self._tv.on:
raise BrowseError("Can't browse when tv is turned off")
if media_content_id in (None, ""):
return await self.async_browse_media_root()
path = media_content_id.partition("/")
if path[0] == "channels":
return await self.async_browse_media_channels(True)
if path[0] == "applications":
return await self.async_browse_media_applications(True)
if path[0] == "favorite_lists":
return await self.async_browse_media_favorite_lists(True)
if path[0] == "favorites":
return await self.async_browse_media_favorites(path[2], True)
raise BrowseError(f"Media not found: {media_content_type} / {media_content_id}")
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
"""Serve album art. Returns (content, content_type)."""
try:
if media_content_type == MEDIA_TYPE_APP and media_content_id:
return await self._tv.getApplicationIcon(media_content_id)
if media_content_type == MEDIA_TYPE_CHANNEL and media_content_id:
return await self._tv.getChannelLogo(media_content_id)
except ConnectionFailure:
_LOGGER.warning("Failed to fetch image")
return None, None
async def async_get_media_image(self):
"""Serve album art. Returns (content, content_type)."""
return await self.async_get_browse_image(
self.media_content_type, self.media_content_id, None
)
@callback
def _update_from_coordinator(self):
if self._tv.on:
if self._tv.powerstate in ("Standby", "StandbyKeep"):
self._state = STATE_OFF
else:
self._state = STATE_ON
else:
self._state = STATE_OFF
self._sources = {
srcid: source.get("name") or f"Source {srcid}"
for srcid, source in (self._tv.sources or {}).items()
}
if self._tv.channel_active:
self._media_content_type = MEDIA_TYPE_CHANNEL
self._media_content_id = f"all/{self._tv.channel_id}"
self._media_title = self._tv.channels.get(self._tv.channel_id, {}).get(
"name"
)
self._media_channel = self._media_title
elif self._tv.application_id:
self._media_content_type = MEDIA_TYPE_APP
self._media_content_id = self._tv.application_id
self._media_title = self._tv.applications.get(
self._tv.application_id, {}
).get("label")
self._media_channel = None
else:
self._media_content_type = None
self._media_content_id = None
self._media_title = self._sources.get(self._tv.source_id)
self._media_channel = None
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._update_from_coordinator()
super()._handle_coordinator_update()
|
the-stack_0_5118 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""doc
"""
import sys
import time
import datetime
import os
import yaml
import random
import shutil
import six
import warnings
import glob
import numpy as np
def get_last_dir(path):
"""Get the last directory of a path.
"""
if os.path.isfile(path):
# e.g: "../checkpoints/task_name/epoch0_step300/predict.txt"
# return "epoch0_step300"
last_dir = path.split("/")[-2]
elif os.path.isdir(path):
if path[-1] == '/':
# e.g: "../checkpoints/task_name/epoch0_step300/"
last_dir = path.split('/')[-2]
else:
# e.g: "../checkpoints/task_name/epoch0_step300"
last_dir = path.split('/')[-1]
else:
# path or file does not exist
warnings.warn('%s is not an existing file or path' % path)
last_dir = ""
return last_dir
class AttrDict(dict):
def __init__(self, d={}, **kwargs):
if kwargs:
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
# for k in self.__class__.__dict__.keys():
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
# setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [
self.__class__(x) if isinstance(x, dict) else x for x in value
]
elif isinstance(value, dict) and not isinstance(value, self.__class__):
value = self.__class__(value)
super(AttrDict, self).__setattr__(name, value)
super(AttrDict, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, attr):
try:
value = super(AttrDict, self).__getitem__(attr)
except KeyError:
# log.warn("%s attribute is not existed, return None" % attr)
warnings.warn("%s attribute is not existed, return None" % attr)
value = None
return value
def update(self, e=None, **f):
d = e or dict()
d.update(f)
for k in d:
setattr(self, k, d[k])
def pop(self, k, d=None):
delattr(self, k)
return super(AttrDict, self).pop(k, d)
def make_dir(path):
"""Build directory"""
if not os.path.exists(path):
os.makedirs(path)
def load_config(config_file):
"""Load config file"""
with open(config_file) as f:
if hasattr(yaml, 'FullLoader'):
config = yaml.load(f, Loader=yaml.FullLoader)
else:
config = yaml.load(f)
return config
def create_necessary_dirs(config, worker_index=None):
"""Create some necessary directories to save some important files.
"""
config.log_dir = os.path.join(config.log_dir, config.task_name)
config.save_dir = os.path.join(config.save_dir, config.task_name)
config.output_dir = os.path.join(config.output_dir, config.task_name)
# if worker_index is None or worker_index == 0:
np.random.seed(worker_index)
time.sleep(np.random.uniform() * 2)
make_dir(config.log_dir)
make_dir(config.save_dir)
make_dir(config.output_dir)
def save_files(config):
"""Save config file so that we can know the config when we look back
"""
filelist = config.files2saved
targetpath = config.log_dir
if filelist is not None:
for file_or_dir in filelist:
if os.path.isdir(file_or_dir):
last_name = get_last_dir(file_or_dir)
dst = os.path.join(targetpath, last_name)
try:
copy_and_overwrite(file_or_dir, dst)
except Exception as e:
print(e)
print("backup %s to %s" % (file_or_dir, targetpath))
else:
for filename in files(files=file_or_dir):
if os.path.isfile(filename):
print("backup %s to %s" % (filename, targetpath))
shutil.copy2(filename, targetpath)
else:
print("%s is not existed." % filename)
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def files(curr_dir='./', files='*.py'):
for i in glob.glob(os.path.join(curr_dir, files)):
yield i
def prepare_config(config_file,
isCreate=False,
isSave=False,
worker_index=None):
if os.path.isfile(config_file):
config = load_config(config_file)
config = AttrDict(config)
else:
raise TypeError("%s is not a yaml file" % config_file)
if isCreate:
create_necessary_dirs(config, worker_index)
if isSave:
if worker_index is None or worker_index == 0:
save_files(config)
return config
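# --- Hedged usage sketch (added for illustration; "config.yaml" is an assumed
# --- path and is expected to define task_name, log_dir, save_dir, output_dir) ---
if __name__ == "__main__":
    cfg = prepare_config("config.yaml", isCreate=True, isSave=True, worker_index=0)
    # AttrDict allows attribute-style access to the loaded settings.
    print(cfg.task_name, cfg.log_dir)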
|
the-stack_0_5119 | """*********************************************************************
* *
* Description: A simple asynchronous http library *
* Date: 12/02/2021 *
* Author: Marcos Vinicios da Silveira *
* *
* *
************************************************************************
"""
import os
import sys
from codecs import open
from setuptools import setup
BASE = os.path.abspath(os.path.dirname(__file__))
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
packages = ['fasthttp', 'tests']
requires = [
"requests==2.22.0",
"colorama==0.4.3",
"aiohttp==3.7.4.post0",
"urllib3==1.25.11",
"dataclasses==0.8" ,
]
test_requirements = []
about = {}
with open(os.path.join(BASE, 'fasthttp', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.md', 'r', 'utf-8') as f:
readme = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
include_package_data=True,
python_requires=">=3.6",
install_requires=requires,
license=about['__license__'],
tests_require=test_requirements,
)
# end-of-file
|
the-stack_0_5120 | # Usage: python demo_receiver.py [dummy|ss|gbn|sr]
import config
import sys
import time
import util
def msg_handler(msg):
print(repr(msg))
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python demo_receiver.py [dummy|ss|gbn|sr]")
sys.exit(1)
transport_layer = None
name = sys.argv[1]
try:
transport_layer = util.get_transport_layer_by_name(
name, config.RECEIVER_LISTEN_PORT, config.SENDER_LISTEN_PORT, msg_handler
)
while True:
time.sleep(1)
finally:
if transport_layer:
transport_layer.shutdown()
|
the-stack_0_5124 | import json
import os
import requests # Install with easy_install or pip install
def get_release(version_tag):
print('Getting release metadata for {version_tag}...'.format(
version_tag=version_tag))
releases = requests.get(
'https://api.github.com/repos/facebook/buck/releases').json()
for data in releases:
if 'tag_name' in data and data['tag_name'] == version_tag:
return data
raise RuntimeError(
'Unable to find release for version {version_tag}!'.format(
version_tag=version_tag))
def upload_release(bottle_file, upload_url, github_token, content_type):
fname = os.path.basename(bottle_file)
upload_url = upload_url.replace('{?name,label}', '?name=') + fname
print('Uploading release to {url}...'.format(url=upload_url))
with open(bottle_file, 'rb') as bottle_bin:
r = requests.post(
upload_url,
auth=('token', github_token),
headers=content_type,
data=bottle_bin)
print(json.dumps(r.json(), indent=2))
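# --- Hedged usage sketch (illustrative; the tag, bottle file name and token
# --- placeholders below are assumptions) ---
if __name__ == '__main__':
    release = get_release('v2021.01.01.01')
    upload_release(
        'buck.2021.01.01.01.bottle.tar.gz',
        release['upload_url'],
        '<github token>',
        {'Content-Type': 'application/x-tar'})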
|
the-stack_0_5125 | #!/usr/bin/env -S python3 -B
# Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import xmlrpc.client
IP = '127.0.0.1'
PORT = 9000
if sys.platform == 'linux':
IP = '10.10.10.5'
# Passing in sys.argv[2:] gets rid of the script name and key to the apps register. The remaining
# values in the list are key-value pairs, e.g. [option1, value1, option2, value2, ...]
with xmlrpc.client.ServerProxy('http://' + IP + ':' + str(PORT) + '/', allow_none=True) as proxy:
proxy.start(sys.argv[1], sys.argv[2:])
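# Example invocation (illustrative; the app name and options are assumptions):
#   ./this_script.py chip-all-clusters-app --discriminator 3840
# which forwards ('chip-all-clusters-app', ['--discriminator', '3840'])
# to the XML-RPC server's start() method.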
|
the-stack_0_5127 | import math
from datetime import datetime, timedelta
from .data_types import (
Header, FileControl, BatchHeader,
BatchControl, EntryDetail, AddendaRecord
)
class AchFile(object):
"""
This class stores the ACH data. Its main external methods
are `add_batch` and `render_to_string`.
"""
def __init__(self, file_id_mod, settings):
"""
The file_id_mod should be 'A' for the first of the day, 'B'
for the second and so on.
"""
self.settings = settings
try:
self.header = Header(
settings['immediate_dest'],
settings['immediate_org'], file_id_mod,
settings['immediate_dest_name'], settings['immediate_org_name']
)
except KeyError:
raise Exception(
'Settings require: "immediate_dest", "immediate_org", \
"immediate_dest_name", and "immediate_org_name"'
)
self.batches = list()
def add_batch(self, std_ent_cls_code, batch_entries=None,
credits=True, debits=False, eff_ent_date=None,
company_id=None):
"""
Use this to add batches to the file. For valid std_ent_cls_codes see:
http://en.wikipedia.org/wiki/Automated_Clearing_House#SEC_codes
"""
if batch_entries is None:
batch_entries = list()
entry_desc = self.get_entry_desc(std_ent_cls_code)
batch_count = len(self.batches) + 1
if not eff_ent_date:
eff_ent_date = datetime.today() + timedelta(days=1)
if credits and debits:
serv_cls_code = '200'
elif credits:
serv_cls_code = '220'
elif debits:
serv_cls_code = '225'
batch_header = BatchHeader(
serv_cls_code=serv_cls_code,
batch_id=batch_count,
company_id=company_id or self.settings['company_id'],
std_ent_cls_code=std_ent_cls_code,
entry_desc=entry_desc,
desc_date='',
eff_ent_date=eff_ent_date.strftime('%y%m%d'), # YYMMDD
orig_stat_code='1',
orig_dfi_id=self.settings['immediate_dest'][:8],
company_name=self.settings['immediate_org_name']
)
entries = list()
entry_counter = 1
for record in batch_entries:
entry = EntryDetail(std_ent_cls_code)
entry.transaction_code = record.get('type')
entry.recv_dfi_id = record.get('routing_number')
if len(record['routing_number']) < 9:
entry.calc_check_digit()
else:
entry.check_digit = record['routing_number'][8]
entry.dfi_acnt_num = record['account_number']
entry.amount = int(round(float(record['amount']) * 100))
entry.ind_name = record['name'].upper()[:22]
entry.trace_num = self.settings['immediate_dest'][:8] \
+ entry.validate_numeric_field(entry_counter, 7)
entries.append((entry, record.get('addenda', [])))
entry_counter += 1
self.batches.append(FileBatch(batch_header, entries))
self.set_control()
def set_control(self):
batch_count = len(self.batches)
block_count = self.get_block_count(self.batches)
entry_hash = self.get_entry_hash(self.batches)
entadd_count = self.get_entadd_count(self.batches)
debit_amount = self.get_debit_amount(self.batches)
credit_amount = self.get_credit_amount(self.batches)
self.control = FileControl(
batch_count, block_count, entadd_count,
entry_hash, debit_amount, credit_amount
)
def get_block_count(self, batches):
return int(math.ceil(self.get_lines(batches) / 10.0))
def get_lines(self, batches):
header_count = 1
control_count = 1
batch_header_count = len(batches)
batch_footer_count = batch_header_count
entadd_count = self.get_entadd_count(batches)
lines = header_count + control_count + batch_header_count \
+ batch_footer_count + entadd_count
return lines
def get_entadd_count(self, batches):
entadd_count = 0
for batch in batches:
entadd_count = entadd_count + int(batch.batch_control.entadd_count)
return entadd_count
def get_entry_hash(self, batches):
entry_hash = 0
for batch in batches:
entry_hash = entry_hash + int(batch.batch_control.entry_hash)
if len(str(entry_hash)) > 10:
pos = len(str(entry_hash)) - 10
entry_hash = str(entry_hash)[pos:]
else:
entry_hash = str(entry_hash)
return entry_hash
def get_debit_amount(self, batches):
debit_amount = 0
for batch in batches:
debit_amount = debit_amount + int(batch.batch_control.debit_amount)
return debit_amount
def get_credit_amount(self, batches):
credit_amount = 0
for batch in batches:
credit_amount = credit_amount + \
int(batch.batch_control.credit_amount)
return credit_amount
def get_nines(self, rows, line_ending):
nines = ''
for i in range(rows):
nines += '9'*94
if i == rows - 1:
continue
nines += line_ending
return nines
def get_entry_desc(self, std_ent_cls_code):
if std_ent_cls_code == 'PPD':
entry_desc = 'PAYROLL'
elif std_ent_cls_code == 'CCD':
entry_desc = 'DUES'
else:
entry_desc = 'OTHER'
return entry_desc
def render_to_string(self, force_crlf=False):
"""
Renders a nacha file as a string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.header.get_row() + line_ending
for batch in self.batches:
ret_string += batch.render_to_string(force_crlf=force_crlf)
ret_string += self.control.get_row() + line_ending
lines = self.get_lines(self.batches)
nine_lines = int(round(10 * (math.ceil(lines / 10.0) - (lines / 10.0))))
ret_string += self.get_nines(nine_lines, line_ending)
return ret_string
class FileBatch(object):
"""
Holds:
BatchHeader (1)
Entry (n) <-- multiple
BatchControl (1)
"""
def __init__(self, batch_header, entries):
"""
args: batch_header (BatchHeader), entries (List[FileEntry])
"""
entadd_count = 0
self.batch_header = batch_header
self.entries = []
for entry, addenda in entries:
entadd_count += 1
entadd_count += len(addenda)
self.entries.append(FileEntry(entry, addenda))
# set up batch_control
batch_control = BatchControl(self.batch_header.serv_cls_code)
batch_control.entadd_count = entadd_count
batch_control.entry_hash = self.get_entry_hash(self.entries)
batch_control.debit_amount = self.get_debit_amount(self.entries)
batch_control.credit_amount = self.get_credit_amount(self.entries)
batch_control.company_id = self.batch_header.company_id
batch_control.orig_dfi_id = self.batch_header.orig_dfi_id
batch_control.batch_id = self.batch_header.batch_id
self.batch_control = batch_control
def get_entry_hash(self, entries):
entry_hash = 0
for entry in entries:
entry_hash += int(entry.entry_detail.recv_dfi_id[:8])
if len(str(entry_hash)) > 10:
pos = len(str(entry_hash)) - 10
entry_hash = str(entry_hash)[pos:]
else:
entry_hash = str(entry_hash)
return entry_hash
def get_debit_amount(self, entries):
debit_amount = 0
for entry in entries:
if str(entry.entry_detail.transaction_code) in \
['27', '37', '28', '38']:
debit_amount = debit_amount + int(entry.entry_detail.amount)
return debit_amount
def get_credit_amount(self, entries):
credit_amount = 0
for entry in entries:
if str(entry.entry_detail.transaction_code) in \
['22', '32', '23', '33']:
credit_amount += int(entry.entry_detail.amount)
return credit_amount
def render_to_string(self, force_crlf=False):
"""
Renders a nacha file batch to string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.batch_header.get_row() + line_ending
for entry in self.entries:
ret_string += entry.render_to_string(force_crlf=force_crlf)
ret_string += self.batch_control.get_row() + line_ending
return ret_string
class FileEntry(object):
"""
Holds:
EntryDetail (1)
AddendaRecord (n) <-- for some types of entries there can be more than one
"""
def __init__(self, entry_detail, addenda_record=[]):
"""
args: entry_detail( EntryDetail), addenda_record (List[AddendaRecord])
"""
self.entry_detail = entry_detail
self.addenda_record = []
for index, addenda in enumerate(addenda_record):
self.addenda_record.append(
AddendaRecord(
self.entry_detail.std_ent_cls_code,
pmt_rel_info=addenda.get('payment_related_info').upper(),
add_seq_num=index + 1,
ent_det_seq_num=entry_detail.trace_num[-7:]
)
)
if self.addenda_record:
self.entry_detail.add_rec_ind = 1
def render_to_string(self, force_crlf=False):
"""
Renders a nacha batch entry and addenda to string
"""
line_ending = "\n"
if force_crlf:
line_ending = "\r\n"
ret_string = self.entry_detail.get_row() + line_ending
for addenda in self.addenda_record:
ret_string += addenda.get_row() + line_ending
return ret_string
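# --- Hedged usage sketch (illustrative; the settings, routing/account numbers
# --- and amount below are made-up assumptions) ---
#
#   settings = {
#       'immediate_dest': '123456780',
#       'immediate_org': '123456780',
#       'immediate_dest_name': 'YOUR BANK',
#       'immediate_org_name': 'YOUR COMPANY',
#       'company_id': '1234567890',
#   }
#   ach_file = AchFile('A', settings)
#   ach_file.add_batch('PPD', [{
#       'type': '22',
#       'routing_number': '12345678',
#       'account_number': '11232132',
#       'amount': '10.00',
#       'name': 'Alice Wanderdust',
#   }], credits=True, debits=False)
#   nacha_text = ach_file.render_to_string()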
|
the-stack_0_5128 | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, overload
from ...exceptions import EmbedOverflow
from ...utils.api_object import APIObject
from ...utils.conversion import construct_client_dict
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import List, Optional
from ..message.attachment import Attachment
from ..message.component import MessageComponent
from ..message.embed import Embed
from ..message.user_message import UserMessage
from ..message.user_message import AllowedMentions
from ..user.user import User
from ..guild.guild import Guild
from ..guild.channel import Channel
from ...utils.types import APINullable
from ...utils.snowflake import Snowflake
from ...client import Client
class WebhookCompatibility(Enum):
GitHub = "github"
Slack = "slack"
Default = ""
class WebhookType(IntEnum):
"""Represents the type of webhook.
Attributes
----------
INCOMING:
Incoming Webhooks can post messages to a channel with a
generated token.
CHANNEL_FOLLOWER:
Channel Follower Webhooks are internal webhooks used with
Channel Following to post new messages into channels.
APPLICATION:
Application webhooks are webhooks used with Interactions
"""
INCOMING = 1
CHANNEL_FOLLOWER = 2
APPLICATION = 3
@dataclass(repr=False)
class Webhook(APIObject):
"""Represents a Discord channel webhook.
Attributes
----------
id: :class:`~pincer.utils.snowflake.Snowflake`
The id of the webhook
type: :class:`~pincer.objects.guild.webhook.WebhookType`
The type of the webhook
channel_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The channel id this webhook is for, if any
name: Optional[:class:`str`]
The default name of the webhook
avatar: Optional[:class:`str`]
The default user avatar hash of the webhook
application_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The bot/OAuth2 application that created this webhook
user: APINullable[:class:`~pincer.objects.user.user.User`]
The user this webhook was created by
(not returned when getting a webhook with its token)
token: APINullable[:class:`str`]
The secure token of the webhook
(returned for Incoming Webhooks)
source_guild: APINullable[:class:`~pincer.objects.guild.guild.Guild`]
The guild of the channel that this webhook is following
(returned for Channel Follower Webhooks)
source_channel: APINullable[:class:`~pincer.objects.guild.channel.Channel`]
The channel that this webhook is following
(returned for Channel Follower Webhooks)
url: APINullable[:class:`str`]
The url used for executing the webhook
(returned by the webhooks OAuth2 flow)
guild_id: APINullable[Optional[:class:`~pincer.objects.guild.guild.Guild`]]
The guild id this webhook is for, if any
"""
id: Snowflake
type: WebhookType
channel_id: Optional[Snowflake] = None
name: Optional[str] = None
avatar: Optional[str] = None
application_id: Optional[Snowflake] = None
user: APINullable[User] = MISSING
token: APINullable[str] = MISSING
source_guild: APINullable[Guild] = MISSING
source_channel: APINullable[Channel] = MISSING
url: APINullable[str] = MISSING
guild_id: APINullable[Optional[Snowflake]] = MISSING
async def edit(
self,
*,
name: Optional[str] = None,
avatar: Optional[str] = None,
channel_id: Optional[Snowflake] = None,
token: Optional[str] = None
) -> Webhook:
"""
Modifies a webhook and returns it.
Requires the ``MANAGE_WEBHOOKS`` permission.
Parameters
----------
name: Optional[:class:`str`]
The new name of the webhook
avatar: Optional[:class:`str`]
The new avatar hash of the webhook
channel_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
The new channel id this webhook is for
token: Optional[:class:`str`]
The new token of the webhook
"""
request_route = (
f"webhooks/{self.id}"
+ (f"/{token}" if token else "")
)
request_data = {
"name": name,
"avatar": avatar,
"channel_id": channel_id
}
if token:
del request_data["channel_id"]
data = await self._http.patch(
request_route,
data=request_data
)
return Webhook.from_dict(
construct_client_dict(self._client, data)
)
async def delete(self, token: Optional[str] = None):
"""
Deletes a webhook.
Requires the ``MANAGE_WEBHOOKS`` permission.
Parameters
----------
token: Optional[:class:`str`]
The token of the webhook
"""
await self._http.delete(
f"webhooks/{self.id}"
+ (f"/{token}" if token else "")
)
@overload
async def execute(
self,
webhook_compatibility: WebhookCompatibility = WebhookCompatibility.Default, # noqa: E501
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None,
content: Optional[str] = None,
username: Optional[str] = None,
avatar_url: Optional[str] = None,
tts: Optional[bool] = None,
embeds: Optional[List[Embed]] = None,
allowed_mentions: Optional[AllowedMentions] = None,
components: Optional[List[MessageComponent]] = None,
files: Optional[str] = None, # TODO: Add support for files
payload_json: Optional[str] = None,
attachments: Optional[List[Attachment]] = None
):
"""|coro|
Executes a webhook.
Note that when sending a message, you must provide a value
for at least one of ``content``, ``embeds``, or ``file``.
Parameters
----------
webhook_compatibility: :class:`~pincer.objects.guild.webhook.WebhookCompatibility`
The compatibility of the webhook
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
content: Optional[:class:`str`]
The message contents (up to 2000 characters)
username: Optional[:class:`str`]
Override the default username of the webhook
avatar_url: Optional[:class:`str`]
Override the default avatar of the webhook
tts: Optional[:class:`bool`]
True if this is a TTS message
embeds: Optional[List[:class:`~pincer.objects.message.embed.Embed`]]
Embedded ``rich`` content, up to 10 embeds
allowed_mentions: Optional[:class:`~pincer.objects.message.user_message.AllowedMentions`]
Allowed mentions for the message
components: Optional[List[:class:`~pincer.objects.message.component.MessageComponent`]]
The components to include in the message
files: Optional[:class:`str`]
The contents of the file being sent
payload_json: Optional[:class:`str`]
JSON encoded body of non-file params
attachments: Optional[List[:class:`~pincer.objects.message.attachment.Attachment`]]
Attachment objects with filename and description
"""
...
async def execute(
self,
webhook_compatibility: WebhookCompatibility = WebhookCompatibility.Default, # noqa: E501
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None,
**kwargs
):
if len(kwargs.get("embeds", [])) > 10:
raise EmbedOverflow("You can only include up to 10 embeds")
request_route = f"webhooks/{self.id}/{self.token}"
# Adding the subdirectory
if webhook_compatibility.value:
request_route += f"/{webhook_compatibility.value}"
# Adding query params
if wait is not None:
request_route += f"?{wait=}"
if thread_id is not None:
request_route += "&?"[wait is None] + f"{thread_id=}"
if webhook_compatibility == WebhookCompatibility.Default:
request_data = kwargs
else:
request_data = None
await self._http.post(request_route, data=request_data)
async def execute_github(
self,
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None
):
"""|coro|
Executes a GitHub compatible webhook.
Parameters
----------
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
"""
await self.execute(
WebhookCompatibility.GitHub,
thread_id=thread_id,
wait=wait
)
async def execute_slack(
self,
*,
thread_id: Optional[Snowflake] = None,
wait: Optional[bool] = None
):
"""|coro|
Executes a Slack compatible webhook.
Parameters
----------
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread to send message in
wait: Optional[:class:`bool`]
Waits for server confirmation of message send before
response (defaults to ``true``, when ``false`` a message
that is not saved does not return an error)
"""
await self.execute(
WebhookCompatibility.Slack,
thread_id=thread_id,
wait=wait
)
async def get_message(
self,
message_id: Snowflake,
thread_id: Snowflake
) -> UserMessage:
"""|coro|
Returns a previously-sent webhook message from the same token.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to get
thread_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the thread to get the message from
Returns
-------
:class:`~pincer.objects.message.message.Message`
The message
"""
return UserMessage.from_dict(
construct_client_dict(
self._client,
await self._http.get(
f"webhooks/{self.id}/{self.token}/messages/{message_id}",
params={"thread_id": thread_id}
)
)
)
async def delete_message(
self,
message_id: Snowflake,
thread_id: Snowflake
):
"""|coro|
Deletes a message created by a webhook.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to delete
thread_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the thread to delete the message from
"""
await self._http.delete(
f"webhooks/{self.id}/{self.token}/messages/{message_id}"
+ (f"?{thread_id=}" if thread_id else "")
)
@overload
async def edit_message(
self,
message_id: Snowflake,
*,
thread_id: Optional[Snowflake] = None,
content: Optional[str] = None,
embeds: Optional[List[Embed]] = None,
allowed_mentions: Optional[AllowedMentions] = None,
components: Optional[List[MessageComponent]] = None,
files: Optional[str] = None, # TODO: Add support for files
payload_json: Optional[str] = None,
attachments: Optional[List[Attachment]] = None
) -> UserMessage:
"""|coro|
Edits a previously-sent webhook message from the same token.
Parameters
----------
message_id: :class:`~pincer.utils.snowflake.Snowflake`
The ID of the message to edit
thread_id: Optional[:class:`~pincer.utils.snowflake.Snowflake`]
ID of the thread the message is in
content: Optional[:class:`str`]
The new content of the message (up to 2000 characters)
embeds: Optional[List[:class:`~pincer.objects.message.embed.Embed`]]
Embedded ``rich`` content, up to 10 embeds
allowed_mentions: Optional[:class:`~pincer.objects.message.user_message.AllowedMentions`]
Allowed mentions for the message
components: Optional[List[:class:`~pincer.objects.message.component.MessageComponent`]]
The components to include in the message
files: Optional[:class:`str`]
The contents of the file being sent/edited
payload_json: Optional[:class:`str`]
JSON encoded body of non-file params
(multipart/form-data only)
attachments: Optional[List[:class:`~pincer.objects.message.attachment.Attachment`]]
Attached files to keep and
possible descriptions for new files
"""
...
async def edit_message(
self,
message_id: Snowflake,
*,
thread_id: Optional[Snowflake] = None,
**kwargs
) -> UserMessage:
if len(kwargs.get("embeds", [])) > 10:
raise EmbedOverflow("You can only include up to 10 embeds")
data = await self._http.patch(
f"webhooks/{self.id}/{self.token}/messages/{message_id}"
+ (f"?{thread_id=}" if thread_id else ""),
data=kwargs
)
return UserMessage.from_dict(
construct_client_dict(self._client, data)
)
@classmethod
async def from_id(
cls,
client: Client,
id: Snowflake,
token: Optional[str] = None
) -> Webhook:
"""|coro|
Gets a webhook by its ID.
Parameters
----------
client: `~pincer.client.Client`
The client to use to make the request.
id: `~pincer.utils.snowflake.Snowflake`
The ID of the webhook to get.
token: Optional[:class:`str`]
The token of the webhook to get.
Returns
-------
`~pincer.objects.guild.webhook.Webhook`
The webhook with the given ID.
"""
return cls.from_dict(
construct_client_dict(
client,
await client.http.get(
f"webhooks/{id}"
+ (f"/{token}" if token else "")
)
)
)
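# --- Hedged usage sketch (illustrative; the webhook id, token and message
# --- content are assumptions) ---
#
#   async def notify(client: Client):
#       webhook = await Webhook.from_id(client, 123456789012345678, "<token>")
#       await webhook.execute(content="build finished", wait=True)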
|
the-stack_0_5129 | # -*- coding: utf-8 -*-
""" pykwalify """
# python stdlib
import logging
import logging.config
import os
__author__ = 'Grokzen <[email protected]>'
__version_info__ = (1, 8, 0)
__version__ = '.'.join(map(str, __version_info__))
log_level_to_string_map = {
5: "DEBUG",
4: "INFO",
3: "WARNING",
2: "ERROR",
1: "CRITICAL",
0: "INFO"
}
def init_logging(log_level):
"""
Init logging settings with default set to INFO
"""
log_level = log_level_to_string_map[min(log_level, 5)]
msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s"
logging_conf = {
"version": 1,
"root": {
"level": log_level,
"handlers": ["console"]
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": log_level,
"formatter": "simple",
"stream": "ext://sys.stdout"
}
},
"formatters": {
"simple": {
"format": " {0}".format(msg)
}
}
}
logging.config.dictConfig(logging_conf)
partial_schemas = {}
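# Illustrative note (not in the original source): the verbosity count passed to
# init_logging maps through log_level_to_string_map, so init_logging(0) keeps the
# default INFO level while init_logging(5) enables DEBUG output.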
|
the-stack_0_5130 | # -*- coding:utf-8 -*-
import logging
def en_logging(log_file, log_level):
level = 0
if log_level == "debug":
level = logging.DEBUG
elif log_level == "info":
level = logging.INFO
elif log_level == "warn":
level = logging.WARN
elif log_level == "error":
level = logging.ERROR
elif log_level == "fatal":
level = logging.FATAL
else:
level = logging.INFO
logging.basicConfig(filename=log_file, format='%(asctime)s:%(filename)s:%(lineno)d:%(levelname)s: %(message)s',
filemode='w', level=level)
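# --- Hedged usage sketch (illustrative; the log file path is an assumption) ---
if __name__ == "__main__":
    en_logging("/tmp/app.log", "debug")
    logging.debug("file logging configured at debug level")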
|
the-stack_0_5131 | # -*- coding: utf-8 -*-
import pytest
import gevent
from raiden.utils import sha3
from raiden.api.python import RaidenAPI
from raiden.messages import (
decode,
Ack,
Ping,
)
from raiden.tests.utils.transport import UnreliableTransport
from raiden.tests.utils.messages import setup_messages_cb
from raiden.tests.utils.transfer import channel
from raiden.tests.fixtures.raiden_network import CHAIN
@pytest.mark.parametrize('number_of_nodes', [2])
def test_ping(raiden_network):
app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking
messages = setup_messages_cb()
ping_message = Ping(nonce=0)
app0.raiden.sign(ping_message)
ping_encoded = ping_message.encode()
async_result = app0.raiden.protocol.send_raw_with_result(
ping_encoded,
app1.raiden.address,
)
assert async_result.wait(2), 'The message was not acknowledged'
expected_echohash = sha3(ping_encoded + app1.raiden.address)
messages_decoded = [decode(m) for m in messages]
ack_message = next(
decoded
for decoded in messages_decoded
if isinstance(decoded, Ack) and decoded.echo == expected_echohash
)
# the ping message was sent and acknowledged
assert ping_encoded in messages
assert ack_message
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('transport_class', [UnreliableTransport])
def test_ping_unreachable(raiden_network):
app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking
# drop everything to force disabling of re-sends
app0.raiden.protocol.transport.droprate = 1
app1.raiden.protocol.transport.droprate = 1
app0.raiden.protocol.retry_interval = 0.1 # for fast tests
messages = setup_messages_cb()
ping_message = Ping(nonce=0)
app0.raiden.sign(ping_message)
ping_encoded = ping_message.encode()
async_result = app0.raiden.protocol.send_raw_with_result(
ping_encoded,
app1.raiden.address,
)
assert async_result.wait(2) is None, "The message was dropped, it can't be acknowledged"
# Raiden node will start pinging as soon as a new channel
# is established. We need to test if
# a) there is our original message in the queue
# b) there are only Ping message types in
messages_decoded = [decode(m) for m in messages]
assert ping_message in messages_decoded
for message in messages_decoded:
assert isinstance(message, Ping)
@pytest.mark.parametrize('deposit', [0])
def test_receive_direct_before_deposit(raiden_network):
"""Regression test that ensures we accept incoming direct transfers, even if we don't have
any back channel balance. """
app0, app1, _ = raiden_network
token_address = app0.raiden.default_registry.token_addresses()[0]
channel_0_1 = channel(app0, app1, token_address)
back_channel = channel(app1, app0, token_address)
assert not channel_0_1.can_transfer
assert not back_channel.can_transfer
deposit_amount = 2
transfer_amount = 1
api0 = RaidenAPI(app0.raiden)
api0.deposit(token_address, app1.raiden.address, deposit_amount)
app0.raiden.chain.next_block()
gevent.sleep(app0.raiden.alarm.wait_time)
assert channel_0_1.can_transfer
assert not back_channel.can_transfer
assert back_channel.distributable == 0
api0.transfer_and_wait(token_address, transfer_amount, app1.raiden.address)
gevent.sleep(app1.raiden.alarm.wait_time)
assert back_channel.can_transfer
assert back_channel.distributable == transfer_amount
@pytest.mark.parametrize('deposit', [0])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_receive_mediated_before_deposit(raiden_network, token_addresses):
"""Regression test that ensures we accept incoming mediated transfers, even if we don't have
any back channel balance. """
app_alice, app_bob, app_charlie = raiden_network
token_address = token_addresses[0]
# path alice -> bob -> charlie
alice_bob = channel(app_alice, app_bob, token_address)
bob_alice = channel(app_bob, app_alice, token_address)
bob_charlie = channel(app_bob, app_charlie, token_address)
charlie_bob = channel(app_charlie, app_bob, token_address)
# ensure alice charlie is mediated
with pytest.raises(KeyError):
channel(app_alice, app_charlie, token_address)
assert not alice_bob.can_transfer
assert not bob_charlie.can_transfer
assert not bob_alice.can_transfer
deposit_amount = 3
RaidenAPI(app_alice.raiden).deposit(
token_address,
app_bob.raiden.address,
deposit_amount,
)
RaidenAPI(app_bob.raiden).deposit(
token_address,
app_charlie.raiden.address,
deposit_amount,
)
# catch up with the Balance events
for app in raiden_network:
app.raiden.poll_blockchain_events()
assert alice_bob.can_transfer
assert bob_charlie.can_transfer
assert not bob_alice.can_transfer
assert alice_bob.distributable == deposit_amount
assert bob_charlie.distributable == deposit_amount
transfer_amount = 1
async_result = app_alice.raiden.mediated_transfer_async(
token_address,
transfer_amount,
app_charlie.raiden.address,
1,
)
assert async_result.wait(10)
# give extra time for the intermediaries to process the secret messages and
# withdraw the tokens
gevent.sleep(1)
assert alice_bob.distributable == deposit_amount - transfer_amount
assert bob_charlie.distributable == deposit_amount - transfer_amount
assert bob_alice.distributable == transfer_amount
assert charlie_bob.distributable == transfer_amount
assert alice_bob.can_transfer
assert bob_alice.can_transfer
assert charlie_bob.can_transfer
|
the-stack_0_5132 | # coding=utf-8
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cached tf.Transform analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
# GOOGLE-INITIALIZATION
import apache_beam as beam
from apache_beam.testing import util as beam_test_util
import numpy as np
import six
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import impl_helper
from tensorflow_transform import nodes
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analysis_graph_builder
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform import test_case
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
def _get_counter_value(metrics, name):
metric = metrics.query(
beam.metrics.metric.MetricsFilter().with_name(name))['counters']
committed = sum([r.committed for r in metric])
attempted = sum([r.attempted for r in metric])
assert committed == attempted, '{} != {}'.format(committed, attempted)
return committed
class _TestPipeline(beam.Pipeline):
@property
def has_ran(self):
return hasattr(self, '_run_result')
@property
def metrics(self):
if not self.has_ran:
raise RuntimeError('Pipeline has to run before accessing its metrics')
return self._run_result.metrics()
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
assert not self.has_ran
self._run_result = self.run()
self._run_result.wait_until_finish()
def _preprocessing_fn_for_common_optimize_traversal(inputs):
_ = tft.vocabulary(inputs['s'])
x = inputs['x']
x_mean = tft.mean(x, name='x')
x_square_deviations = tf.square(x - x_mean)
# 2nd analysis phase defined here.
x_var = tft.mean(x_square_deviations, name='x_square_deviations')
x_normalized = (x - x_mean) / tf.sqrt(x_var)
return {'x_normalized': x_normalized}
_OPTIMIZE_TRAVERSAL_COMMON_CASE = dict(
testcase_name='common',
feature_spec={
'x': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string)
},
preprocessing_fn=_preprocessing_fn_for_common_optimize_traversal,
dataset_input_cache_dict={
b'__v0__CacheableCombineAccumulate[x/mean_and_var]-/Y\xe8\xd6\x1a\xb8OxZ_\xb4\xbes\x17AK&mXg':
'cache hit',
},
expected_dot_graph_str=r"""digraph G {
directed=True;
node [shape=Mrecord];
"CreateSavedModelForAnalyzerInputs[0]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('vocabulary/Reshape', \"Tensor\<shape: [None], \<dtype: 'string'\>\>\"), ('x/mean_and_var/Cast', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/truediv', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/truediv_1', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x/mean_and_var/zeros', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[0]}"];
"ApplySavedModel[0][span-0]" [label="{ApplySavedModel|dataset_key: span-0|phase: 0|label: ApplySavedModel[0][span-0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-0]";
"TensorSource[vocabulary][span-0]" [label="{ExtractFromDict|keys: ('vocabulary/Reshape',)|label: TensorSource[vocabulary][span-0]|partitionable: True}"];
"ApplySavedModel[0][span-0]" -> "TensorSource[vocabulary][span-0]";
"VocabularyAccumulate[vocabulary][span-0]" [label="{VocabularyAccumulate|vocab_ordering_type: 1|input_dtype: string|label: VocabularyAccumulate[vocabulary][span-0]|partitionable: True}"];
"TensorSource[vocabulary][span-0]" -> "VocabularyAccumulate[vocabulary][span-0]";
"ApplySavedModel[0][span-1]" [label="{ApplySavedModel|dataset_key: span-1|phase: 0|label: ApplySavedModel[0][span-1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-1]";
"TensorSource[vocabulary][span-1]" [label="{ExtractFromDict|keys: ('vocabulary/Reshape',)|label: TensorSource[vocabulary][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[vocabulary][span-1]";
"VocabularyAccumulate[vocabulary][span-1]" [label="{VocabularyAccumulate|vocab_ordering_type: 1|input_dtype: string|label: VocabularyAccumulate[vocabulary][span-1]|partitionable: True}"];
"TensorSource[vocabulary][span-1]" -> "VocabularyAccumulate[vocabulary][span-1]";
"FlattenCache[VocabularyMerge[vocabulary]]" [label="{Flatten|label: FlattenCache[VocabularyMerge[vocabulary]]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-0]" -> "FlattenCache[VocabularyMerge[vocabulary]]";
"VocabularyAccumulate[vocabulary][span-1]" -> "FlattenCache[VocabularyMerge[vocabulary]]";
"VocabularyMerge[vocabulary]" [label="{VocabularyMerge|vocab_ordering_type: 1|use_adjusted_mutual_info: False|min_diff_from_avg: None|label: VocabularyMerge[vocabulary]}"];
"FlattenCache[VocabularyMerge[vocabulary]]" -> "VocabularyMerge[vocabulary]";
"VocabularyOrderAndFilter[vocabulary]" [label="{VocabularyOrderAndFilter|top_k: None|frequency_threshold: None|coverage_top_k: None|coverage_frequency_threshold: None|key_fn: None|label: VocabularyOrderAndFilter[vocabulary]}"];
"VocabularyMerge[vocabulary]" -> "VocabularyOrderAndFilter[vocabulary]";
"VocabularyWrite[vocabulary]" [label="{VocabularyWrite|vocab_filename: vocab_vocabulary|store_frequency: False|input_dtype: string|label: VocabularyWrite[vocabulary]|fingerprint_shuffle: False}"];
"VocabularyOrderAndFilter[vocabulary]" -> "VocabularyWrite[vocabulary]";
"CreateTensorBinding[vocabulary/Placeholder]" [label="{CreateTensorBinding|tensor: vocabulary/Placeholder:0|is_asset_filepath: True|label: CreateTensorBinding[vocabulary/Placeholder]}"];
"VocabularyWrite[vocabulary]" -> "CreateTensorBinding[vocabulary/Placeholder]";
"DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]" [label="{DecodeCache|dataset_key: span-0|cache_key: \<bytes\>|cache_entry_identifier: CacheableCombineAccumulate[x/mean_and_var]|coder: \<JsonNumpyCacheCoder\>|label: DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]|partitionable: True}"];
"TensorSource[x/mean_and_var][span-1]" [label="{ExtractFromDict|keys: ('x/mean_and_var/Cast', 'x/mean_and_var/truediv', 'x/mean_and_var/truediv_1', 'x/mean_and_var/zeros')|label: TensorSource[x/mean_and_var][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[x/mean_and_var][span-1]";
"CacheableCombineAccumulate[x/mean_and_var][span-1]" [label="{CacheableCombineAccumulate|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineAccumulate[x/mean_and_var][span-1]|partitionable: True}"];
"TensorSource[x/mean_and_var][span-1]" -> "CacheableCombineAccumulate[x/mean_and_var][span-1]";
"FlattenCache[CacheableCombineMerge[x/mean_and_var]]" [label="{Flatten|label: FlattenCache[CacheableCombineMerge[x/mean_and_var]]|partitionable: True}"];
"DecodeCache[span-0][CacheableCombineAccumulate[x/mean_and_var]]" -> "FlattenCache[CacheableCombineMerge[x/mean_and_var]]";
"CacheableCombineAccumulate[x/mean_and_var][span-1]" -> "FlattenCache[CacheableCombineMerge[x/mean_and_var]]";
"CacheableCombineMerge[x/mean_and_var]" [label="{CacheableCombineMerge|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineMerge[x/mean_and_var]|{<0>0|<1>1}}"];
"FlattenCache[CacheableCombineMerge[x/mean_and_var]]" -> "CacheableCombineMerge[x/mean_and_var]";
"CreateTensorBinding[x/mean_and_var/Placeholder]" [label="{CreateTensorBinding|tensor: x/mean_and_var/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x/mean_and_var/Placeholder]}"];
"CacheableCombineMerge[x/mean_and_var]":0 -> "CreateTensorBinding[x/mean_and_var/Placeholder]";
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" [label="{CreateTensorBinding|tensor: x/mean_and_var/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x/mean_and_var/Placeholder_1]}"];
"CacheableCombineMerge[x/mean_and_var]":1 -> "CreateTensorBinding[x/mean_and_var/Placeholder_1]";
"CreateSavedModelForAnalyzerInputs[1]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_square_deviations/mean_and_var/Cast', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/truediv', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/truediv_1', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\"), ('x_square_deviations/mean_and_var/zeros', \"Tensor\<shape: [], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[1]}"];
"CreateTensorBinding[vocabulary/Placeholder]" -> "CreateSavedModelForAnalyzerInputs[1]";
"CreateTensorBinding[x/mean_and_var/Placeholder]" -> "CreateSavedModelForAnalyzerInputs[1]";
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" -> "CreateSavedModelForAnalyzerInputs[1]";
"ApplySavedModel[1]" [label="{ApplySavedModel|dataset_key: None|phase: 1|label: ApplySavedModel[1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[1]" -> "ApplySavedModel[1]";
"TensorSource[x_square_deviations/mean_and_var]" [label="{ExtractFromDict|keys: ('x_square_deviations/mean_and_var/Cast', 'x_square_deviations/mean_and_var/truediv', 'x_square_deviations/mean_and_var/truediv_1', 'x_square_deviations/mean_and_var/zeros')|label: TensorSource[x_square_deviations/mean_and_var]|partitionable: True}"];
"ApplySavedModel[1]" -> "TensorSource[x_square_deviations/mean_and_var]";
"CacheableCombineAccumulate[x_square_deviations/mean_and_var]" [label="{CacheableCombineAccumulate|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineAccumulate[x_square_deviations/mean_and_var]|partitionable: True}"];
"TensorSource[x_square_deviations/mean_and_var]" -> "CacheableCombineAccumulate[x_square_deviations/mean_and_var]";
"CacheableCombineMerge[x_square_deviations/mean_and_var]" [label="{CacheableCombineMerge|combiner: \<WeightedMeanAndVarCombiner\>|label: CacheableCombineMerge[x_square_deviations/mean_and_var]|{<0>0|<1>1}}"];
"CacheableCombineAccumulate[x_square_deviations/mean_and_var]" -> "CacheableCombineMerge[x_square_deviations/mean_and_var]";
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]" [label="{CreateTensorBinding|tensor: x_square_deviations/mean_and_var/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]}"];
"CacheableCombineMerge[x_square_deviations/mean_and_var]":0 -> "CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]";
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]" [label="{CreateTensorBinding|tensor: x_square_deviations/mean_and_var/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]}"];
"CacheableCombineMerge[x_square_deviations/mean_and_var]":1 -> "CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]";
CreateSavedModel [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_normalized', \"Tensor\<shape: [None], \<dtype: 'float32'\>\>\")])|label: CreateSavedModel}"];
"CreateTensorBinding[vocabulary/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/mean_and_var/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/mean_and_var/Placeholder_1]" -> CreateSavedModel;
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x_square_deviations/mean_and_var/Placeholder_1]" -> CreateSavedModel;
"EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]" [label="{EncodeCache|coder: \<JsonNumpyCacheCoder\>|label: EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]|partitionable: True}"];
"CacheableCombineAccumulate[x/mean_and_var][span-1]" -> "EncodeCache[CacheableCombineAccumulate[x/mean_and_var]][span-1]";
"EncodeCache[VocabularyAccumulate[vocabulary]][span-0]" [label="{EncodeCache|coder: \<_VocabularyAccumulatorCoder\>|label: EncodeCache[VocabularyAccumulate[vocabulary]][span-0]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-0]" -> "EncodeCache[VocabularyAccumulate[vocabulary]][span-0]";
"EncodeCache[VocabularyAccumulate[vocabulary]][span-1]" [label="{EncodeCache|coder: \<_VocabularyAccumulatorCoder\>|label: EncodeCache[VocabularyAccumulate[vocabulary]][span-1]|partitionable: True}"];
"VocabularyAccumulate[vocabulary][span-1]" -> "EncodeCache[VocabularyAccumulate[vocabulary]][span-1]";
}
""")
def _preprocessing_fn_for_generalized_chained_ptransforms(inputs):
class FakeChainablePartitionable(
collections.namedtuple('FakeChainablePartitionable', ['label']),
nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainablePartitionable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return True
class FakeChainableCacheable(
collections.namedtuple('FakeChainableCacheable', ['label']),
nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainableCacheable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return True
@property
def cache_coder(self):
return 'Not-a-coder-but-thats-ok!'
class FakeChainable(
collections.namedtuple('FakeChainable', ['label']), nodes.OperationDef):
def __new__(cls, label=None):
if label is None:
scope = tf.compat.v1.get_default_graph().get_name_scope()
label = '{}[{}]'.format(cls.__name__, scope)
return super(FakeChainable, cls).__new__(cls, label=label)
@property
def num_outputs(self):
return 1
@property
def is_partitionable(self):
return False
with tf.compat.v1.name_scope('x'):
input_values_node = nodes.apply_operation(
analyzer_nodes.TensorSource, tensors=[inputs['x']])
with tf.compat.v1.name_scope('partitionable1'):
partitionable_outputs = nodes.apply_multi_output_operation(
FakeChainablePartitionable, input_values_node)
with tf.compat.v1.name_scope('cacheable1'):
intermediate_cached_value_node = nodes.apply_multi_output_operation(
FakeChainableCacheable, *partitionable_outputs)
with tf.compat.v1.name_scope('partitionable2'):
partitionable_outputs = nodes.apply_multi_output_operation(
FakeChainablePartitionable, *intermediate_cached_value_node)
with tf.compat.v1.name_scope('cacheable2'):
cached_value_node = nodes.apply_multi_output_operation(
FakeChainableCacheable, *partitionable_outputs)
with tf.compat.v1.name_scope('partitionable3'):
output_value_node = nodes.apply_multi_output_operation(
FakeChainablePartitionable, *cached_value_node)
with tf.compat.v1.name_scope('merge'):
output_value_node = nodes.apply_operation(FakeChainable,
*output_value_node)
with tf.compat.v1.name_scope('not-cacheable'):
non_cached_output = nodes.apply_operation(FakeChainable,
input_values_node)
x_chained = analyzer_nodes.bind_future_as_tensor(
output_value_node, analyzer_nodes.TensorInfo(tf.float32, (17, 27),
False))
x_plain = analyzer_nodes.bind_future_as_tensor(
non_cached_output, analyzer_nodes.TensorInfo(tf.int64, (7, 13), False))
return {'x_chained': x_chained, 'x_plain': x_plain}
_OPTIMIZE_TRAVERSAL_GENERALIZED_CHAINED_PTRANSFORMS_CASE = dict(
testcase_name='generalized_chained_ptransforms',
feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)},
preprocessing_fn=_preprocessing_fn_for_generalized_chained_ptransforms,
dataset_input_cache_dict=None,
expected_dot_graph_str=r"""digraph G {
directed=True;
node [shape=Mrecord];
"CreateSavedModelForAnalyzerInputs[0]" [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('inputs/x', \"Tensor\<shape: [None], \<dtype: 'float32'\>\>\")])|label: CreateSavedModelForAnalyzerInputs[0]}"];
"ApplySavedModel[0][span-0]" [label="{ApplySavedModel|dataset_key: span-0|phase: 0|label: ApplySavedModel[0][span-0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-0]";
"TensorSource[x][span-0]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x][span-0]|partitionable: True}"];
"ApplySavedModel[0][span-0]" -> "TensorSource[x][span-0]";
"FakeChainablePartitionable[x/partitionable1][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable1][span-0]|partitionable: True}"];
"TensorSource[x][span-0]" -> "FakeChainablePartitionable[x/partitionable1][span-0]";
"FakeChainableCacheable[x/cacheable1][span-0]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable1][span-0]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable1][span-0]" -> "FakeChainableCacheable[x/cacheable1][span-0]";
"FakeChainablePartitionable[x/partitionable2][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable2][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-0]" -> "FakeChainablePartitionable[x/partitionable2][span-0]";
"FakeChainableCacheable[x/cacheable2][span-0]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable2][span-0]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable2][span-0]" -> "FakeChainableCacheable[x/cacheable2][span-0]";
"FakeChainablePartitionable[x/partitionable3][span-0]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable3][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-0]" -> "FakeChainablePartitionable[x/partitionable3][span-0]";
"ApplySavedModel[0][span-1]" [label="{ApplySavedModel|dataset_key: span-1|phase: 0|label: ApplySavedModel[0][span-1]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0][span-1]";
"TensorSource[x][span-1]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x][span-1]|partitionable: True}"];
"ApplySavedModel[0][span-1]" -> "TensorSource[x][span-1]";
"FakeChainablePartitionable[x/partitionable1][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable1][span-1]|partitionable: True}"];
"TensorSource[x][span-1]" -> "FakeChainablePartitionable[x/partitionable1][span-1]";
"FakeChainableCacheable[x/cacheable1][span-1]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable1][span-1]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable1][span-1]" -> "FakeChainableCacheable[x/cacheable1][span-1]";
"FakeChainablePartitionable[x/partitionable2][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable2][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-1]" -> "FakeChainablePartitionable[x/partitionable2][span-1]";
"FakeChainableCacheable[x/cacheable2][span-1]" [label="{FakeChainableCacheable|label: FakeChainableCacheable[x/cacheable2][span-1]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable2][span-1]" -> "FakeChainableCacheable[x/cacheable2][span-1]";
"FakeChainablePartitionable[x/partitionable3][span-1]" [label="{FakeChainablePartitionable|label: FakeChainablePartitionable[x/partitionable3][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-1]" -> "FakeChainablePartitionable[x/partitionable3][span-1]";
"FlattenCache[FakeChainable[x/merge]]" [label="{Flatten|label: FlattenCache[FakeChainable[x/merge]]|partitionable: True}"];
"FakeChainablePartitionable[x/partitionable3][span-0]" -> "FlattenCache[FakeChainable[x/merge]]";
"FakeChainablePartitionable[x/partitionable3][span-1]" -> "FlattenCache[FakeChainable[x/merge]]";
"FakeChainable[x/merge]" [label="{FakeChainable|label: FakeChainable[x/merge]}"];
"FlattenCache[FakeChainable[x/merge]]" -> "FakeChainable[x/merge]";
"CreateTensorBinding[x/Placeholder]" [label="{CreateTensorBinding|tensor: x/Placeholder:0|is_asset_filepath: False|label: CreateTensorBinding[x/Placeholder]}"];
"FakeChainable[x/merge]" -> "CreateTensorBinding[x/Placeholder]";
"ApplySavedModel[0]" [label="{ApplySavedModel|dataset_key: None|phase: 0|label: ApplySavedModel[0]|partitionable: True}"];
"CreateSavedModelForAnalyzerInputs[0]" -> "ApplySavedModel[0]";
"TensorSource[x]" [label="{ExtractFromDict|keys: ('inputs/x',)|label: TensorSource[x]|partitionable: True}"];
"ApplySavedModel[0]" -> "TensorSource[x]";
"FakeChainable[x/not-cacheable]" [label="{FakeChainable|label: FakeChainable[x/not-cacheable]}"];
"TensorSource[x]" -> "FakeChainable[x/not-cacheable]";
"CreateTensorBinding[x/Placeholder_1]" [label="{CreateTensorBinding|tensor: x/Placeholder_1:0|is_asset_filepath: False|label: CreateTensorBinding[x/Placeholder_1]}"];
"FakeChainable[x/not-cacheable]" -> "CreateTensorBinding[x/Placeholder_1]";
CreateSavedModel [label="{CreateSavedModel|table_initializers: 0|output_signature: OrderedDict([('x_chained', \"Tensor\<shape: [17, 27], \<dtype: 'float32'\>\>\"), ('x_plain', \"Tensor\<shape: [7, 13], \<dtype: 'int64'\>\>\")])|label: CreateSavedModel}"];
"CreateTensorBinding[x/Placeholder]" -> CreateSavedModel;
"CreateTensorBinding[x/Placeholder_1]" -> CreateSavedModel;
"EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-0]" -> "EncodeCache[FakeChainableCacheable[x/cacheable1]][span-0]";
"EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable1][span-1]" -> "EncodeCache[FakeChainableCacheable[x/cacheable1]][span-1]";
"EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-0]" -> "EncodeCache[FakeChainableCacheable[x/cacheable2]][span-0]";
"EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]" [label="{EncodeCache|coder: Not-a-coder-but-thats-ok!|label: EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]|partitionable: True}"];
"FakeChainableCacheable[x/cacheable2][span-1]" -> "EncodeCache[FakeChainableCacheable[x/cacheable2]][span-1]";
}
""")
_OPTIMIZE_TRAVERSAL_TEST_CASES = [
_OPTIMIZE_TRAVERSAL_COMMON_CASE,
_OPTIMIZE_TRAVERSAL_GENERALIZED_CHAINED_PTRANSFORMS_CASE,
]
class CachedImplTest(test_case.TransformTestCase):
def setUp(self):
super(CachedImplTest, self).setUp()
self.base_test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._cache_dir = os.path.join(self.base_test_dir, 'cache')
self._context = tft_beam.Context(temp_dir=self.get_temp_dir())
self._context.__enter__()
def tearDown(self):
self._context.__exit__()
def test_single_phase_mixed_analyzer_run_once(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
integerized_s = tft.compute_and_apply_vocabulary(inputs['s'])
_ = tft.bucketize(inputs['x'], 2, name='bucketize')
return {
'integerized_s':
integerized_s,
'x_min':
tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'x_mean':
tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'y_min':
tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
'y_mean':
tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
}
# Run AnalyzeAndTransform on some input data and compare with expected
# output.
input_data = [{'x': 12, 'y': 1, 's': 'd'}, {'x': 10, 'y': 1, 's': 'c'}]
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
'y': 1,
's': 'b',
}, {
'x': 4,
'y': -4,
's': 'b',
}],
span_1_key: input_data,
}
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
cache_dict = {
span_0_key: {
b'__v0__CacheableCombineAccumulate[x_1/mean_and_var]-.\xc4t>ZBv\xea\xa5SU\xf4\x065\xc6\x1c\x81W\xf9\x1b':
p | 'CreateA' >> beam.Create([b'[2.0, 1.0, 9.0, 0.0]']),
b'__v0__CacheableCombineAccumulate[x/x]-\x95\xc5w\x88\x85\x8b5V\xc9\x00\xe0\x0f\x03\x1a\xdaL\x9d\xd5\xb3\xe3':
p | 'CreateB' >> beam.Create([b'[2.0, 4.0]']),
b'__v0__CacheableCombineAccumulate[y_1/mean_and_var]-E^\xb7VZ\xeew4rm\xab\xa3\xa4k|J\x80ck\x16':
p | 'CreateC' >> beam.Create([b'[2.0, -1.5, 6.25, 0.0]']),
b'__v0__CacheableCombineAccumulate[y/y]-\xdf\x1ey\x03\x1c\x96\xd5'
b' e\x9bJ\xa1\xd2\xfc\x9c\x03\x0fM \xdb':
p | 'CreateD' >> beam.Create([b'[4.0, 1.0]']),
},
span_1_key: {},
}
transform_fn, cache_output = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
_ = (cache_output | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
dot_string = nodes.get_dot_graph([analysis_graph_builder._ANALYSIS_GRAPH
]).to_string()
self.WriteRenderedDotFile(dot_string)
# The output cache should not have entries for the cache that is present
# in the input cache.
self.assertEqual(
len(cache_output[span_0_key]),
len(cache_output[span_1_key]) - 4)
transformed_data, unused_transformed_metadata = transformed_dataset
expected_transformed = [
{
'x_mean': 6.0,
'x_min': -2.0,
'y_mean': -0.25,
'y_min': -4.0,
'integerized_s': 1,
},
{
'x_mean': 6.0,
'x_min': -2.0,
'y_mean': -0.25,
'y_min': -4.0,
'integerized_s': 2,
},
]
beam_test_util.assert_that(transformed_data,
beam_test_util.equal_to(expected_transformed))
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn')
_ = transform_fn | tft_beam.WriteTransformFn(transform_fn_dir)
# 4 from analyzing 2 spans, and 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 6)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 4)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
def test_single_phase_run_twice(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
_ = tft.vocabulary(inputs['s'], vocab_filename='vocab1')
_ = tft.bucketize(inputs['x'], 2, name='bucketize')
return {
'x_min':
tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'x_mean':
tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
'y_min':
tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
'y_mean':
tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
's_integerized':
tft.compute_and_apply_vocabulary(
inputs['s'],
labels=inputs['label'],
use_adjusted_mutual_info=True),
}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
'y': 1,
's': 'a',
'label': 0,
}, {
'x': 4,
'y': -4,
's': 'a',
'label': 1,
}, {
'x': 5,
'y': 11,
's': 'a',
'label': 1,
}, {
'x': 1,
'y': -4,
's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
'label': 1,
}],
span_1_key: [{
'x': 12,
'y': 1,
's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
'label': 0
}, {
'x': 10,
'y': 1,
's': 'c',
'label': 1
}],
}
expected_vocabulary_contents = np.array(
[b'a', u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'), b'c'],
dtype=object)
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
transform_fn_1, cache_output = (
(flat_data, input_data_pcoll_dict, {}, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
_ = (
cache_output
| 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
transformed_dataset = ((
(input_data_pcoll_dict[span_1_key], input_metadata), transform_fn_1)
| 'Transform' >> tft_beam.TransformDataset())
del input_data_pcoll_dict
transformed_data, unused_transformed_metadata = transformed_dataset
expected_transformed_data = [
{
'x_mean': 5.0,
'x_min': -2.0,
'y_mean': 1.0,
'y_min': -4.0,
's_integerized': 0,
},
{
'x_mean': 5.0,
'x_min': -2.0,
'y_mean': 1.0,
'y_min': -4.0,
's_integerized': 2,
},
]
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='first')
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn_1')
_ = transform_fn_1 | tft_beam.WriteTransformFn(transform_fn_dir)
for key in input_data_dict:
self.assertIn(key, cache_output)
self.assertEqual(7, len(cache_output[key]))
tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
self.AssertVocabularyContents(vocab1_path, expected_vocabulary_contents)
    # 6 from analyzing 2 spans (4 + 2 instances), and 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 14)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
input_cache = p | analyzer_cache.ReadAnalysisCacheFromFS(
self._cache_dir, list(input_data_dict.keys()))
transform_fn_2, second_output_cache = (
(flat_data, input_data_pcoll_dict, input_cache, input_metadata)
| 'AnalyzeAgain' >>
(tft_beam.AnalyzeDatasetWithCache(preprocessing_fn)))
_ = (
second_output_cache
| 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir))
dot_string = nodes.get_dot_graph([analysis_graph_builder._ANALYSIS_GRAPH
]).to_string()
self.WriteRenderedDotFile(dot_string)
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn_2)
| 'TransformAgain' >> tft_beam.TransformDataset())
transformed_data, unused_transformed_metadata = transformed_dataset
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='second')
transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn_2')
_ = transform_fn_2 | tft_beam.WriteTransformFn(transform_fn_dir)
tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
self.AssertVocabularyContents(vocab1_path, expected_vocabulary_contents)
self.assertFalse(second_output_cache)
# Only 2 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 2)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 14)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 0)
# The root CreateSavedModel is optimized away because the data doesn't get
# processed at all (only cache).
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 1)
def test_caching_vocab_for_integer_categorical(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
return {
'x_vocab':
tft.compute_and_apply_vocabulary(
inputs['x'], frequency_threshold=2)
}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
            'x': tf.io.FixedLenFeature([], tf.int64),
}))
input_data_dict = {
span_0_key: [{
'x': -2,
}, {
'x': -4,
}, {
'x': -1,
}, {
'x': 4,
}],
span_1_key: [{
'x': -2,
}, {
'x': -1,
}, {
'x': 6,
}, {
'x': 7,
}],
}
expected_transformed_data = [{
'x_vocab': 0,
}, {
'x_vocab': 1,
}, {
'x_vocab': -1,
}, {
'x_vocab': -1,
}]
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
cache_dict = {
span_0_key: {
b'__v0__VocabularyAccumulate[compute_and_apply_vocabulary/vocabulary]-\x05e\xfe4\x03H.P\xb5\xcb\xd22\xe3\x16\x15\xf8\xf5\xe38\xd9':
p | 'CreateB' >> beam.Create(
[b'[-2, 2]', b'[-4, 1]', b'[-1, 1]', b'[4, 1]']),
},
span_1_key: {},
}
transform_fn, cache_output = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
dot_string = nodes.get_dot_graph(
[analysis_graph_builder._ANALYSIS_GRAPH]).to_string()
self.WriteRenderedDotFile(dot_string)
self.assertNotIn(span_0_key, cache_output)
_ = cache_output | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
p, self._cache_dir)
transformed_dataset = ((
(input_data_dict[span_1_key], input_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
transformed_data, _ = transformed_dataset
beam_test_util.assert_that(
transformed_data,
beam_test_util.equal_to(expected_transformed_data),
label='first')
# 4 from analysis since 1 span was completely cached, and 4 from transform.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 1)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 1)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
def test_non_frequency_vocabulary_merge(self):
"""This test compares vocabularies produced with and without cache."""
mi_vocab_name = 'mutual_information_vocab'
adjusted_mi_vocab_name = 'adjusted_mutual_information_vocab'
weighted_frequency_vocab_name = 'weighted_frequency_vocab'
def preprocessing_fn(inputs):
_ = tft.vocabulary(
inputs['s'],
labels=inputs['label'],
store_frequency=True,
vocab_filename=mi_vocab_name,
min_diff_from_avg=0.1,
use_adjusted_mutual_info=False)
_ = tft.vocabulary(
inputs['s'],
labels=inputs['label'],
store_frequency=True,
vocab_filename=adjusted_mi_vocab_name,
min_diff_from_avg=1.0,
use_adjusted_mutual_info=True)
_ = tft.vocabulary(
inputs['s'],
weights=inputs['weight'],
store_frequency=True,
vocab_filename=weighted_frequency_vocab_name,
use_adjusted_mutual_info=False)
return inputs
span_0_key = 'span-0'
span_1_key = 'span-1'
input_data = [
dict(s='a', weight=1, label=1),
dict(s='a', weight=0.5, label=1),
dict(s='b', weight=0.75, label=1),
dict(s='b', weight=1, label=0),
]
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
's': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64),
'weight': tf.io.FixedLenFeature([], tf.float32),
}))
input_data_dict = {
span_0_key: input_data,
span_1_key: input_data,
}
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(
list(itertools.chain(*input_data_dict.values())))
# wrap each value in input_data_dict as a pcoll.
input_data_pcoll_dict = {}
for a, b in six.iteritems(input_data_dict):
input_data_pcoll_dict[a] = p | a >> beam.Create(b)
transform_fn_with_cache, output_cache = (
(flat_data, input_data_pcoll_dict, {}, input_metadata)
| tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
transform_fn_with_cache_dir = os.path.join(self.base_test_dir,
'transform_fn_with_cache')
_ = transform_fn_with_cache | tft_beam.WriteTransformFn(
transform_fn_with_cache_dir)
expected_accumulators = {
b'__v0__VocabularyAccumulate[vocabulary]-<GhZ\xac\xb8\xa9\x8c\xce\x1c\xb2-ck\xca\xe8\xec\t%\x8f':
[
b'["a", [2, [0.0, 1.0], [0.0, 0.0], 1.0]]',
b'["b", [2, [0.5, 0.5], [0.0, 0.0], 1.0]]',
b'["global_y_count_sentinel", [4, [0.25, 0.75], [0.0, 0.0], '
b'1.0]]'
],
b'__v0__VocabularyAccumulate[vocabulary_1]-\xa6\xae\nd\xe3\xd1\x9f\xa0\xe2\xb4\x05j\xa5\xfd\x8c\xfaeN\xd1\x1f':
[
b'["a", [2, [0.0, 1.0], [0.0, 0.0], 1.0]]',
b'["b", [2, [0.5, 0.5], [0.0, 0.0], 1.0]]',
b'["global_y_count_sentinel", [4, [0.25, 0.75], [0.0, 0.0], '
b'1.0]]'
],
b"__v0__VocabularyAccumulate[vocabulary_2]-\x97\x1c>\x851\x94'\xdc\xdf\xfd\xcc\x86\xb7\xb8\xe1\xe8*\x89B\t":
[b'["a", 1.5]', b'["b", 1.75]'],
}
spans = [span_0_key, span_1_key]
self.assertCountEqual(output_cache.keys(), spans)
for span in spans:
self.assertCountEqual(output_cache[span].keys(),
expected_accumulators.keys())
for idx, (key,
value) in enumerate(six.iteritems(expected_accumulators)):
beam_test_util.assert_that(
output_cache[span][key],
beam_test_util.equal_to(value),
label='AssertCache[{}][{}]'.format(span, idx))
# 4 from analysis on each of the input spans.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 6)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
with _TestPipeline() as p:
flat_data = p | 'CreateInputData' >> beam.Create(input_data * 2)
transform_fn_no_cache = ((flat_data, input_metadata)
| tft_beam.AnalyzeDataset(preprocessing_fn))
transform_fn_no_cache_dir = os.path.join(self.base_test_dir,
'transform_fn_no_cache')
_ = transform_fn_no_cache | tft_beam.WriteTransformFn(
transform_fn_no_cache_dir)
# 4 from analysis on each of the input spans.
self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'cache_entries_encoded'), 0)
self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'), 2)
tft_output_cache = tft.TFTransformOutput(transform_fn_with_cache_dir)
tft_output_no_cache = tft.TFTransformOutput(transform_fn_no_cache_dir)
for vocab_filename in (mi_vocab_name, adjusted_mi_vocab_name,
weighted_frequency_vocab_name):
cache_path = tft_output_cache.vocabulary_file_by_name(vocab_filename)
no_cache_path = tft_output_no_cache.vocabulary_file_by_name(
vocab_filename)
with tf.io.gfile.GFile(cache_path, 'rb') as f1, tf.io.gfile.GFile(
no_cache_path, 'rb') as f2:
self.assertEqual(
f1.readlines(), f2.readlines(),
'vocab with cache != vocab without cache for: {}'.format(
vocab_filename))
@test_case.named_parameters(*_OPTIMIZE_TRAVERSAL_TEST_CASES)
def test_optimize_traversal(self, feature_spec, preprocessing_fn,
dataset_input_cache_dict, expected_dot_graph_str):
span_0_key, span_1_key = 'span-0', 'span-1'
if dataset_input_cache_dict is not None:
cache = {span_0_key: dataset_input_cache_dict}
else:
cache = {}
with tf.compat.v1.name_scope('inputs'):
input_signature = impl_helper.feature_spec_as_batched_placeholders(
feature_spec)
output_signature = preprocessing_fn(input_signature)
transform_fn_future, cache_output_dict = analysis_graph_builder.build(
tf.compat.v1.get_default_graph(), input_signature, output_signature,
{span_0_key, span_1_key}, cache)
leaf_nodes = [transform_fn_future] + sorted(
cache_output_dict.values(), key=str)
dot_string = nodes.get_dot_graph(leaf_nodes).to_string()
self.WriteRenderedDotFile(dot_string)
self.assertSameElements(
dot_string.split('\n'),
expected_dot_graph_str.split('\n'),
msg='Result dot graph is:\n{}\nCache output dict keys are: {}'.format(
dot_string, cache_output_dict.keys()))
def test_no_data_needed(self):
span_0_key = 'span-0'
span_1_key = 'span-1'
def preprocessing_fn(inputs):
return {k: tf.identity(v) for k, v in six.iteritems(inputs)}
input_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
}))
input_data_dict = {
span_0_key: None,
span_1_key: None,
}
with _TestPipeline() as p:
flat_data = None
cache_dict = {
span_0_key: {},
span_1_key: {},
}
_, output_cache = (
(flat_data, input_data_dict, cache_dict, input_metadata)
| 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(
preprocessing_fn, pipeline=p))
self.assertFalse(output_cache)
if __name__ == '__main__':
# TODO(b/133440043): Remove this once TFT supports eager execution.
tf.compat.v1.disable_eager_execution()
test_case.main()
|
the-stack_0_5134 | """
Script used to build the tiles databases for the Sentinel2,
Landsat5, and Landsat8 spacecraft.
"""
import os
import geopandas as gpd
from pathlib import Path
def build_sentinel2_db():
"""Extract the Sentinel2 tiles information and store it in pickle format."""
data_dir = Path(__file__).parent
wrs_file = os.path.join(
data_dir, "./sentinel2/sentinel2_tiles_world.zip!sentinel2_tiles_world.shp"
)
gpd_ = gpd.read_file(wrs_file)
gpd_.columns = ["TILE", "geometry"]
gpd_.to_file(
os.path.join(data_dir, "sentinel2/sentinel2_tiles.shp"), driver="ESRI Shapefile"
)
gpd_ = None
def build_landsat_db():
"""Extract the Landsat tiles (path/row) information and store it in pickle format."""
data_dir = Path(__file__).parent
wrs_file = os.path.join(
data_dir, "landsat/WRS2_descending_0.zip!WRS2_descending.shp"
)
gpd_ = gpd.read_file(wrs_file)
gpd_["PATH#ROW"] = (
gpd_["PATH"].apply(lambda x: f"{x:003d}")
+ "#"
+ gpd_["ROW"].apply(lambda x: f"{x:003d}")
)
gpd_[["PATH#ROW", "geometry"]].to_file(
os.path.join(data_dir, "landsat/landsat_tiles.shp"), driver="ESRI Shapefile"
)
gpd_ = None
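
# A hypothetical convenience helper for interactive use: it assumes one of
# the build_* functions above has already produced its shapefile, and it
# simply loads that file back into a GeoDataFrame for inspection.
def load_tiles(shapefile="sentinel2/sentinel2_tiles.shp"):
    """Load a previously built tiles shapefile relative to this module."""
    data_dir = Path(__file__).parent
    return gpd.read_file(os.path.join(data_dir, shapefile))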
if __name__ == "__main__":
build_sentinel2_db()
    build_landsat_db()
|
the-stack_0_5135 | """Representation of an IHM mmCIF file as a set of Python classes.
Generally class names correspond to mmCIF table names and class
attributes to mmCIF attributes (with prefixes like `pdbx_` stripped).
For example, the data item _entity.details is found in the
:class:`Entity` class, as the `details` member.
Ordinals and IDs are generally not used in this representation (instead,
pointers to objects are used).
"""
import itertools
import re
import sys
# Handle different naming of urllib in Python 2/3
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
__version__ = '0.21'
class __UnknownValue(object):
# Represent the mmCIF 'unknown' special value
def __str__(self):
return '?'
__repr__ = __str__
def __bool__(self):
return False
# Python2 compatibility
__nonzero__ = __bool__
# Needs to be hashable so that classes like Software (that might
# use unknown values as attributes) are hashable
def __hash__(self):
return 0
# Unknown value is a singleton and should only compare equal to itself
def __eq__(self, other):
return self is other
def __lt__(self, other):
return False
__gt__ = __lt__
__le__ = __ge__ = __eq__
#: A value that isn't known. Note that this is distinct from a value that
#: is deliberately omitted, which is represented by Python None.
unknown = __UnknownValue()
def _remove_identical(gen):
"""Return only unique objects from `gen`.
Objects that are identical are only returned once, although multiple
non-identical objects that compare equal may be returned."""
seen_objs = {}
for obj in gen:
if id(obj) in seen_objs:
continue
seen_objs[id(obj)] = None
yield obj
class System(object):
"""Top-level class representing a complete modeled system.
:param str title: Title (longer text description) of the system.
:param str id: Unique identifier for this system in the mmCIF file.
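
    For example, a minimal sketch of creating a system and recording
    its authors::

        import ihm
        system = ihm.System(title='Modeling of an example complex')
        system.authors.extend(['Smith AJ', 'Jones B'])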
"""
def __init__(self, title=None, id='model'):
self.id = id
self.title = title
#: List of plain text comments. These will be added to the top of
#: the mmCIF file.
self.comments = []
#: List of all software used in the modeling. See :class:`Software`.
self.software = []
#: List of all authors of this system, as a list of strings (last name
#: followed by initials, e.g. "Smith AJ"). When writing out a file,
#: if this is list is empty, the set of all citation authors (see
#: :attr:`Citation.authors`) is used instead.
self.authors = []
#: List of all grants that supported this work. See :class:`Grant`.
self.grants = []
#: List of all citations. See :class:`Citation`.
self.citations = []
#: All entities used in the system. See :class:`Entity`.
self.entities = []
#: All asymmetric units used in the system. See :class:`AsymUnit`.
self.asym_units = []
#: All orphaned chemical descriptors in the system.
#: See :class:`ChemDescriptor`. This can be used to track descriptors
#: that are not otherwise used - normally one is assigned to a
#: :class:`ihm.restraint.CrossLinkRestraint`.
self.orphan_chem_descriptors = []
#: All orphaned assemblies in the system. See :class:`Assembly`.
#: This can be used to keep track of all assemblies that are not
#: otherwise used - normally one is assigned to a
#: :class:`~ihm.model.Model`,
#: :class:`ihm.protocol.Step`, or
#: :class:`~ihm.restraint.Restraint`.
self.orphan_assemblies = []
#: The assembly of the entire system. By convention this is always
#: the first assembly in the mmCIF file (assembly_id=1). Note that
#: currently this isn't filled in on output until dumper.write()
#: is called. See :class:`Assembly`.
self.complete_assembly = Assembly((), name='Complete assembly',
description='All known components')
#: Locations of all extra resources.
#: See :class:`~ihm.location.Location`.
self.locations = []
#: All orphaned datasets.
#: This can be used to keep track of all datasets that are not
#: otherwise used - normally a dataset is assigned to a
#: :class:`~ihm.dataset.DatasetGroup`,
#: :class:`~ihm.startmodel.StartingModel`,
#: :class:`~ihm.restraint.Restraint`,
#: :class:`~ihm.startmodel.Template`,
#: or as the parent of another :class:`~ihm.dataset.Dataset`.
#: See :class:`~ihm.dataset.Dataset`.
self.orphan_datasets = []
#: All orphaned groups of datasets.
#: This can be used to keep track of all dataset groups that are not
#: otherwise used - normally a group is assigned to a
#: :class:`~ihm.protocol.Protocol`.
#: See :class:`~ihm.dataset.DatasetGroup`.
self.orphan_dataset_groups = []
#: All orphaned representations of the system.
#: This can be used to keep track of all representations that are not
#: otherwise used - normally one is assigned to a
#: :class:`~ihm.model.Model`.
#: See :class:`~ihm.representation.Representation`.
self.orphan_representations = []
#: All orphaned starting models for the system.
#: This can be used to keep track of all starting models that are not
#: otherwise used - normally one is assigned to an
#: :class:`ihm.representation.Segment`.
#: See :class:`~ihm.startmodel.StartingModel`.
self.orphan_starting_models = []
#: All restraints on the system.
#: See :class:`~ihm.restraint.Restraint`.
self.restraints = []
#: All restraint groups.
#: See :class:`~ihm.restraint.RestraintGroup`.
self.restraint_groups = []
#: All orphaned modeling protocols.
#: This can be used to keep track of all protocols that are not
#: otherwise used - normally a protocol is assigned to a
#: :class:`~ihm.model.Model`.
#: See :class:`~ihm.protocol.Protocol`.
self.orphan_protocols = []
#: All ensembles.
#: See :class:`~ihm.model.Ensemble`.
self.ensembles = []
#: All ordered processes.
#: See :class:`~ihm.model.OrderedProcess`.
self.ordered_processes = []
#: All state groups (collections of models).
#: See :class:`~ihm.model.StateGroup`.
self.state_groups = []
#: All orphaned geometric objects.
#: This can be used to keep track of all objects that are not
#: otherwise used - normally an object is assigned to a
#: :class:`~ihm.restraint.GeometricRestraint`.
#: See :class:`~ihm.geometry.GeometricObject`.
self.orphan_geometric_objects = []
#: All orphaned features.
#: This can be used to keep track of all features that are not
#: otherwise used - normally a feature is assigned to a
#: :class:`~ihm.restraint.GeometricRestraint`.
#: See :class:`~ihm.restraint.Feature`.
self.orphan_features = []
#: All orphaned pseudo sites.
#: This can be used to keep track of all pseudo sites that are not
#: otherwise used - normally a site is used in a
#: :class:`~ihm.restraint.PseudoSiteFeature` or a
#: :class:`~ihm.restraint.CrossLinkPseudoSite`.
self.orphan_pseudo_sites = []
#: Contains the fluorescence (FLR) part.
#: See :class:`~ihm.flr.FLRData`.
self.flr_data = []
def update_locations_in_repositories(self, repos):
"""Update all :class:`Location` objects in the system that lie within
a checked-out :class:`Repository` to point to that repository.
This is intended for the use case where the current working
directory is a checkout of a repository which is archived somewhere
with a DOI. Locations can then be simply constructed pointing to
local files, and retroactively updated with this method to point
to the DOI if appropriate.
For each Location, if it points to a local file that is below the
`root` of one of the `repos`, update it to point to that repository.
If is under multiple roots, pick the one that gives the shortest
path. For example, if run in a subdirectory `foo` of a repository
archived as `repo.zip`, the local path `simple.pdb` will
be updated to be `repo-top/foo/simple.pdb` in `repo.zip`::
l = ihm.location.InputFileLocation("simple.pdb")
system.locations.append(l)
r = ihm.location.Repository(doi='1.2.3.4',
                 url='https://example.com/repo.zip',
top_directory="repo-top", root="..")
system.update_locations_in_repositories([r])
"""
import ihm.location
for loc in self._all_locations():
if isinstance(loc, ihm.location.FileLocation):
ihm.location.Repository._update_in_repos(loc, repos)
def _all_restraints(self):
"""Iterate over all Restraints in the system.
Duplicates may be present."""
def _all_restraints_in_groups():
for rg in self.restraint_groups:
for r in rg:
yield r
return itertools.chain(self.restraints, _all_restraints_in_groups())
def _all_chem_descriptors(self):
"""Iterate over all ChemDescriptors in the system.
Duplicates may be present."""
return itertools.chain(
self.orphan_chem_descriptors,
(restraint.linker for restraint in self._all_restraints()
if hasattr(restraint, 'linker') and restraint.linker),
(itertools.chain.from_iterable(
f._all_flr_chemical_descriptors() for f in self.flr_data)))
def _all_model_groups(self, only_in_states=True):
"""Iterate over all ModelGroups in the system.
If only_in_states is True, only return ModelGroups referenced
by a State object; otherwise, also include ModelGroups referenced
by an OrderedProcess or Ensemble."""
# todo: raise an error if a modelgroup is present in multiple states
for state_group in self.state_groups:
for state in state_group:
for model_group in state:
yield model_group
if not only_in_states:
for ensemble in self.ensembles:
if ensemble.model_group:
yield ensemble.model_group
for ss in ensemble.subsamples:
if ss.model_group:
yield ss.model_group
for proc in self.ordered_processes:
for step in proc.steps:
for edge in step:
yield edge.group_begin
yield edge.group_end
def _all_models(self):
"""Iterate over all Models in the system"""
# todo: raise an error if a model is present in multiple groups
for group in self._all_model_groups():
seen_models = {}
for model in group:
if model in seen_models:
continue
seen_models[model] = None
yield group, model
def _all_representations(self):
"""Iterate over all Representations in the system.
This includes all Representations referenced from other objects,
plus any orphaned Representations. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_representations,
(model.representation for group, model in self._all_models()
if model.representation)))
def _all_segments(self):
for representation in self._all_representations():
for segment in representation:
yield segment
def _all_starting_models(self):
"""Iterate over all StartingModels in the system.
This includes all StartingModels referenced from other objects, plus
any orphaned StartingModels. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_starting_models,
(segment.starting_model for segment in self._all_segments()
if segment.starting_model)))
def _all_protocols(self):
"""Iterate over all Protocols in the system.
This includes all Protocols referenced from other objects, plus
any orphaned Protocols. Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.orphan_protocols,
(model.protocol for group, model in self._all_models()
if model.protocol)))
def _all_protocol_steps(self):
for protocol in self._all_protocols():
for step in protocol.steps:
yield step
def _all_analysis_steps(self):
for protocol in self._all_protocols():
for analysis in protocol.analyses:
for step in analysis.steps:
yield step
def _all_assemblies(self):
"""Iterate over all Assemblies in the system.
This includes all Assemblies referenced from other objects, plus
any orphaned Assemblies. Duplicates may be present."""
return itertools.chain(
# Complete assembly is always first
(self.complete_assembly,),
self.orphan_assemblies,
(model.assembly for group, model in self._all_models()
if model.assembly),
(step.assembly for step in self._all_protocol_steps()
if step.assembly),
(step.assembly for step in self._all_analysis_steps()
if step.assembly),
(restraint.assembly
for restraint in self._all_restraints() if restraint.assembly))
def _all_dataset_groups(self):
"""Iterate over all DatasetGroups in the system.
This includes all DatasetGroups referenced from other objects, plus
any orphaned groups. Duplicates may be present."""
return itertools.chain(
self.orphan_dataset_groups,
(step.dataset_group for step in self._all_protocol_steps()
if step.dataset_group),
(step.dataset_group for step in self._all_analysis_steps()
if step.dataset_group))
def _all_templates(self):
"""Iterate over all Templates in the system."""
for startmodel in self._all_starting_models():
for template in startmodel.templates:
yield template
def _all_datasets_except_parents(self):
"""Iterate over all Datasets except those referenced only
as the parent of another Dataset. Duplicates may be present."""
def _all_datasets_in_groups():
for dg in self._all_dataset_groups():
for d in dg:
yield d
return itertools.chain(
self.orphan_datasets,
_all_datasets_in_groups(),
(sm.dataset for sm in self._all_starting_models()
if sm.dataset),
(restraint.dataset for restraint in self._all_restraints()
if restraint.dataset),
(template.dataset for template in self._all_templates()
if template.dataset))
def _all_datasets(self):
"""Iterate over all Datasets in the system.
This includes all Datasets referenced from other objects, plus
any orphaned datasets. Duplicates may be present."""
def _all_datasets_and_parents(d):
for p in d.parents:
# Handle transformed datasets
if hasattr(p, 'dataset'):
pd = p.dataset
else:
pd = p
for alld in _all_datasets_and_parents(pd):
yield alld
yield d
for d in self._all_datasets_except_parents():
for alld in _all_datasets_and_parents(d):
yield alld
def _all_densities(self):
for ensemble in self.ensembles:
for density in ensemble.densities:
yield density
def _all_locations(self):
"""Iterate over all Locations in the system.
This includes all Locations referenced from other objects, plus
any referenced from the top-level system.
Duplicates may be present."""
def _all_ensemble_locations():
for ensemble in self.ensembles:
if ensemble.file:
yield ensemble.file
for ss in ensemble.subsamples:
if ss.file:
yield ss.file
return itertools.chain(
self.locations,
(dataset.location for dataset in self._all_datasets()
if hasattr(dataset, 'location') and dataset.location),
_all_ensemble_locations(),
(density.file for density in self._all_densities()
if density.file),
(sm.script_file for sm in self._all_starting_models()
if sm.script_file),
(template.alignment_file for template in self._all_templates()
if template.alignment_file),
(step.script_file for step in self._all_protocol_steps()
if step.script_file),
(step.script_file for step in self._all_analysis_steps()
if step.script_file))
def _all_geometric_objects(self):
"""Iterate over all GeometricObjects in the system.
This includes all GeometricObjects referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
return itertools.chain(
self.orphan_geometric_objects,
(restraint.geometric_object
for restraint in self._all_restraints()
if hasattr(restraint, 'geometric_object')
and restraint.geometric_object))
def _all_features(self):
"""Iterate over all Features in the system.
This includes all Features referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
def _all_restraint_features():
for r in self._all_restraints():
if hasattr(r, '_all_features'):
for feature in r._all_features:
if feature:
yield feature
return itertools.chain(self.orphan_features, _all_restraint_features())
def _all_pseudo_sites(self):
"""Iterate over all PseudoSites in the system.
This includes all PseudoSites referenced from other objects,
plus any referenced from the top-level system.
Duplicates may be present."""
def _all_restraint_sites():
for r in self._all_restraints():
if hasattr(r, 'cross_links'):
for xl in r.cross_links:
if xl.pseudo1:
for x in xl.pseudo1:
yield x.site
if xl.pseudo2:
for x in xl.pseudo2:
yield x.site
return itertools.chain(self.orphan_pseudo_sites,
_all_restraint_sites(),
(f.site for f in self._all_features()
if hasattr(f, 'site') and f.site))
def _all_software(self):
"""Iterate over all Software in the system.
This includes all Software referenced from other objects, plus
any referenced from the top-level system.
Duplicates may be present."""
return (itertools.chain(
self.software,
(sm.software for sm in self._all_starting_models()
if sm.software),
(step.software for step in self._all_protocol_steps()
if step.software),
(step.software for step in self._all_analysis_steps()
if step.software),
(r.software for r in self._all_restraints()
if hasattr(r, 'software') and r.software)))
def _all_citations(self):
"""Iterate over all Citations in the system.
This includes all Citations referenced from other objects, plus
any referenced from the top-level system.
Duplicates are filtered out."""
return _remove_identical(itertools.chain(
self.citations,
(software.citation for software in self._all_software()
if software.citation),
(restraint.fitting_method_citation_id
for restraint in self._all_restraints()
if hasattr(restraint, 'fitting_method_citation_id')
and restraint.fitting_method_citation_id)))
def _all_entity_ranges(self):
"""Iterate over all Entity ranges in the system (these may be
:class:`Entity`, :class:`AsymUnit`, :class:`EntityRange` or
:class:`AsymUnitRange` objects).
Note that we don't include self.entities or self.asym_units here,
as we only want ranges that were actually used.
Duplicates may be present."""
return (itertools.chain(
(sm.asym_unit for sm in self._all_starting_models()),
(seg.asym_unit for seg in self._all_segments()),
(comp for a in self._all_assemblies() for comp in a),
(comp for f in self._all_features()
for comp in f._all_entities_or_asyms()),
(d.asym_unit for d in self._all_densities())))
def _make_complete_assembly(self):
"""Fill in the complete assembly with all asym units"""
# Clear out any existing components
self.complete_assembly[:] = []
# Include all asym units
for asym in self.asym_units:
self.complete_assembly.append(asym)
class Software(object):
"""Software used as part of the modeling protocol.
:param str name: The name of the software.
    :param str classification: The major function of the software, for
example 'model building', 'sample preparation',
'data collection'.
:param str description: A longer text description of the software.
:param str location: Place where the software can be found (e.g. URL).
:param str type: Type of software (program/package/library/other).
:param str version: The version used.
:param citation: Publication describing the software.
:type citation: :class:`Citation`
Generally these objects are added to :attr:`System.software` or
passed to :class:`ihm.startmodel.StartingModel`,
:class:`ihm.protocol.Step`,
:class:`ihm.analysis.Step`, or
    :class:`ihm.restraint.PredictedContactRestraint` objects.
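
    For example, a minimal sketch (the version string below is arbitrary)::

        import ihm
        software = ihm.Software(
            name='IMP', classification='model building',
            description='Integrative Modeling Platform',
            location='https://integrativemodeling.org', version='2.17.0')
        system = ihm.System()
        system.software.append(software)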
"""
def __init__(self, name, classification, description, location,
type='program', version=None, citation=None):
self.name = name
self.classification = classification
self.description = description
self.location = location
self.type = type
self.version = version
self.citation = citation
# Software compares equal if the names and versions are the same
def _eq_vals(self):
return (self.name, self.version)
def __eq__(self, other):
return self._eq_vals() == other._eq_vals()
def __hash__(self):
return hash(self._eq_vals())
class Grant(object):
"""Information on funding support for the modeling.
See :attr:`System.grants`.
:param str funding_organization: The name of the organization providing
the funding, e.g. "National Institutes of Health".
:param str country: The country that hosts the funding organization,
e.g. "United States".
:param str grant_number: Identifying information for the grant, e.g.
"1R01GM072999-01".
"""
def __init__(self, funding_organization, country, grant_number):
self.funding_organization = funding_organization
self.country = country
self.grant_number = grant_number
class Citation(object):
"""A publication that describes the modeling.
Generally citations are added to :attr:`System.citations` or
passed to :class:`ihm.Software` or
:class:`ihm.restraint.EM3DRestraint` objects.
:param str pmid: The PubMed ID.
:param str title: Full title of the publication.
:param str journal: Abbreviated journal name.
:param int volume: Journal volume number.
:param page_range: The page (int) or page range (as a 2-element
int tuple).
:param int year: Year of publication.
:param authors: All authors in order, as a list of strings (last name
followed by initials, e.g. "Smith AJ").
:param str doi: Digital Object Identifier of the publication.
"""
def __init__(self, pmid, title, journal, volume, page_range, year, authors,
doi):
self.title, self.journal, self.volume = title, journal, volume
self.page_range, self.year = page_range, year
self.pmid, self.doi = pmid, doi
self.authors = authors if authors is not None else []
@classmethod
def from_pubmed_id(cls, pubmed_id):
"""Create a Citation from just a PubMed ID.
This is done by querying NCBI's web API, so requires network access.
:param int pubmed_id: The PubMed identifier.
:return: A new Citation for the given identifier.
:rtype: :class:`Citation`
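
        For example (requires network access; the PubMed ID below is only
        an illustration, any valid ID works)::

            import ihm
            citation = ihm.Citation.from_pubmed_id(25161197)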
"""
def get_doi(ref):
for art_id in ref['articleids']:
if art_id['idtype'] == 'doi':
return enc(art_id['value'])
def get_page_range(ref):
rng = enc(ref['pages']).split('-')
if len(rng) == 2 and len(rng[1]) < len(rng[0]):
# map ranges like "2730-43" to 2730,2743 not 2730, 43
rng[1] = rng[0][:len(rng[0]) - len(rng[1])] + rng[1]
# Handle one page or empty page range
if len(rng) == 1:
rng = rng[0]
if rng == '':
rng = None
return rng
# JSON values are always Unicode, but on Python 2 we want non-Unicode
# strings, so convert to ASCII
if sys.version_info[0] < 3:
def enc(s):
return s.encode('ascii')
else:
def enc(s):
return s
url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'
'?db=pubmed&retmode=json&rettype=abstract&id=%s' % pubmed_id)
fh = urllib2.urlopen(url)
j = json.load(fh)
fh.close()
ref = j['result'][str(pubmed_id)]
authors = [enc(x['name']) for x in ref['authors']
if x['authtype'] == 'Author']
return cls(pmid=pubmed_id, title=enc(ref['title']),
journal=enc(ref['source']),
volume=enc(ref['volume']) or None,
page_range=get_page_range(ref),
year=enc(ref['pubdate']).split()[0],
authors=authors, doi=get_doi(ref))
class ChemComp(object):
"""A chemical component from which :class:`Entity` objects are constructed.
Usually these are amino acids (see :class:`LPeptideChemComp`) or
nucleic acids (see :class:`DNAChemComp` and :class:`RNAChemComp`).
For standard amino and nucleic acids, it is generally easier to use
a :class:`Alphabet` and refer to the components with their one-letter
(amino acids, RNA) or two-letter (DNA) codes.
:param str id: A globally unique identifier for this component (usually
three letters).
:param str code: A shorter identifier (usually one letter) that only
needs to be unique in the entity.
:param str code_canonical: Canonical version of `code` (which need not
be unique).
:param str name: A longer human-readable name for the component.
:param str formula: The chemical formula. This is a space-separated
list of the element symbols in the component, each followed
by an optional count (if omitted, 1 is assumed). The formula
is terminated with the formal charge (if not zero). The element
list should be sorted alphabetically, unless carbon is present,
in which case C and H precede the rest of the elements. For
example, water would be "H2 O" and arginine (with +1 formal
charge) "C6 H15 N4 O2 1".
For example, glycine would have
``id='GLY', code='G', code_canonical='G'`` while selenomethionine would
use ``id='MSE', code='MSE', code_canonical='M'``, guanosine (RNA)
``id='G', code='G', code_canonical='G'``, and deoxyguanosine (DNA)
``id='DG', code='DG', code_canonical='G'``.
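
    The formula weight is derived automatically from the formula; a minimal
    sketch, mirroring the built-in water component::

        import ihm
        water = ihm.ChemComp(id='HOH', code='HOH', code_canonical='HOH',
                             name='WATER', formula='H2 O')
        print(water.formula_weight)  # roughly 18.015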
"""
type = 'other'
_element_mass = {'H': 1.008, 'C': 12.011, 'N': 14.007, 'O': 15.999,
'P': 30.974, 'S': 32.060, 'Se': 78.971, 'Fe': 55.845}
def __init__(self, id, code, code_canonical, name=None, formula=None):
self.id = id
self.code, self.code_canonical, self.name = code, code_canonical, name
self.formula = formula
def __str__(self):
return ('<%s.%s(%s)>'
% (self.__class__.__module__, self.__class__.__name__,
self.id))
def __get_weight(self):
# Calculate weight from formula
if self.formula is None:
return
spl = self.formula.split()
# Remove formal charge if present
if len(spl) > 0 and spl[-1].isdigit():
del spl[-1]
r = re.compile(r'(\D+)(\d*)$')
weight = 0.
for s in spl:
m = r.match(s)
if m is None:
raise ValueError("Bad formula fragment: %s" % s)
emass = self._element_mass.get(m.group(1), None)
if emass:
weight += emass * (int(m.group(2)) if m.group(2) else 1)
else:
# If element is unknown, weight is unknown too
return None
return weight
formula_weight = property(
__get_weight,
doc="Formula weight (dalton). This is calculated automatically from "
"the chemical formula and known atomic masses.")
# Equal if all identifiers are the same
def __eq__(self, other):
return ((self.code, self.code_canonical, self.id, self.type) ==
(other.code, other.code_canonical, other.id, other.type))
def __hash__(self):
return hash((self.code, self.code_canonical, self.id, self.type))
class PeptideChemComp(ChemComp):
"""A single peptide component. Usually :class:`LPeptideChemComp` is used
instead (except for glycine) to specify chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'peptide linking'
class LPeptideChemComp(PeptideChemComp):
"""A single peptide component with (normal) L- chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'L-peptide linking'
class DPeptideChemComp(PeptideChemComp):
"""A single peptide component with (unusual) D- chirality.
See :class:`ChemComp` for a description of the parameters."""
type = 'D-peptide linking'
class DNAChemComp(ChemComp):
"""A single DNA component.
See :class:`ChemComp` for a description of the parameters."""
type = 'DNA linking'
class RNAChemComp(ChemComp):
"""A single RNA component.
See :class:`ChemComp` for a description of the parameters."""
type = 'RNA linking'
class NonPolymerChemComp(ChemComp):
"""A non-polymer chemical component, such as a ligand
(for crystal waters, use :class:`WaterChemComp`).
:param str id: A globally unique identifier for this component.
:param str name: A longer human-readable name for the component.
:param str formula: The chemical formula. See :class:`ChemComp` for
more details.
"""
type = "non-polymer"
def __init__(self, id, name=None, formula=None):
super(NonPolymerChemComp, self).__init__(id, id, id, name=name,
formula=formula)
class WaterChemComp(NonPolymerChemComp):
"""The chemical component for crystal water.
"""
def __init__(self):
super(WaterChemComp, self).__init__('HOH', name='WATER',
formula="H2 O")
class Alphabet(object):
"""A mapping from codes (usually one-letter, or two-letter for DNA) to
chemical components.
These classes can be used to construct sequences of components
when creating an :class:`Entity`. They can also be used like a Python
dict to get standard components, e.g.::
a = ihm.LPeptideAlphabet()
met = a['M']
gly = a['G']
See :class:`LPeptideAlphabet`, :class:`RNAAlphabet`,
:class:`DNAAlphabet`.
"""
def __getitem__(self, key):
return self._comps[key]
def __contains__(self, key):
return key in self._comps
keys = property(lambda self: self._comps.keys())
values = property(lambda self: self._comps.values())
items = property(lambda self: self._comps.items())
class LPeptideAlphabet(Alphabet):
"""A mapping from one-letter amino acid codes (e.g. H, M) to
L-amino acids (as :class:`LPeptideChemComp` objects, except for achiral
glycine which maps to :class:`PeptideChemComp`). Some other common
modified residues are also included (e.g. MSE). For these their full
name rather than a one-letter code is used.
"""
_comps = dict([code, LPeptideChemComp(id, code, code, name,
formula)]
for code, id, name, formula in [
('A', 'ALA', 'ALANINE', 'C3 H7 N O2'),
('C', 'CYS', 'CYSTEINE', 'C3 H7 N O2 S'),
('D', 'ASP', 'ASPARTIC ACID', 'C4 H7 N O4'),
('E', 'GLU', 'GLUTAMIC ACID', 'C5 H9 N O4'),
('F', 'PHE', 'PHENYLALANINE', 'C9 H11 N O2'),
('H', 'HIS', 'HISTIDINE', 'C6 H10 N3 O2 1'),
('I', 'ILE', 'ISOLEUCINE', 'C6 H13 N O2'),
('K', 'LYS', 'LYSINE', 'C6 H15 N2 O2 1'),
('L', 'LEU', 'LEUCINE', 'C6 H13 N O2'),
('M', 'MET', 'METHIONINE', 'C5 H11 N O2 S'),
('N', 'ASN', 'ASPARAGINE', 'C4 H8 N2 O3'),
('P', 'PRO', 'PROLINE', 'C5 H9 N O2'),
('Q', 'GLN', 'GLUTAMINE', 'C5 H10 N2 O3'),
('R', 'ARG', 'ARGININE', 'C6 H15 N4 O2 1'),
('S', 'SER', 'SERINE', 'C3 H7 N O3'),
('T', 'THR', 'THREONINE', 'C4 H9 N O3'),
('V', 'VAL', 'VALINE', 'C5 H11 N O2'),
('W', 'TRP', 'TRYPTOPHAN', 'C11 H12 N2 O2'),
('Y', 'TYR', 'TYROSINE', 'C9 H11 N O3')])
_comps['G'] = PeptideChemComp('GLY', 'G', 'G', name='GLYCINE',
formula="C2 H5 N O2")
# common non-standard L-amino acids
_comps.update([id, LPeptideChemComp(id, id, canon, name, formula)]
for id, canon, name, formula in [
('MSE', 'M', 'SELENOMETHIONINE', 'C5 H11 N O2 Se'),
('UNK', 'X', 'UNKNOWN', 'C4 H9 N O2')])
class DPeptideAlphabet(Alphabet):
"""A mapping from D-amino acid codes (e.g. DHI, MED) to
D-amino acids (as :class:`DPeptideChemComp` objects, except for achiral
glycine which maps to :class:`PeptideChemComp`). See
:class:`LPeptideAlphabet` for more details.
"""
_comps = dict([code, DPeptideChemComp(code, code, canon, name, formula)]
for canon, code, name, formula in [
('A', 'DAL', 'D-ALANINE', 'C3 H7 N O2'),
('C', 'DCY', 'D-CYSTEINE', 'C3 H7 N O2 S'),
('D', 'DAS', 'D-ASPARTIC ACID', 'C4 H7 N O4'),
('E', 'DGL', 'D-GLUTAMIC ACID', 'C5 H9 N O4'),
('F', 'DPN', 'D-PHENYLALANINE', 'C9 H11 N O2'),
('H', 'DHI', 'D-HISTIDINE', 'C6 H10 N3 O2 1'),
('I', 'DIL', 'D-ISOLEUCINE', 'C6 H13 N O2'),
('K', 'DLY', 'D-LYSINE', 'C6 H14 N2 O2'),
('L', 'DLE', 'D-LEUCINE', 'C6 H13 N O2'),
('M', 'MED', 'D-METHIONINE', 'C5 H11 N O2 S'),
('N', 'DSG', 'D-ASPARAGINE', 'C4 H8 N2 O3'),
('P', 'DPR', 'D-PROLINE', 'C5 H9 N O2'),
('Q', 'DGN', 'D-GLUTAMINE', 'C5 H10 N2 O3'),
('R', 'DAR', 'D-ARGININE', 'C6 H15 N4 O2 1'),
('S', 'DSN', 'D-SERINE', 'C3 H7 N O3'),
('T', 'DTH', 'D-THREONINE', 'C4 H9 N O3'),
('V', 'DVA', 'D-VALINE', 'C5 H11 N O2'),
('W', 'DTR', 'D-TRYPTOPHAN', 'C11 H12 N2 O2'),
('Y', 'DTY', 'D-TYROSINE', 'C9 H11 N O3')])
_comps['G'] = PeptideChemComp('GLY', 'G', 'G', name='GLYCINE',
formula="C2 H5 N O2")
class RNAAlphabet(Alphabet):
"""A mapping from one-letter nucleic acid codes (e.g. A) to
RNA (as :class:`RNAChemComp` objects)."""
_comps = dict([id, RNAChemComp(id, id, id, name, formula)]
for id, name, formula in [
('A', "ADENOSINE-5'-MONOPHOSPHATE", 'C10 H14 N5 O7 P'),
('C', "CYTIDINE-5'-MONOPHOSPHATE", 'C9 H14 N3 O8 P'),
('G', "GUANOSINE-5'-MONOPHOSPHATE", 'C10 H14 N5 O8 P'),
('U', "URIDINE-5'-MONOPHOSPHATE", 'C9 H13 N2 O9 P')])
class DNAAlphabet(Alphabet):
"""A mapping from two-letter nucleic acid codes (e.g. DA) to
DNA (as :class:`DNAChemComp` objects)."""
_comps = dict([code, DNAChemComp(code, code, canon, name, formula)]
for code, canon, name, formula in [
('DA', 'A', "2'-DEOXYADENOSINE-5'-MONOPHOSPHATE",
'C10 H14 N5 O6 P'),
('DC', 'C', "2'-DEOXYCYTIDINE-5'-MONOPHOSPHATE",
'C9 H14 N3 O7 P'),
('DG', 'G', "2'-DEOXYGUANOSINE-5'-MONOPHOSPHATE",
'C10 H14 N5 O7 P'),
('DT', 'T', "THYMIDINE-5'-MONOPHOSPHATE",
'C10 H15 N2 O8 P')])
class EntityRange(object):
"""Part of an entity. Usually these objects are created from
an :class:`Entity`, e.g. to get a range covering residues 4 through
7 in `entity` use::
entity = ihm.Entity(sequence=...)
rng = entity(4,7)
"""
def __init__(self, entity, seq_id_begin, seq_id_end):
if not entity.is_polymeric():
raise TypeError("Can only create ranges for polymeric entities")
self.entity = entity
# todo: check range for validity (at property read time)
self.seq_id_range = (seq_id_begin, seq_id_end)
def __eq__(self, other):
try:
return (self.entity is other.entity
and self.seq_id_range == other.seq_id_range)
except AttributeError:
return False
def __hash__(self):
return hash((id(self.entity), self.seq_id_range))
# Use same ID as the original entity
_id = property(lambda self: self.entity._id)
class Atom(object):
"""A single atom in an entity or asymmetric unit. Usually these objects
are created by calling :meth:`Residue.atom`.
Note that this class does not store atomic coordinates of a given
atom in a given model; for that, see :class:`ihm.model.Atom`.
"""
__slots__ = ['residue', 'id']
def __init__(self, residue, id):
self.residue, self.id = residue, id
entity = property(lambda self: self.residue.entity)
asym = property(lambda self: self.residue.asym)
seq_id = property(lambda self: self.residue.seq_id)
class Residue(object):
"""A single residue in an entity or asymmetric unit. Usually these objects
are created by calling :meth:`Entity.residue` or
:meth:`AsymUnit.residue`.
"""
__slots__ = ['entity', 'asym', 'seq_id', '_range_id']
def __init__(self, seq_id, entity=None, asym=None):
self.entity = entity
self.asym = asym
# todo: check id for validity (at property read time)
self.seq_id = seq_id
def atom(self, atom_id):
"""Get a :class:`Atom` in this residue with the given name."""
return Atom(residue=self, id=atom_id)
def _get_auth_seq_id(self):
return self.asym._get_auth_seq_id(self.seq_id)
auth_seq_id = property(_get_auth_seq_id,
doc="Author-provided seq_id; only makes sense "
"for asymmetric units")
# Allow passing residues where a range is requested
# (e.g. to ResidueFeature)
seq_id_range = property(lambda self: (self.seq_id, self.seq_id))
class Entity(object):
"""Represent a CIF entity (with a unique sequence)
:param sequence sequence: The primary sequence, as a sequence of
:class:`ChemComp` objects, and/or codes looked up in `alphabet`.
:param alphabet: The mapping from code to chemical components to use
(it is not necessary to instantiate this class).
:type alphabet: :class:`Alphabet`
:param str description: A short text name for the sequence.
:param str details: Longer text describing the sequence.
:param source: The method by which the sample for this entity was
produced.
:type source: :class:`ihm.source.Source`
:param references: Information about this entity stored in external
databases (for example the sequence in UniProt)
:type references: sequence of :class:`ihm.reference.Reference` objects
The sequence for an entity can be specified explicitly as a list of
chemical components, or (more usually) as a list or string of codes,
or a mixture of both.
For example::
# Construct with a string of one-letter amino acid codes
protein = ihm.Entity('AHMD')
# Some less common amino acids (e.g. MSE) have three-letter codes
protein_with_mse = ihm.Entity(['A', 'H', 'MSE', 'D'])
# Can use a non-default alphabet to make DNA or RNA sequences
dna = ihm.Entity(('DA', 'DC'), alphabet=ihm.DNAAlphabet)
rna = ihm.Entity('AC', alphabet=ihm.RNAAlphabet)
# Can pass explicit ChemComp objects by looking them up in Alphabets
dna_al = ihm.DNAAlphabet()
rna_al = ihm.RNAAlphabet()
dna_rna_hybrid = ihm.Entity((dna_al['DG'], rna_al['C']))
# For unusual components (e.g. modified residues or ligands),
# new ChemComp objects can be constructed
psu = ihm.RNAChemComp(id='PSU', code='PSU', code_canonical='U',
name="PSEUDOURIDINE-5'-MONOPHOSPHATE",
formula='C9 H13 N2 O9 P')
rna_with_psu = ihm.Entity(('A', 'C', psu), alphabet=ihm.RNAAlphabet)
For more examples, see the
`ligands and water example <https://github.com/ihmwg/python-ihm/blob/main/examples/ligands_water.py>`_.
All entities should be stored in the top-level System object;
see :attr:`System.entities`.
""" # noqa: E501
number_of_molecules = 1
def __get_type(self):
if self.is_polymeric():
return 'polymer'
else:
return 'water' if self.sequence[0].code == 'HOH' else 'non-polymer'
type = property(__get_type)
def __get_src_method(self):
if self.source:
return self.source.src_method
elif self.type == 'water':
return 'nat'
else:
return 'man'
def __set_src_method(self, val):
raise TypeError("src_method is read-only; assign an appropriate "
"subclass of ihm.source.Source to source instead")
src_method = property(__get_src_method, __set_src_method)
def __get_weight(self):
weight = 0.
for s in self.sequence:
w = s.formula_weight
# If any component's weight is unknown, the total is too
if w:
weight += w
else:
return None
return weight
formula_weight = property(
__get_weight,
doc="Formula weight (dalton). This is calculated automatically "
"from that of the chemical components.")
def __init__(self, sequence, alphabet=LPeptideAlphabet,
description=None, details=None, source=None, references=[]):
def get_chem_comp(s):
if isinstance(s, ChemComp):
return s
else:
return alphabet._comps[s]
self.sequence = tuple(get_chem_comp(s) for s in sequence)
self.description, self.details = description, details
self.source = source
self.references = []
self.references.extend(references)
def __str__(self):
return "<ihm.Entity(%s)>" % self.description
def is_polymeric(self):
"""Return True iff this entity represents a polymer, such as an
amino acid sequence or DNA/RNA chain (and not a ligand or water)"""
return len(self.sequence) != 1 or not isinstance(self.sequence[0],
NonPolymerChemComp)
def residue(self, seq_id):
"""Get a :class:`Residue` at the given sequence position"""
return Residue(entity=self, seq_id=seq_id)
# Entities are considered identical if they have the same sequence
def __eq__(self, other):
return self.sequence == other.sequence
def __hash__(self):
return hash(self.sequence)
def __call__(self, seq_id_begin, seq_id_end):
return EntityRange(self, seq_id_begin, seq_id_end)
def __get_seq_id_range(self):
if self.is_polymeric():
return (1, len(self.sequence))
else:
# Nonpolymers don't have the concept of seq_id
return (None, None)
seq_id_range = property(__get_seq_id_range, doc="Sequence range")
class AsymUnitRange(object):
"""Part of an asymmetric unit. Usually these objects are created from
an :class:`AsymUnit`, e.g. to get a range covering residues 4 through
7 in `asym` use::
asym = ihm.AsymUnit(entity)
rng = asym(4,7)
"""
def __init__(self, asym, seq_id_begin, seq_id_end):
if asym.entity is not None and not asym.entity.is_polymeric():
raise TypeError("Can only create ranges for polymeric entities")
self.asym = asym
# todo: check range for validity (at property read time)
self.seq_id_range = (seq_id_begin, seq_id_end)
def __eq__(self, other):
try:
return (self.asym is other.asym
and self.seq_id_range == other.seq_id_range)
except AttributeError:
return False
def __hash__(self):
return hash((id(self.asym), self.seq_id_range))
# Use same ID and entity as the original asym unit
_id = property(lambda self: self.asym._id)
_ordinal = property(lambda self: self.asym._ordinal)
entity = property(lambda self: self.asym.entity)
class AsymUnit(object):
"""An asymmetric unit, i.e. a unique instance of an Entity that
was modeled.
:param entity: The unique sequence of this asymmetric unit.
:type entity: :class:`Entity`
:param str details: Longer text description of this unit.
:param auth_seq_id_map: Mapping from internal 1-based consecutive
residue numbering (`seq_id`) to "author-provided" numbering
(`auth_seq_id`). This can be either be an int offset, in
which case ``auth_seq_id = seq_id + auth_seq_id_map``, or
a mapping type (dict, list, tuple) in which case
``auth_seq_id = auth_seq_id_map[seq_id]``. (Note that if a `list`
or `tuple` is used, the first element in the list or tuple does
**not** correspond to the first residue and will never be used -
since `seq_id` can never be zero.) The default if
not specified, or not in the mapping, is for
``auth_seq_id == seq_id``.
:param str id: User-specified ID (usually a string of one or more
upper-case letters, e.g. A, B, C, AA). If not specified,
IDs are automatically assigned alphabetically.
See :attr:`System.asym_units`.
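    For example, with an integer offset the author numbering is simply
    shifted (a sketch; ``entity`` is assumed to be a polymeric
    :class:`Entity` defined elsewhere)::
        asym = ihm.AsymUnit(entity, auth_seq_id_map=100)
        # asym.residue(1).auth_seq_id == 101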
"""
def __init__(self, entity, details=None, auth_seq_id_map=0, id=None):
self.entity, self.details = entity, details
self.auth_seq_id_map = auth_seq_id_map
self.id = id
def _get_auth_seq_id(self, seq_id):
if isinstance(self.auth_seq_id_map, int):
return seq_id + self.auth_seq_id_map
else:
try:
return self.auth_seq_id_map[seq_id]
except (KeyError, IndexError):
return seq_id
def __call__(self, seq_id_begin, seq_id_end):
return AsymUnitRange(self, seq_id_begin, seq_id_end)
def residue(self, seq_id):
"""Get a :class:`Residue` at the given sequence position"""
return Residue(asym=self, seq_id=seq_id)
seq_id_range = property(lambda self: self.entity.seq_id_range,
doc="Sequence range")
class Assembly(list):
"""A collection of parts of the system that were modeled or probed
together.
:param sequence elements: Initial set of parts of the system.
:param str name: Short text name of this assembly.
:param str description: Longer text that describes this assembly.
This is implemented as a simple list of asymmetric units (or parts of
them), i.e. a list of :class:`AsymUnit` and/or :class:`AsymUnitRange`
objects. An Assembly is typically assigned to one or more of
- :class:`~ihm.model.Model`
- :class:`ihm.protocol.Step`
- :class:`ihm.analysis.Step`
- :class:`~ihm.restraint.Restraint`
See also :attr:`System.complete_assembly`
and :attr:`System.orphan_assemblies`.
Note that any duplicate assemblies will be pruned on output."""
#: :class:`Assembly` that is the immediate parent in a hierarchy, or `None`
parent = None
def __init__(self, elements=(), name=None, description=None):
super(Assembly, self).__init__(elements)
self.name, self.description = name, description
class ChemDescriptor(object):
"""Description of a non-polymeric chemical component used in the experiment.
For example, this might be a fluorescent probe or cross-linking agent.
This class describes the chemical structure of the component, for
example with a SMILES or INCHI descriptor, so that it is uniquely
defined. A descriptor is typically assigned to a
:class:`ihm.restraint.CrossLinkRestraint`.
See :mod:`ihm.cross_linkers` for chemical descriptors of some
commonly-used cross-linking agents.
:param str auth_name: Author-provided name
:param str chem_comp_id: If this chemical is listed in the Chemical
Component Dictionary, its three-letter identifier
:param str chemical_name: The systematic (IUPAC) chemical name
:param str common_name: Common name for the component
:param str smiles: SMILES string
:param str smiles_canonical: Canonical SMILES string
:param str inchi: IUPAC INCHI descriptor
:param str inchi_key: Hashed INCHI key
See also :attr:`System.orphan_chem_descriptors`.
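    For example (a sketch with illustrative values only; ethanol is shown
    just to demonstrate the fields, not as a real cross-linking agent)::
        d = ihm.ChemDescriptor(auth_name='ethanol',
                               chemical_name='ethanol', smiles='CCO')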
"""
def __init__(self, auth_name, chem_comp_id=None, chemical_name=None,
common_name=None, smiles=None, smiles_canonical=None,
inchi=None, inchi_key=None):
self.auth_name, self.chem_comp_id = auth_name, chem_comp_id
self.chemical_name, self.common_name = chemical_name, common_name
self.smiles, self.smiles_canonical = smiles, smiles_canonical
self.inchi, self.inchi_key = inchi, inchi_key
|
the-stack_0_5136 | import tkinter
import csv

# Read the CSV file into a full row list plus per-column value lists
myList = []   # every row of the file
myList1 = []  # first column
myList2 = []  # second column
myList3 = []  # third column
with open('class.csv') as f:
    csv_f = csv.reader(f)
    for row in csv_f:
        myList.append(row)
        myList1.append(row[0])
        myList2.append(row[1])
        myList3.append(row[2])


class createGUIClass:
    def __init__(self, master):
        master.minsize(width=800, height=500)
        master.maxsize(width=1000, height=700)


root = tkinter.Tk()
createGUI = createGUIClass(root)

# Lay out every CSV cell in its own label on a row/column grid
for r in range(len(myList)):
    for c in range(len(myList[r])):
        tkinter.Label(root, text='%s' % myList[r][c],
                      borderwidth=10).grid(row=r, column=c)

root.mainloop()
|
the-stack_0_5137 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 17:56:23 2019
@author: Khizar Anjum
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, MaxPooling1D, Dropout, Flatten, Add, Conv1D
from keras.models import Model
#%%
drp = [0.3, 0.5, 0.7, 0.9]
files = [pd.read_csv('spline_log'+str(i)+'.csv',index_col=0) for i in drp]
#%%
[plt.plot(df.index.values,df['loss'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Training loss plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_loss'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Validation loss plot')
plt.show()
#%%
[plt.plot(df.index.values,df['sensitivity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Sensitivity plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_sensitivity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('validaton sensitivity plot')
plt.show()
#%%
prev_train = 0.2201
[plt.plot(df.index.values,np.array(df['sensitivity'].values)*prev_train + np.array(df['specificity'].values)*(1-prev_train)) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Training accuracy plot')
plt.show()
#%%
prev_val = 0.2136
[plt.plot(df.index.values,np.array(df['val_sensitivity'].values)*prev_val + np.array(df['val_specificity'].values)*(1-prev_val)) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Validation accuracy plot')
plt.show()
#%%
[plt.plot(df.index.values,df['specificity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('Specificity plot')
plt.show()
#%%
[plt.plot(df.index.values,df['val_specificity'].values) for df in files]
plt.legend(['dropout = '+str(i) for i in drp])
plt.title('validaton specificity plot')
plt.show()
#%%
def spline_model(J=2, Q=128, T=200, drp=0.5):
    # drp is the dropout rate applied throughout the network
inputs = Input(shape=(22050,1))
#
#
x = Conv1D(filters=int(J*Q),kernel_size=int(T),padding='valid',strides=10, activation='square_activation')(inputs)#,kernel_initializer=real_sp_initializer
#y = Conv1D(filters=int(J*Q),kernel_size=int(T),padding='valid',strides=10, activation='square_activation')(inputs)#,kernel_initializer=imag_sp_initializer
#xy = Add()([x,y])
#print(xy)
#c1 = Conv1D(24,128,activation='relu',strides=1,padding='valid')(xy)
#p1 = MaxPooling1D(pool_size=2,strides=1, padding='valid')(c1)
d1 = Dropout(drp)(x)
c2 = Conv1D(128,4,activation='relu',strides=10,padding='valid')(d1)
#p2 = MaxPooling1D(pool_size=100,strides=10, padding='valid')(c2)
d2 = Dropout(drp)(c2)
c3 = Conv1D(128,4,activation='relu',strides=10,padding='valid')(d2)
#print(c3)
p3 = MaxPooling1D(pool_size=10,strides=5, padding='valid')(c3)
#print(p3)
#d3 = Dropout(0.1)(p3)
#print(d3)
#c4 = Conv1D(4,16,activation='relu',strides=1,padding='valid')(d2)
f1 = Flatten()(p3)
#print(f1)
dn1 = Dense(128,activation='sigmoid')(f1)
d4 = Dropout(drp)(dn1)
dn2 = Dense(32,activation='sigmoid')(d4)
d5 = Dropout(drp)(dn2)
predictions = Dense(2,activation='softmax')(d5)
#training and evaluating the model
model = Model(inputs=inputs, outputs=predictions)
return model
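#%%
# Minimal usage sketch (an illustration only). It assumes the custom
# 'square_activation' referenced above has been registered with Keras
# (e.g. via keras.utils.get_custom_objects()) before the model is built:
#   model = spline_model(J=2, Q=128, T=200, drp=0.5)
#   model.compile(optimizer='adam', loss='categorical_crossentropy')
#   model.summary()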
#%% |
the-stack_0_5139 | # Artificial Intelligence
# Grado en Ingeniería Informática
# 2017-18
# play_tennis.py (Unit 3, slide 8)
attributes=[('Outlook',['Sunny','Overcast','Rainy']),
('Temperature',['High','Low','Mild']),
('Humidity',['High','Normal']),
('Wind',['Weak','Strong'])]
class_name='Play Tennis'
classes=['yes','no']
train=[['Sunny' , 'High' , 'High' , 'Weak' , 'no'],
['Sunny' , 'High' , 'High' , 'Strong', 'no'],
['Overcast','High' , 'High' , 'Weak' , 'yes'],
['Rainy' , 'Mild' , 'High' , 'Weak' , 'yes'],
['Rainy' , 'Low' , 'Normal' , 'Weak' , 'yes'],
['Rainy' , 'Low' , 'Normal' , 'Strong', 'no'],
['Overcast','Low' , 'Normal' , 'Strong', 'yes'],
['Sunny' , 'Mild' , 'High' , 'Weak' , 'no'],
['Sunny' , 'Low' , 'Normal' , 'Weak' , 'yes'],
['Rainy' , 'Mild' , 'Normal' , 'Weak' , 'yes'],
['Sunny' , 'Mild' , 'Normal' , 'Strong', 'yes'],
['Overcast','Mild' , 'High' , 'Strong', 'yes'],
['Overcast','High' , 'Normal' , 'Weak' , 'yes'],
['Rainy', 'Mild' , 'High' , 'Strong', 'no']]
|
the-stack_0_5141 | """
Command Line Interface of the checker
"""
import argparse
import sys
import termcolor
def colored(text, color):
"""Returns string with colored text depending on platform"""
colored_text = text
    if not sys.platform.startswith('win'):
        # termcolor colouring is only skipped on Windows
        colored_text = termcolor.colored(text, color)
return colored_text
def print_error(message):
"""Prints message red colored"""
print(colored(message, "red"))
def print_success(message):
"""Prints message green colored"""
print(colored(message, "green"))
def get_parsed_args():
"""Parses arguments from stdin"""
parser = argparse.ArgumentParser(description='Simple test runner')
parser.add_argument(
'-p',
metavar='program',
        default='./main' if not sys.platform.startswith('win') else 'main',
help='path to the tested program')
parser.add_argument('-d',
metavar='directory',
default='tests',
help='path to directory containing tests')
parser.add_argument('-g',
metavar='groups',
nargs='+',
help="groups in given directory that should be tested")
parser.add_argument('-v',
metavar='verifier',
help="path to custom verifier")
parser.add_argument('-b',
metavar='break',
default='true',
choices=['true', 'false'],
help='break on failed tests [true/false]')
parser.add_argument('-t',
metavar='timeout',
default=None,
type=float,
help='time limit after which TLE is raised')
parser.add_argument(
'--timer',
help="run program will have a line 'Time: [float]' on stderr",
action='store_true')
parser.add_argument('--sha',
help='calculate sha-256 instead of veryfying',
action='store_true')
return parser.parse_args()
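
# Example invocation (hypothetical script name; the options are those defined
# above):
#   python checker.py -p ./main -d tests -g group1 group2 -t 2.0 --timer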
|
the-stack_0_5143 | from __future__ import unicode_literals
# For backwards-compatibility, keep this file.
# (Many people are going to have key bindings that rely on this file.)
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.)
InEditingMode = in_editing_mode
|
the-stack_0_5144 | try:
from . import generic as g
except BaseException:
import generic as g
class GLTFTest(g.unittest.TestCase):
def test_duck(self):
scene = g.get_mesh('Duck.glb', process=False)
# should have one mesh
assert len(scene.geometry) == 1
# get the mesh
geom = next(iter(scene.geometry.values()))
# should not be watertight
assert not geom.is_volume
# make sure export doesn't crash
export = scene.export(file_type='glb')
assert len(export) > 0
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
# if we merge ugly it should now be watertight
geom.merge_vertices(textured=False)
assert geom.is_volume
def test_tex_export(self):
# load textured PLY
mesh = g.get_mesh('fuze.ply')
assert hasattr(mesh.visual, 'uv')
# make sure export as GLB doesn't crash on scenes
export = mesh.scene().export(file_type='glb')
assert len(export) > 0
# make sure it works on meshes
export = mesh.export(file_type='glb')
assert len(export) > 0
def test_cesium(self):
"""
A GLTF with a multi- primitive mesh
"""
s = g.get_mesh('CesiumMilkTruck.glb')
# should be one Trimesh object per GLTF "primitive"
assert len(s.geometry) == 4
# every geometry displayed once, except wheels twice
assert len(s.graph.nodes_geometry) == 5
# make sure export doesn't crash
export = s.export(file_type='glb')
assert len(export) > 0
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(s, reloaded)
def test_units(self):
"""
        Trimesh will store units as a GLTF extra if they
        are defined, so check that.
"""
original = g.get_mesh('pins.glb')
# export it as a a GLB file
export = original.export(file_type='glb')
kwargs = g.trimesh.exchange.gltf.load_glb(
g.trimesh.util.wrap_as_stream(export))
# roundtrip it
reloaded = g.trimesh.exchange.load.load_kwargs(kwargs)
# make basic assertions
g.scene_equal(original, reloaded)
# make assertions on original and reloaded
for scene in [original, reloaded]:
# units should be stored as an extra
assert scene.units == 'mm'
# make sure we have two unique geometries
assert len(scene.geometry) == 2
# that should have seven instances
assert len(scene.graph.nodes_geometry) == 7
# all meshes should be well constructed
assert all(m.is_volume for m in
scene.geometry.values())
# check unit conversions for fun
extents = scene.extents.copy()
as_in = scene.convert_units('in')
# should all be exactly mm -> in conversion factor
assert g.np.allclose(
extents / as_in.extents, 25.4, atol=.001)
m = g.get_mesh('testplate.glb')
assert m.units == 'meters'
def test_gltf(self):
# split a multibody mesh into a scene
scene = g.trimesh.scene.split_scene(
g.get_mesh('cycloidal.ply'))
# should be 117 geometries
assert len(scene.geometry) >= 117
# a dict with {file name: str}
export = scene.export(file_type='gltf')
# load from just resolver
r = g.trimesh.load(file_obj=None,
file_type='gltf',
resolver=export)
# will assert round trip is roughly equal
g.scene_equal(r, scene)
# try loading from a ZIP archive
zipped = g.trimesh.util.compress(export)
r = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(zipped),
file_type='zip')
# try loading from a file name
# will require a file path resolver
with g.TemporaryDirectory() as d:
for file_name, data in export.items():
with open(g.os.path.join(d, file_name), 'wb') as f:
f.write(data)
# load from file path of header GLTF
rd = g.trimesh.load(
g.os.path.join(d, 'model.gltf'))
# will assert round trip is roughly equal
g.scene_equal(rd, scene)
def test_gltf_pole(self):
scene = g.get_mesh('simple_pole.glb')
# should have multiple primitives
assert len(scene.geometry) == 11
export = scene.export(file_type='glb')
assert len(export) > 0
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
def test_material_hash(self):
# load mesh twice independently
a = g.get_mesh('fuze.obj')
b = g.get_mesh('fuze.obj')
# move one of the meshes away from the other
a.apply_translation([a.scale, 0, 0])
# materials should not be the same object
assert id(a.visual.material) != id(b.visual.material)
# despite being loaded separately material hash should match
assert hash(a.visual.material) == hash(b.visual.material)
# create a scene with two meshes
scene = g.trimesh.Scene([a, b])
# get the exported GLTF header of a scene with both meshes
header = g.json.loads(scene.export(
file_type='gltf')['model.gltf'].decode('utf-8'))
# header should contain exactly one material
assert len(header['materials']) == 1
# both meshes should be contained in the export
assert len(header['meshes']) == 2
# get a reloaded version
reloaded = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(
scene.export(file_type='glb')),
file_type='glb')
# meshes should have survived
assert len(reloaded.geometry) == 2
# get meshes back
ar, br = reloaded.geometry.values()
# should have been loaded as a PBR material
assert isinstance(ar.visual.material,
g.trimesh.visual.material.PBRMaterial)
# materials should have the same memory location
assert id(ar.visual.material) == id(br.visual.material)
# make sure hash is returning something
ahash = hash(ar.visual.material)
# should be returning valid material hashes
assert isinstance(ahash, int)
assert ahash != 0
def test_node_name(self):
"""
Test to see if node names generally survive
an export-import cycle.
"""
# a scene
s = g.get_mesh('cycloidal.3DXML')
# export as GLB then re-load
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(
s.export(file_type='glb')),
file_type='glb')
# make sure we have the same geometries before and after
assert set(s.geometry.keys()) == set(r.geometry.keys())
# make sure the node names are the same before and after
assert (set(s.graph.nodes_geometry) ==
set(r.graph.nodes_geometry))
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
the-stack_0_5146 | from sqlalchemy import func
from fence.errors import NotFound, UserError
from fence.models import (
Project,
StorageAccess,
CloudProvider,
ProjectToBucket,
Bucket,
User,
AccessPrivilege,
Group,
UserToGroup,
)
__all__ = [
"get_project",
"create_project_with_dict",
"create_project",
"create_bucket_on_project",
"get_project_info",
"get_all_projects",
"delete_project",
"delete_bucket_on_project",
"list_buckets_on_project",
"get_cloud_providers_from_project",
"get_buckets_by_project_cloud_provider",
"get_user_project_access_privilege",
]
def get_project(current_session, projectname):
return current_session.query(Project).filter_by(name=projectname).first()
def create_project_with_dict(current_session, project_data):
"""
Create a project given a dict of all needed info
Args:
project_data (dict): dict of project info
Return:
None
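    Example:
        A sketch of the expected structure (the values are placeholders)::
            {
                "name": "project1",
                "auth_id": "phs000001",
                "storage_accesses": [
                    {"name": "s3", "buckets": ["bucket1"]}
                ],
            }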
"""
auth_id = project_data["auth_id"]
name = project_data.get("name") or auth_id
storage_accesses = project_data.get("storage_accesses", [])
project = create_project(
current_session, name, auth_id, [sa["name"] for sa in storage_accesses]
)
for sa in storage_accesses:
for bucket in sa.get("buckets", []):
create_bucket_on_project(current_session, name, bucket, sa["name"])
return project
def create_project(current_session, name, auth_id, storage_accesses):
"""
Creates a project with an associated auth_id and storage access
"""
new_project = Project(name=name, auth_id=auth_id)
current_session.add(new_project)
current_session.flush()
for storage in storage_accesses:
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.name == storage)
.first()
)
if provider:
new_storage_access = StorageAccess(
provider_id=provider.id, project_id=new_project.id
)
current_session.add(new_storage_access)
else:
raise NotFound()
return new_project
def create_bucket_on_project(current_session, project_name, bucket_name, provider_name):
"""
Create a bucket and assign it to a project
"""
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project ", project_name, " not found"])
raise NotFound(msg)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.name == provider_name)
.first()
)
if not provider:
msg = "".join(["Provider ", provider_name, " not found"])
raise NotFound(msg)
bucket = (
current_session.query(Bucket)
.filter(Bucket.name == bucket_name, Bucket.provider_id == provider.id)
.first()
)
if not bucket:
bucket = Bucket(name=bucket_name, provider_id=provider.id)
current_session.add(bucket)
current_session.flush()
proj_to_bucket = ProjectToBucket(
project_id=project.id, bucket_id=bucket.id, privilege=["owner"]
)
current_session.add(proj_to_bucket)
# Find the users that need to be updated
users_in_project = current_session.query(AccessPrivilege).filter(
AccessPrivilege.project_id == project.id
)
users_to_update = []
for row in users_in_project:
usr = current_session.query(User).filter(User.id == row.user_id).first()
users_to_update.append((usr, row.privilege))
return {
"result": "success",
"provider": provider,
"bucket": bucket,
"users_to_update": users_to_update,
}
else:
raise UserError("Error, name already in use for that storage system")
def get_project_info(current_session, project_name):
"""
    Get project info from the userdatamodel by its name
"""
proj = get_project(current_session, project_name)
if not proj:
msg = "".join(["Error: project ", project_name, " not found"])
raise NotFound(msg)
info = {
"id": proj.id,
"name": proj.name,
"auth_id": proj.auth_id,
"description": proj.description,
"associated buckets": [],
}
buckets = current_session.query(ProjectToBucket).filter(
ProjectToBucket.project_id == proj.id
)
for row in buckets:
bucket = (
current_session.query(Bucket).filter(Bucket.id == row.bucket_id).first()
)
info["associated buckets"].append(bucket.name)
return info
def get_all_projects(current_session):
projects = current_session.query(Project).all()
projects_info = [
get_project_info(current_session, project.name) for project in projects
]
return {"projects": projects_info}
def delete_project(current_session, project_name):
"""
Delete the project from the database
The project should have no buckets in use
"""
proj = current_session.query(Project).filter(Project.name == project_name).first()
if not proj:
return {"result": "error, project not found"}
buckets = (
current_session.query(ProjectToBucket)
.filter(ProjectToBucket.project_id == proj.id)
.first()
)
if buckets:
msg = (
"error, project still has buckets associated with it. Please"
" remove those first and then retry."
)
return {"result": msg}
storage_access = current_session.query(StorageAccess).filter(
StorageAccess.project_id == proj.id
)
"""
Find the users that only belong to this project
and store them to be removed
"""
accesses = current_session.query(AccessPrivilege).filter(
AccessPrivilege.project_id == proj.id
)
users_to_remove = []
for access in accesses:
num = (
current_session.query(func.count(AccessPrivilege.project_id))
.filter(AccessPrivilege.user_id == access.user_id)
.scalar()
)
if num == 1:
for storage in storage_access:
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == storage.provider_id)
.first()
)
usr = (
current_session.query(User)
.filter(User.id == access.user_id)
.first()
)
users_to_remove.append((provider, usr))
current_session.delete(usr)
current_session.delete(access)
for storage in storage_access:
current_session.delete(storage)
current_session.delete(proj)
return {"result": "success", "users_to_remove": users_to_remove}
def delete_bucket_on_project(current_session, project_name, bucket_name):
"""
Remove a bucket and its relationship to a project
"""
bucket = current_session.query(Bucket).filter_by(name=bucket_name).first()
if not bucket:
msg = "".join(["Bucket name ", bucket_name, " not found"])
raise NotFound(msg)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == bucket.provider_id)
.first()
)
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project name ", project_name, " not found"])
raise NotFound(msg)
proj_to_bucket = (
current_session.query(ProjectToBucket)
.filter(
ProjectToBucket.bucket_id == bucket.id,
ProjectToBucket.project_id == project.id,
)
.first()
)
if proj_to_bucket:
current_session.delete(proj_to_bucket)
current_session.delete(bucket)
return {"result": "success", "provider": provider}
else:
current_session.delete(bucket)
msg = (
"WARNING: Project-to-bucket "
"relationship not found, deleting bucket anyway"
)
return {"result": msg, "provider": provider}
def list_buckets_on_project(current_session, project_name):
"""
List all the buckets assigned to a project
"""
project = (
current_session.query(Project).filter(Project.name == project_name).first()
)
if not project:
msg = "".join(["Project name ", project_name, " not found"])
raise NotFound(msg)
buckets = current_session.query(ProjectToBucket).filter(
ProjectToBucket.project_id == project.id
)
response = {"buckets": []}
for bucket in buckets:
buck = (
current_session.query(Bucket).filter(Bucket.id == bucket.bucket_id).first()
)
provider = (
current_session.query(CloudProvider)
.filter(CloudProvider.id == buck.provider_id)
.first()
)
new_buck = {"name": buck.name, "provider": provider.name}
response["buckets"].append(new_buck)
return response
def get_cloud_providers_from_project(current_session, project_id):
"""
Retrieve cloud provider to be used in other operations that require the
backend.
"""
accesses = current_session.query(StorageAccess).filter(
StorageAccess.project_id == project_id
)
cloud_providers = []
for access in accesses:
cloud_providers.append(
current_session.query(CloudProvider)
.filter(CloudProvider.id == access.provider_id)
.first()
)
return cloud_providers
def get_buckets_by_project_cloud_provider(current_session, prjct_id, provider_id):
"""
List all the buckets assigned to a project
"""
buckets = current_session.query(ProjectToBucket).filter_by(project_id=prjct_id)
response = {"buckets": []}
for bucket in buckets:
buck = (
current_session.query(Bucket)
.filter(Bucket.id == bucket.bucket_id, Bucket.provider_id == provider_id)
.first()
)
if buck:
response["buckets"].append(buck)
return response
def get_user_project_access_privilege(current_session, user, project):
return (
current_session.query(AccessPrivilege)
.filter_by(project_id=project.id, user_id=user.id)
.first()
)
|
the-stack_0_5147 | import tensorflow as tf
import numpy as np
def linear(input_, output_size, stddev=0.02, bias_start=0.0, activation_fn=None, name='linear'):
"""
Fully connected linear layer
:param input_:
:param output_size:
:param stddev:
:param bias_start:
:param activation_fn:
:param name:
:return:
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
w = tf.get_variable('Matrix', [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
b = tf.get_variable('bias', [output_size],
initializer=tf.constant_initializer(bias_start))
out = tf.nn.bias_add(tf.matmul(input_, w), b)
if activation_fn is not None:
return activation_fn(out), w, b
else:
return out, w, b
def simple_linear(input_, initializer=tf.constant_initializer([1.]), bias_start=0.0,
activation_fn=None, name='simple_linear'):
"""
simple element-wise linear layer
:param input_:
:param initializer
:param bias_start
:param activation_fn:
:param name:
:return:
"""
with tf.variable_scope(name):
w = tf.get_variable('Matrix', input_.get_shape(), tf.float32,
initializer)
b = tf.get_variable('bias', [input_.get_shape()[1]],
initializer=tf.constant_initializer(bias_start))
out = tf.nn.bias_add(tf.mul(input_, w), b)
if activation_fn is not None:
return activation_fn(out), w, b
else:
return out, w, b
def select_action_tf(belief, vector_set):
"""
Compute optimal action given a belief distribution
:param belief: dim(belief) == dim(AlphaVector)
:param vector_set
:return: optimal action, V(b)
"""
assert not len(vector_set) == 0
max_v = tf.constant([-np.inf], tf.float32)
best_action = tf.constant([-1])
for av in vector_set:
with tf.name_scope('V_b'):
v = tf.reduce_sum(tf.mul(av.v, belief))
best_action = tf.cond(tf.greater(v, max_v)[0], lambda: tf.constant([av.action]),
lambda: best_action)
max_v = tf.maximum(v, max_v)
return best_action, max_v
def clipped_error(x):
# Huber loss
try:
return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
except:
return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
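
# Typical usage sketch (an assumption; `td_error` stands for a tf.Tensor of
# temporal-difference errors defined elsewhere):
#   loss = tf.reduce_mean(clipped_error(td_error))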
|
the-stack_0_5149 | # model settings
model = dict(
type='SimSiam',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3,), # no conv-1, x-1: stage-x
norm_cfg=dict(type='SyncBN'),
style='pytorch'),
neck=dict(
type='NonLinearNeck',
in_channels=2048, hid_channels=2048, out_channels=2048,
num_layers=3,
with_bias=True, with_last_bn=False, with_last_bn_affine=False,
with_avg_pool=True),
head=dict(
type='LatentPredictHead',
predictor=dict(
type='NonLinearNeck',
in_channels=2048, hid_channels=512, out_channels=2048,
num_layers=2,
with_avg_pool=False,
with_bias=True, with_last_bn=False, with_last_bias=True))
)
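# This file is only the model-settings fragment; in an MMSelfSup-style layout
# it is assumed to be merged with dataset/schedule configs and passed to the
# training entry point, e.g.:
#   python tools/train.py configs/selfsup/simsiam/<this_config>.py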
|
the-stack_0_5150 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import uda_acl_ext
class extended(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-uda-access-list - based on the path /uda/access-list/extended. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__uda_acl_ext',)
_yang_name = 'extended'
_rest_name = 'extended'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
self.__uda_acl_ext = YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'uda', u'access-list', u'extended']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'uda', u'access-list', u'extended']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /uda/access_list/extended/name (uda-acl-name)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /uda/access_list/extended/name (uda-acl-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with uda-acl-name""",
'defined-type': "brocade-uda-access-list:uda-acl-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)', u'cli-full-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-acl-name', is_config=True)
def _get_uda_acl_ext(self):
"""
Getter method for uda_acl_ext, mapped from YANG variable /uda/access_list/extended/uda_acl_ext (container)
"""
return self.__uda_acl_ext
def _set_uda_acl_ext(self, v, load=False):
"""
Setter method for uda_acl_ext, mapped from YANG variable /uda/access_list/extended/uda_acl_ext (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uda_acl_ext is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uda_acl_ext() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """uda_acl_ext must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)""",
})
self.__uda_acl_ext = t
if hasattr(self, '_set'):
self._set()
def _unset_uda_acl_ext(self):
self.__uda_acl_ext = YANGDynClass(base=uda_acl_ext.uda_acl_ext, is_container='container', presence=False, yang_name="uda-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='container', is_config=True)
name = __builtin__.property(_get_name, _set_name)
uda_acl_ext = __builtin__.property(_get_uda_acl_ext, _set_uda_acl_ext)
_pyangbind_elements = {'name': name, 'uda_acl_ext': uda_acl_ext, }
|
the-stack_0_5151 | import numpy as np
import os
import sys
import math
from datetime import datetime
from importlib import reload
from pprint import pprint
from platform import python_version
print(python_version())
sys.path.append(os.getcwd())
import NDN3.NDNutils as NDNutils
import NDN3.NDN as NDN
import utils.data as udata
import utils.network as unet
import utils.analysis as uas
import utils.analysis_present as uasp
import fire
def runner(exp_folder, exp, run, hidden, c_size, c_filters):
run_1(exp_folder, exp, run, hidden, c_size, c_filters)
#
# based on bl3:
# - convolution instead of DoG
def run_1(exp_folder, exp, run, hidden, c_size, c_filters):
name = f'baseline3_C{c_filters}s{c_size}xN{hidden}x5000'
exp = f"{exp}1x{run}"
def get_hsm_params_custom(input, output, i):
_, output_shape = output.shape
_, input_shape = input.shape
pprint(f"in: {input_shape} out: {output_shape}")
intput_w, input_h = int(math.sqrt(input_shape)), int(math.sqrt(input_shape))
hsm_params = NDNutils.ffnetwork_params(
verbose=False,
input_dims=[1, intput_w, input_h],
layer_sizes=[c_filters, int(hidden*output_shape), output_shape], # paper: 9, 0.2*output_shape
ei_layers=[None, None, None],
normalization=[0, 0, 0],
layer_types=['conv','normal','normal'],
act_funcs=['lin', 'softplus','softplus'],
shift_spacing=[(c_size+1)//2, 2, 0],
conv_filter_widths=[c_size, 0, 0],
reg_list={})
hsm_params['weights_initializers']=['normal','normal','normal']
hsm_params['biases_initializers']=['trunc_normal','trunc_normal','trunc_normal']
return hsm_params
def get_training_params():
epochs = 5000
return {'batch_size': 16, 'use_gpu': False, 'epochs_summary': epochs//50, 'epochs_training': epochs, 'learning_rate': 0.001}
input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
[1], 'training', udata.normalize_mean_std)
input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
[1], 'validation', udata.normalize_mean_std)
for i in range(10):
seed = i
hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
pprint(hsm_params)
hsm, input_tuple = unet.get_network(
input_tr_processed, output_tr,
'adam',
get_training_params(),
hsm_params,
'poisson',
input_val_processed, output_val,
output_tr_mask, output_val_mask,
f"{name}__{i}", seed,
)
hsm.log_correlation = 'zero-NaNs'
(input, output, train_indxs, test_indxs, data_filters, larg, opt_params, name_str) = input_tuple
hsm.train(
input_data=input,
output_data=output,
train_indxs=train_indxs,
test_indxs=test_indxs,
data_filters=data_filters,
learning_alg=larg,
opt_params=opt_params,
output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}"
)
res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed, output_val, output_val_mask)
hsm.save_model(f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
with open("./training_data/experiments.txt", "a+") as f:
f.write(f"{exp_folder}/{exp}/{name}\n")
if __name__ == "__main__":
fire.Fire(runner)
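
# Example invocation via python-fire (hypothetical script name; arguments in
# the order of runner()'s signature):
#   python baseline3.py <exp_folder> <exp> <run> <hidden> <c_size> <c_filters>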
|
the-stack_0_5152 | from setuptools import setup, find_packages
from PublisherAzureTestsResults.version import VERSION
classifiers = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
setup(
name='robotframework-publisher-results-azure',
url='https://github.com/ismailktami/robotframework-publisher-results-azure',
version=VERSION,
description='Library to publish robot framework automation results on azure',
author='Ismail Ktami',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords='robotframework azure devops testplans results outcomes',
packages=find_packages(),
install_requires=[
'robotframework>=3.2.2',
'requests',
'utils'
]
) |
the-stack_0_5153 | # /index.py
from flask import Flask, request, jsonify, render_template, redirect
import os
import dialogflow_v2 as dialogflow
import requests
import json
import pusher
from werkzeug.utils import secure_filename
from trim import song
from therapy import find
from sendemail import sendmail
from video_emotion import output
import cv2
import imutils
import cv2
from tensorflow import keras
import numpy as np
import time
app = Flask(__name__)
lol = 0
@app.route('/chatbot_page')
def chatbot_page():
return render_template('index.html')
@app.route('/tictac')
def tictac():
return render_template("tictac.html")
@app.route("/webcam")
def webcam():
return render_template('webcam.html')
@app.route("/extras")
def extras():
return render_template("extra.html")
@app.route('/predict2', methods=['GET'])
def predict2():
global lol
file_path = "D:\Downloads\screenshot.jpg"
fin, mood = output(file_path)
os.remove(file_path)
# cv2.imshow("image", fin)
# cv2.waitKey(0)
new_path = "D:\Projects\djhack\static\saves2\zinished{}.jpg".format(
str(lol))
cv2.imwrite(new_path, fin)
lol = lol+1
time.sleep(1)
return render_template("something.html", image_name="static\saves2\zinished" + str(lol-1) + ".jpg")
def intensity(level):
if level == 'low':
return 30
if level == 'medium':
return 20
if level == 'high':
return 10
def score_inc(score, num):
    # Return the running concern score increased by num
    return score + num
@app.route('/webhook', methods=['POST'])
def webhook():
flag = 0
data = request.get_json(silent=True)
score = 0
if data['queryResult']['intent']['displayName'] == 'feel_happy':
reply = {
'fulfillmentText': 'happy works!',
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'show_song':
rec_song = song()
my_string = "{} by {}"
my_string = my_string.format(
rec_song['song'][0], rec_song['artist'][0])
reply = {
'fulfillmentText': "According to your mood: " + my_string,
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'doctor_rec':
city = data['queryResult']['parameters']['geo-city']
doctors = find(city)
fin = ""
for i in range(2):
my_string = "Doctor {}: \nName: {} Role: {} Contact: {}\n"
my_string = my_string.format(
i+1, doctors[i]['Name'], doctors[i]['Role'], doctors[i]['Contact'], )
fin += my_string
reply = {
'fulfillmentText': "Following are the doctor recommendations:\n" + fin
}
return jsonify(reply)
if data['queryResult']['intent']['displayName'] == 'Email':
        sendmail()
        reply = {
            "fulfillmentText": "Email is on its way!"
        }
        return jsonify(reply)
# if data['queryResult']['intent']['displayName'] in ['feel_sad - yes - custom', 'feel_sad - yes - custom - custom', 'feel_sad - yes - custom - custom - custom']:
# level = data['queryResult']['parameters']
# score_inc(intensity(level))
# if data['queryResult']['intent']['displayName'] == 'feel_sad - yes - custom - custom - custom':
# stg = "Your concern level is {} out of 90."
# stg = stg.format(score)
# if score >= 30 and score < 50:
# reply = {
# 'fulfillmentText': stg + "You will be fine! Try playing our mini-games!"
# }
# elif score >= 50 and score < 70:
# reply = {
# 'fulfillmentText': stg + "Ask for song recommendations here. Take care, you'll get over it!"
# }
# elif score >= 70 and score <= 90:
# reply = {
# 'fulfillmentText': stg + "Please consider getting professional help. We can provide you with recommendations!"
# }
def detect_intent_texts(project_id, session_id, text, language_code):
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
if text:
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input)
return response.query_result.fulfillment_text
@app.route('/send_message', methods=['POST'])
def send_message():
message = request.form['message']
project_id = os.getenv('DIALOGFLOW_PROJECT_ID')
fulfillment_text = detect_intent_texts(project_id, "unique", message, 'en')
response_text = {"message": fulfillment_text}
return jsonify(response_text)
@app.route('/snake')
def snake():
print("calls snake!")
return render_template('snake.html')
@app.route('/')
def home():
# landing page
return render_template('home.html')
@app.route('/services')
def services():
return render_template('services.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/doctor')
def doctor():
return render_template('doctor.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
# run Flask app
if __name__ == "__main__":
app.run()
|
the-stack_0_5154 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Range
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import sys
from . import element
class Range(element.Element):
""" Set of values bounded by low and high.
A set of ordered Quantities defined by a low and high limit.
"""
resource_type = "Range"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.high = None
""" High limit.
Type `Quantity` (represented as `dict` in JSON). """
self.low = None
""" Low limit.
Type `Quantity` (represented as `dict` in JSON). """
super(Range, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Range, self).elementProperties()
js.extend(
[
("high", "high", quantity.Quantity, "Quantity", False, None, False),
("low", "low", quantity.Quantity, "Quantity", False, None, False),
]
)
return js
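# Usage sketch (illustrative only): a Range can be built directly from a FHIR JSON
# dict, with low/high given as Quantity dicts, e.g.
#   rng = Range({"low": {"value": 1, "unit": "mg"}, "high": {"value": 5, "unit": "mg"}})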
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + ".quantity"]
|
the-stack_0_5155 | import socket
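# Minimal blocking TCP echo server: accept one client at a time on port 1028,
# read up to 1 KiB and send the same bytes straight back before closing the connection.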
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("localhost", 1028))
s.listen(1)
while True:
client, address = s.accept()
data = client.recv(1024)
client.send(data)
client.close()
|
the-stack_0_5156 | import time
from cnn_model import *
from audio_data import CNNDataset
from cnn_training import *
import argparse
import torch
import torch.nn as nn
import hdf5storage
import os
def main(config):
dtype = torch.FloatTensor
ltype = torch.LongTensor
use_cuda = torch.cuda.is_available()
if use_cuda:
print('Using CUDA.')
dtype = torch.cuda.FloatTensor
ltype = torch.cuda.LongTensor
torch.manual_seed(0)
optimizer = None
if config.stepName!='features':
if config.method == 'dnn':
model = CNNModel(kernel_size=config.kernel_size, nb_channels=config.nb_channels, nb_layers=config.nb_layers, dilation=config.dilation)
elif config.method == 'autoDense':
model = AutoDense()
elif config.method == 'autoStride':
model = AutoStride()
if use_cuda:
model = nn.DataParallel(model).cuda()
#model.cuda()
optimizer = optim.Adam(params=model.parameters(), lr=config.lr, weight_decay=0.0)
if hasattr(config.data, 'modelPath'):
modelPath = np.array2string(np.squeeze(config.data.modelPath))[1:-1]
print(modelPath)
checkpoint = torch.load(modelPath)
# print(checkpoint['model_state_dict'])
model.load_state_dict(checkpoint['model_state_dict'])
# print(checkpoint['optimizer_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# model = load_model_from(modelPath, use_cuda=True)
#model = torch.load('snapshots/some_model')
print('Model: ', model)
print('Parameter count: ', model.module.parameter_count())
if config.dataset=='librispeech':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'speech/LibriSpeech/'
dataset_name = 'dev-clean'
dataset_name_eval = 'test-clean'
elif config.dataset=='gtzan':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'music/gtzan/'
dataset_name = 'dev'
dataset_name_eval = 'test'
elif config.dataset=='medleysolos':
inputLocation = np.array2string(np.squeeze(config.eC.inputPath))[1:-1]+'music/medleysolos/'
dataset_name = 'dev'
dataset_name_eval = 'test'
print(inputLocation)
if config.stepName=='features':
dataLocation = np.array2string(np.squeeze(config.eC.dataPath))[1:-1]
dataLocation += 'features/'
dataLocation += np.array2string(np.squeeze(config.eS.infoHash))[1:-1]
dataLocationTrain = dataLocation+'_train'
dataLocationTest = dataLocation+'_test'
else:
dataLocationTrain = np.array2string(np.squeeze(config.data.trainPath))[1:-1]
dataLocationTest = np.array2string(np.squeeze(config.data.testPath))[1:-1]
data = CNNDataset(dataset_file=dataLocationTrain,
file_location=inputLocation+dataset_name,
sampling_rate=config.sampling_rate,
block_size = config.block_size,
frame_size = config.frame_size,
normalize=True, compute=config.stepName=='features', squeeze=config.squeeze)
data_eval = CNNDataset(dataset_file=dataLocationTest,
file_location=inputLocation+dataset_name_eval,
sampling_rate=config.sampling_rate,
block_size = config.block_size,
frame_size = config.frame_size,
normalize=True, compute=config.stepName=='features', squeeze=config.squeeze)
    print('Dataset size: ', len(data))
if config.stepName!='features':
trainer = CNNTrainer(model=model,
method=config.method,
lr=config.lr,
log_plus = config.log_plus,
weight_decay=0.0,
optimizer=optimizer,
snapshot_path=config.expLanes[0:-4],
snapshot_interval=config.snapshot_interval,
dtype=dtype,
spectrum_normalization = config.spectrum_normalization)
if config.stepName=='train':
print('----- Training -----')
store, obs = trainer.train(dataset=data,
dataset_validation=data_eval,
batch_size=config.batch_size,
epochs=config.epochs,
target=config.target,
q = config.q)
if config.stepName=='test':
print('----- Evaluation -----')
store, obs = trainer.test(dataset=data_eval, batch_size=config.block_size, save=True)
if config.expLanes :
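        # When driven by expLanes, write the collected observations and data back as
        # MATLAB files (<step>_data.mat and <step>_obs.mat) next to the step file.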
if config.stepName=='features':
store = {}
obs = {}
store['trainPath'] = data.dataset_file
store['testPath'] = data_eval.dataset_file
store['trainFiles'] = data.get_files()
store['testFiles'] = data_eval.get_files()
obs['nbBlocksTrain'] = len(data)
obs['nbBlocksTest'] = len(data_eval)
if config.stepName=='train':
print('train')
if config.stepName=='test':
print('test')
if os.path.exists(config.expLanes[0:-8]+'_data.mat'):
os.remove(config.expLanes[0:-8]+'_data.mat')
hdf5storage.savemat(config.expLanes[0:-8]+'_data', store)
if os.path.exists(config.expLanes[0:-8]+'_obs.mat'):
os.remove(config.expLanes[0:-8]+'_obs.mat')
hdf5storage.savemat(config.expLanes[0:-8]+'_obs', obs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Datah
parser.add_argument('--expLanes', type=str, default='')
parser.add_argument('--dataset', type=str, default='dev-clean')
parser.add_argument('--dataset_eval', type=str, default='test-clean')
parser.add_argument('-load_mdl', action='store_true')
# Logging
parser.add_argument('--snapshot_interval', type=int, default=1000)
parser.add_argument('--validation_interval', type=int, default=2000000)
# Training
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=10)
    # Experience factors
parser.add_argument('--target', type=str, default='spec', help='spec, wspec, cqt')
parser.add_argument('--q', type=int, default=27)
config = parser.parse_args()
if config.expLanes :
print('loading expLanes config')
ec = hdf5storage.loadmat(config.expLanes)
print('done')
eSetting = ec['data']['info']['setting']
eConfig = ec['data']['info']
# print(ec)
config.eC = eConfig.view(np.recarray)
eS = eSetting.view(np.recarray)
config.stepName = np.squeeze(config.eC.stepName)
config.eS = eS
config.data = ec['data'].view(np.recarray)
# config.batch_size = int(np.nan_to_num(np.squeeze(eSetting['batchSize'])))
# config.block_size = int(np.nan_to_num(np.squeeze(eSetting['blockSize'])))
config.batch_size = 150
config.block_size = 150
config.squeeze = eSetting['squeeze']
config.dataset = np.array2string(np.squeeze(eSetting['dataset']))[1:-1]
config.method = np.array2string(np.squeeze(eSetting['method']))[1:-1]
if config.stepName=='features':
config.frame_size = int(np.nan_to_num(np.squeeze(ec['data']['frameSize'])))
config.sampling_rate = int(np.nan_to_num(np.squeeze(ec['data']['samplingFrequency'])))
else :
config.kernel_size = int(np.nan_to_num(np.squeeze(eSetting['kernelSize'])))
config.lr = float(np.squeeze(eSetting['learningRate']))
config.epochs = int(np.nan_to_num(np.squeeze(eSetting['epochs'])))
config.nb_channels = int(np.nan_to_num(np.squeeze(eSetting['nbChannels'])))
config.nb_layers = int(np.nan_to_num(np.squeeze(eSetting['nbLayers'])))
config.dilation = int(np.nan_to_num(np.squeeze(eSetting['dilation'])))
config.log_plus = int(np.nan_to_num(np.squeeze(eSetting['logPlus'])))
config.spectrum_normalization = int(np.nan_to_num(np.squeeze(eSetting['spectrumNormalization'])))
config.sampling_rate = 1
config.frame_size = 1
#print(config.epochs)
main(config)
|
the-stack_0_5157 | import zmq
PORT = 9123
def main():
"""Main.
"""
context = zmq.Context()
socket = context.socket(zmq.SUB)
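    # Subscribe to every topic (empty prefix filter) before connecting to the publisher.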
print('Connecting port %s' % PORT)
socket.setsockopt(zmq.SUBSCRIBE, b'')
socket.connect("tcp://localhost:%s" % PORT)
print('Connected port %s' % PORT)
while True:
message = socket.recv()
print("Message received: %s" % message)
if __name__ == '__main__':
main()
|
the-stack_0_5159 | """
Run script for 2d example with two fractures. Dynamics driven by Dirichlet
values at the fracture endpoints, which are different from the matrix BC values.
Flow and cooling from left to right, leftmost fracture grows.
-----------------------
|                     |
|                     |
|                     |
|                     |
|----             ----|
|                     |
|                     |
|                     |
-----------------------
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import porepy as pp
from porepy.models.thm_model import THM
from fracture_propagation_model import THMPropagationModel
from utils import read_pickle, write_pickle
logger = logging.getLogger(__name__)
class Example2Model(THMPropagationModel, THM):
"""
This class provides the parameter specification of the example, including grid/geometry,
BCs, rock and fluid parameters and time parameters. Also provides some common modelling
functions, such as the aperture computation from the displacement jumps, and data storage
and export functions.
"""
def _fractures(self):
self.fracs = [
np.array([[0.0, 0.5], [0.25, 0.5]]).T,
np.array([[0.75, 0.5], [1, 0.5]]).T,
]
def _depth(self, coords):
return np.zeros(coords.shape[1])
def _bc_type_mechanics(self, g) -> pp.BoundaryConditionVectorial:
"""
Dirichlet values at top and bottom.
"""
all_bf, east, west, north, south, _, _ = self._domain_boundary_sides(g)
dir_faces = south + north + g.tags["fracture_faces"]
bc = pp.BoundaryConditionVectorial(g, dir_faces, "dir")
return bc
def _bc_values_mechanics(self, g) -> np.ndarray:
"""Dirichlet displacement on the top, fixed on bottom and 0 Neumann
on left and right.
"""
# Retrieve the boundaries where values are assigned
bc_values = np.zeros((g.dim, g.num_faces))
return bc_values.ravel("F")
def _p_and_T_dir_faces(self, g):
"""
We prescribe Dirichlet value at the fractures.
No-flow for the matrix.
"""
if g.dim == self._Nd:
return np.empty(0, dtype=int)
else:
all_bf, east, west, north, south, _, _ = self._domain_boundary_sides(g)
return (east + west).nonzero()[0]
def _bc_values_scalar(self, g) -> np.ndarray:
"""
See bc_type_scalar
"""
# Retrieve the boundaries where values are assigned
dir_faces = self._p_and_T_dir_faces(g)
bc_values = np.zeros(g.num_faces)
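        # Linear pressure profile on the fracture boundary faces: 5e4 Pa at x=0
        # dropping to zero at x=1 (scaled by scalar_scale), driving flow left to right.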
bc_values[dir_faces] = (
5e4 / self.scalar_scale * (1 - g.face_centers[0, dir_faces])
)
return bc_values
def _bc_values_temperature(self, g) -> np.ndarray:
"""Cooling on the left from the onset of phase III."""
bc_values = np.zeros(g.num_faces)
dir_faces = self._p_and_T_dir_faces(g)
bc_values[dir_faces] = self.T_0_Kelvin - 50 * (1 - g.face_centers[0, dir_faces])
return bc_values
def _set_rock_and_fluid(self):
"""
Set rock and fluid properties to those of granite and water.
We ignore all temperature dependencies of the parameters.
"""
super()._set_rock_and_fluid()
def _hydrostatic_pressure(self, g, depth):
"""Set explicitly to zero to avoid the atmospheric pressure returned
by the exIII/exIV function for depth=0.
"""
return np.zeros_like(depth)
def _set_time_parameters(self):
"""
Specify time parameters.
"""
# For the initialization run, we use the following
# start time
self.time = 0
# and time step
self.time_step = self.params.get("time_step")
# We use
self.end_time = 4 * pp.HOUR
self.max_time_step = self.time_step
self.phase_limits = np.array([self.end_time])
self.phase_time_steps = np.array([self.time_step])
def _set_fields(self, params):
"""
Set various fields to be used in the model.
"""
super()._set_fields(params)
# Initial aperture, a_0
self.initial_aperture = 1e-3 / self.length_scale
self.gravity_on = False # Mechanics not implemented for True
self.box = {"xmin": 0, "ymin": 0, "xmax": 1, "ymax": 1}
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# dt in seconds
reference = False
time_steps = np.array([600, 300, 150, 75])
# Number of cells in each dimension
n_cells = np.array([32, 64, 128])
if reference:
n_cells = np.array([512])
time_steps = np.array([25])
fracture_sizes = {}
export_times = {}
mesh_size = 0.02
mesh_args = {
"mesh_size_frac": mesh_size,
"mesh_size_min": 0.5 * mesh_size,
"mesh_size_bound": 3.6 * mesh_size,
}
folder_name = "exII_revision"
if reference:
folder_name += "_ref"
params = {
"nl_convergence_tol": 1e-8,
"max_iterations": 50,
"file_name": "tensile_stable_propagation",
"mesh_args": mesh_args,
"folder_name": folder_name,
"nx": 10,
"prepare_umfpack": False,
}
if reference:
params["file_name"] = "tensile_stable_propagation_reference"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
for i, dt in enumerate(time_steps):
params["time_step"] = dt
for j, nx in enumerate(n_cells):
logger.info("\nSolving for dt {} and nx {}.".format(dt, nx))
params["nx"] = nx
m = Example2Model(params)
pp.run_time_dependent_model(m, params)
fracture_sizes[(dt, nx)] = m.fracture_sizes
export_times[(dt, nx)] = m.export_times
m._export_pvd()
plot = False
if reference:
data = read_pickle("exII/fracture_sizes")
fracture_sizes.update(data["fracture_sizes"])
time_steps = np.union1d(data["time_steps"], time_steps)
        data["export_times"].update(export_times)
        export_times = data["export_times"]
n_cells = np.union1d(data["n_cells"], n_cells)
data = {
"fracture_sizes": fracture_sizes,
"time_steps": time_steps,
"n_cells": n_cells,
"export_times": export_times,
}
write_pickle(data, folder_name + "/fracture_sizes")
if plot:
fig, ax = plt.subplots()
for i, dt in enumerate(time_steps):
for j, nx in enumerate(n_cells):
data = fracture_sizes[(dt, nx)]
length = data[:, 2] - data[:, 1]
ax.plot(data[:, 0], length, label="dt {} nx {}".format(dt, nx))
ax.legend()
plt.show()
|
the-stack_0_5161 | # System imports
from datetime import datetime
import time
import json
import logging
# Package imports
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
# Local imports
import common
from ispyb_api import controller
api = Blueprint('ebic', __name__, url_prefix='/ebic')
rack_prefix = 'EBIC-RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4',
'C1', 'C2', 'C3', 'C4',
'D1', 'D2', 'D3', 'D4',
'E1', 'E2', 'E3', 'E4',
'F1', 'F2', 'F3', 'F4',
'G1', 'G2', 'G3', 'G4',
'H1', 'H2', 'H3', 'H4',
'J1', 'J2', 'J3', 'J4',
'K1', 'K2', 'K3', 'K4',
'L1', 'L2', 'L3', 'L4',
'M1', 'M2', 'M3', 'M4',
'N1', 'N2', 'N3', 'N4',
'P1', 'P2', 'P3', 'P4',
'Q1', 'Q2', 'Q3', 'Q4',
'R1', 'R2', 'R3', 'R4',
]
rack_locations = ['-'.join([rack_prefix, suffix])
for suffix in rack_suffixes]
beamlines = ['m01',
'm02',
'm03',
'm04',
'm05',
'm06',
'm07',
]
beamline_prefix = 'MICROSCOPE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
# Add the common locations on for the web ui
beamline_locations.extend(['USER-COLLECTION',
'STORES-OUT',
'ZONE-6-STORE',
])
@api.route('/')
def index():
"""
Main page for dewar management
"""
return render_template('dewars.html',
title="eBIC Dewar Management",
rack_locations=rack_locations,
rack_suffixes=rack_suffixes,
rack_prefix=rack_prefix,
beamlines=beamline_locations,
api_prefix="ebic",
)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
"""
API route for dewar management
"""
result = {}
status_code = 200
if request.method == "GET":
# Get any dewar with any rack location
# There should only be one per location
# Simple call so use controller directly
result = controller.find_dewars_by_location(rack_locations)
elif request.method == "POST":
location = request.form['location']
barcode = request.form['barcode']
result, status_code = common.update_dewar_location(barcode, location)
elif request.method == "DELETE":
location = request.form['location']
result, status_code = common.remove_dewar_from_location(location)
else:
result = {'location': '',
'barcode': '',
'status': 'fail',
'reason': 'Method/route not implemented yet'}
status_code = 501
return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
"""
Return a list of matching dewars with this facility code
Should be requested with parameters in the URL ?fc=DLS-MS-1234 request
We specifically return the status code so the front end can show feedback
"""
facilitycode = request.args.get('fc')
result, status_code = common.find_dewar(facilitycode)
return jsonify(result), status_code
|
the-stack_0_5164 | """Functionality for awesome-streamlit.org"""
from panel.pane import Markdown
def title_awesome(body: str,) -> Markdown:
"""An *Awesome Panel* title as a Markdown with
- the text like 'Awesome Panel About'
- the [Awesome Badge](https://cdn.rawgit.com/sindresorhus/awesome/\
d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)
Arguments:
body (str): Some title like 'About'
Returns:
Markdown: An 'Awesome Panel {body} title with a link and the awesome badge.
"""
    return Markdown(
        f"# Awesome Panel {body} "
        "![Awesome Badge](https://cdn.rawgit.com/sindresorhus/awesome/"
        "d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)"
    )
|
the-stack_0_5166 | """
mfwel module. Contains the ModflowWel class. Note that the user can access
the ModflowWel class as `flopy.modflow.ModflowWel`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?wel.htm>`_.
"""
import sys
import numpy as np
from ..utils import MfList
from ..pakbase import Package
from ..utils.recarray_utils import create_empty_recarray
from ..utils.optionblock import OptionBlock
import warnings
class ModflowWel(Package):
"""
MODFLOW Well Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 0).
stress_period_data : list of boundaries, or recarray of boundaries, or
dictionary of boundaries
Each well is defined through definition of
layer (int), row (int), column (int), flux (float).
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. This gives the form of:
stress_period_data =
{0: [
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
],
1: [
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
], ...
kper:
[
[lay, row, col, flux],
[lay, row, col, flux],
[lay, row, col, flux]
]
}
Note that if the number of lists is smaller than the number of stress
periods, then the last list of wells will apply until the end of the
simulation. Full details of all options to specify stress_period_data
can be found in the flopy3 boundaries Notebook in the basic
subdirectory of the examples directory
dtype : custom datatype of stress_period_data.
If None the default well datatype will be applied (default is None).
extension : string
Filename extension (default is 'wel')
options : list of strings
Package options (default is None).
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output names will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
mxactw : int
Maximum number of wells for a stress period. This is calculated
automatically by FloPy based on the information in
stress_period_data.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
>>> wel = flopy.modflow.ModflowWel(m, stress_period_data=lrcq)
"""
_options = dict(
[
(
"specify",
{
OptionBlock.dtype: np.bool_,
OptionBlock.nested: True,
OptionBlock.n_nested: 2,
OptionBlock.vars: dict(
[
("phiramp", OptionBlock.simple_float),
(
"iunitramp",
dict(
[
(OptionBlock.dtype, int),
(OptionBlock.nested, False),
(OptionBlock.optional, True),
]
),
),
]
),
},
),
("tabfiles", OptionBlock.simple_tabfile),
]
)
def __init__(
self,
model,
ipakcb=None,
stress_period_data=None,
dtype=None,
extension="wel",
options=None,
binary=False,
unitnumber=None,
filenames=None,
):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowWel._defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(
ipakcb, fname=fname, package=ModflowWel._ftype()
)
else:
ipakcb = 0
# Fill namefile items
name = [ModflowWel._ftype()]
units = [unitnumber]
extra = [""]
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and
# unit number
Package.__init__(
self,
model,
extension=extension,
name=name,
unit_number=units,
extra=extra,
filenames=fname,
)
self._generate_heading()
self.url = "wel.htm"
self.ipakcb = ipakcb
self.np = 0
if options is None:
options = []
self.specify = False
self.phiramp = None
self.iunitramp = None
self.options = options
if isinstance(options, OptionBlock):
if not self.options.specify:
self.specify = self.options.specify
else:
self.specify = True
self.phiramp = self.options.phiramp
self.iunitramp = self.options.iunitramp
# this is to grab the aux variables...
options = []
else:
for idx, opt in enumerate(options):
if "specify" in opt:
t = opt.strip().split()
self.specify = True
self.phiramp = float(t[1])
self.iunitramp = int(t[2])
self.options.pop(idx)
break
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(
structured=self.parent.structured
)
# determine if any aux variables in dtype
dt = self.get_default_dtype(structured=self.parent.structured)
if len(self.dtype.names) > len(dt.names):
for name in self.dtype.names[len(dt.names) :]:
ladd = True
for option in options:
if name.lower() in option.lower():
ladd = False
break
if ladd:
options.append("aux {} ".format(name))
if isinstance(self.options, OptionBlock):
if not self.options.auxillary:
self.options.auxillary = options
else:
self.options = options
# initialize MfList
self.stress_period_data = MfList(
self, stress_period_data, binary=binary
)
self.parent.add_package(self)
def _ncells(self):
"""Maximum number of cells that have wells (developed for
MT3DMS SSM package).
Returns
-------
ncells: int
maximum number of wel cells
"""
return self.stress_period_data.mxact
def write_file(self, f=None):
"""
Write the package file.
        Parameters
        ----------
        f : str, optional
            file name
Returns
-------
None
"""
if f is not None:
if isinstance(f, str):
f_wel = open(f, "w")
else:
f_wel = f
else:
f_wel = open(self.fn_path, "w")
f_wel.write("%s\n" % self.heading)
if (
isinstance(self.options, OptionBlock)
and self.parent.version == "mfnwt"
):
self.options.update_from_package(self)
if self.options.block:
self.options.write_options(f_wel)
line = " {0:9d} {1:9d} ".format(
self.stress_period_data.mxact, self.ipakcb
)
if isinstance(self.options, OptionBlock):
if self.options.noprint:
line += "NOPRINT "
if self.options.auxillary:
line += " ".join(
[str(aux).upper() for aux in self.options.auxillary]
)
else:
for opt in self.options:
line += " " + str(opt)
line += "\n"
f_wel.write(line)
if (
isinstance(self.options, OptionBlock)
and self.parent.version == "mfnwt"
):
if not self.options.block:
if isinstance(self.options.specify, np.ndarray):
self.options.tabfiles = False
self.options.write_options(f_wel)
else:
if self.specify and self.parent.version == "mfnwt":
f_wel.write(
"SPECIFY {0:10.5g} {1:10d}\n".format(
self.phiramp, self.iunitramp
)
)
self.stress_period_data.write_transient(f_wel)
f_wel.close()
def add_record(self, kper, index, values):
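        """Append a single well record (cell index plus values) to stress period kper."""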
try:
self.stress_period_data.add_record(kper, index, values)
except Exception as e:
raise Exception("mfwel error adding record to list: " + str(e))
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype(
[
("k", int),
("i", int),
("j", int),
("flux", np.float32),
]
)
else:
dtype = np.dtype([("node", int), ("flux", np.float32)])
return dtype
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recarray that corresponds to dtype
dtype = ModflowWel.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
return create_empty_recarray(ncells, dtype, default_value=-1.0e10)
@staticmethod
def _get_sfac_columns():
return ["flux"]
@classmethod
def load(cls, f, model, nper=None, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
wel : ModflowWel object
ModflowWel object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> wel = flopy.modflow.ModflowWel.load('test.wel', m)
"""
if model.verbose:
sys.stdout.write("loading wel package file...\n")
return Package.load(
f,
model,
cls,
nper=nper,
check=check,
ext_unit_dict=ext_unit_dict,
)
@staticmethod
def _ftype():
return "WEL"
@staticmethod
def _defaultunit():
return 20
|
the-stack_0_5167 | # objective is to get the cart to the flag.
# for now, let's just move randomly:
import gym
import numpy as np
env = gym.make("MountainCar-v0")
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 25000
SHOW_EVERY = 1000
DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low)/DISCRETE_OS_SIZE
# Exploration settings
epsilon = 1 # not a constant, qoing to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES//2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)
q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))
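# Q-table: one entry per discretised (position, velocity) bucket and per action,
# initialised with small negative values since each step in MountainCar yields a -1 reward.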
def get_discrete_state(state):
discrete_state = (state - env.observation_space.low)/discrete_os_win_size
    return tuple(discrete_state.astype(int))  # we use this tuple to look up the 3 Q values for the available actions in the q-table
for episode in range(EPISODES):
discrete_state = get_discrete_state(env.reset())
done = False
if episode % SHOW_EVERY == 0:
render = True
print(episode)
else:
render = False
while not done:
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(q_table[discrete_state])
else:
# Get random action
action = np.random.randint(0, env.action_space.n)
new_state, reward, done, _ = env.step(action)
new_discrete_state = get_discrete_state(new_state)
if episode % SHOW_EVERY == 0:
env.render()
#new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
# If simulation did not end yet after last step - update Q table
if not done:
# Maximum possible Q value in next step (for new state)
max_future_q = np.max(q_table[new_discrete_state])
# Current Q value (for current state and performed action)
current_q = q_table[discrete_state + (action,)]
# And here's our equation for a new Q value for current state and action
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
# Update Q table with new Q value
q_table[discrete_state + (action,)] = new_q
# Simulation ended (for any reson) - if goal position is achived - update Q value with reward directly
elif new_state[0] >= env.goal_position:
#q_table[discrete_state + (action,)] = reward
print('Win At '+str(episode))
q_table[discrete_state + (action,)] = 0
discrete_state = new_discrete_state
# Decaying is being done every episode if episode number is within decaying range
if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
epsilon -= epsilon_decay_value
env.close()
|
the-stack_0_5168 | # -*- coding: utf-8 -*-
if __name__ == '__main__':
import os, sys
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(path, '..', '..'))
from ..Qt import QtGui
from .. import functions as fn
from .UIGraphicsItem import UIGraphicsItem
__all__ = ['VTickGroup']
class VTickGroup(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Draws a set of tick marks which always occupy the same vertical range of the view,
but have x coordinates relative to the data within the view.
"""
def __init__(self, xvals=None, yrange=None, pen=None):
"""
============== ===================================================================
**Arguments:**
xvals A list of x values (in data coordinates) at which to draw ticks.
yrange A list of [low, high] limits for the tick. 0 is the bottom of
the view, 1 is the top. [0.8, 1] would draw ticks in the top
fifth of the view.
pen The pen to use for drawing ticks. Default is grey. Can be specified
as any argument valid for :func:`mkPen<pyqtgraph.mkPen>`
============== ===================================================================
"""
if yrange is None:
yrange = [0, 1]
if xvals is None:
xvals = []
UIGraphicsItem.__init__(self)
if pen is None:
pen = (200, 200, 200)
self.path = QtGui.QGraphicsPathItem()
self.ticks = []
self.xvals = []
self.yrange = [0,1]
self.setPen(pen)
self.setYRange(yrange)
self.setXVals(xvals)
def setPen(self, *args, **kwargs):
"""Set the pen to use for drawing ticks. Can be specified as any arguments valid
for :func:`mkPen<pyqtgraph.mkPen>`"""
self.pen = fn.mkPen(*args, **kwargs)
def setXVals(self, vals):
"""Set the x values for the ticks.
============== =====================================================================
**Arguments:**
vals A list of x values (in data/plot coordinates) at which to draw ticks.
============== =====================================================================
"""
self.xvals = vals
self.rebuildTicks()
#self.valid = False
def setYRange(self, vals):
"""Set the y range [low, high] that the ticks are drawn on. 0 is the bottom of
the view, 1 is the top."""
self.yrange = vals
self.rebuildTicks()
def dataBounds(self, *args, **kargs):
return None ## item should never affect view autoscaling
def yRange(self):
return self.yrange
def rebuildTicks(self):
self.path = QtGui.QPainterPath()
yrange = self.yRange()
for x in self.xvals:
self.path.moveTo(x, 0.)
self.path.lineTo(x, 1.)
def paint(self, p, *args):
UIGraphicsItem.paint(self, p, *args)
br = self.boundingRect()
h = br.height()
br.setY(br.y() + self.yrange[0] * h)
br.setHeight((self.yrange[1] - self.yrange[0]) * h)
p.translate(0, br.y())
p.scale(1.0, br.height())
p.setPen(self.pen)
p.drawPath(self.path)
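# Usage sketch (hypothetical names, assuming an existing PlotItem `plt`):
#   ticks = VTickGroup(xvals=[1, 2, 4, 8], yrange=[0.0, 0.1], pen='y')
#   plt.addItem(ticks)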
|
the-stack_0_5170 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class NetworkWatcherPaged(Paged):
"""
A paging container for iterating over a list of :class:`NetworkWatcher <azure.mgmt.network.v2018_12_01.models.NetworkWatcher>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[NetworkWatcher]'}
}
def __init__(self, *args, **kwargs):
super(NetworkWatcherPaged, self).__init__(*args, **kwargs)
|
the-stack_0_5171 | from typing import Any, Dict, Iterable
def filter_dict(d: Dict[str, Any], exclude: Iterable[str]) -> Dict[str, Any]:
"""Return a new dict with specified keys excluded from the original dict
Args:
d (dict): original dict
exclude (list): The keys that are excluded
"""
result: Dict[str, Any] = {}
for key, value in d.items():
if key not in exclude:
result.update({key: value})
return result
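# Example usage (illustrative): running the module directly prints the filtered dict.
if __name__ == "__main__":
    print(filter_dict({"a": 1, "b": 2, "c": 3}, exclude=["b"]))  # -> {'a': 1, 'c': 3}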
|
the-stack_0_5173 | #!/usr/bin/env python3
# from __future__ import print_function
"""
@summary: Timing transactions that are getting into the chain
@version: v46 (03/January/2019)
@since: 17/April/2018
@organization:
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
import time, timeit, sys, os, json
from web3 import Web3, HTTPProvider
# extend path for imports:
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from hammer.config import RPCaddress2, FILE_LAST_EXPERIMENT, AUTOSTOP_TPS, EMPTY_BLOCKS_AT_END
from hammer.deploy import loadFromDisk, FILE_CONTRACT_ADDRESS
from hammer.clienttools import web3connection, getBlockTransactionCount
def loopUntil_NewContract(query_intervall = 0.1):
"""
Wait for new smart contract to be deployed.
Continuously polls file "FILE_CONTRACT_ADDRESS".
Returns when overwritten file has different address or new filedate.
N.B.: It actually happens that same Ethereum contract address is created again,
if blockchain is deleted, and everything restarted. So: Check filedate too.
"""
address, _ = loadFromDisk()
when = os.path.getmtime(FILE_CONTRACT_ADDRESS)
print ("(filedate %d) last contract address: %s" %(when, address))
while(True):
time.sleep(query_intervall)
# checks whether a new contract has been deployed
# because then a new address has been saved to file:
newAddress, _ = loadFromDisk()
newWhen = os.path.getmtime(FILE_CONTRACT_ADDRESS)
if (newAddress != address or newWhen != when):
print ("(filedate %d) new contract address: %s" %(newWhen, newAddress))
break
return
def timestampToSeconds(timestamp, NODENAME, CONSENSUS):
"""
turn timestamp into (float of) seconds
as a separate function so that it can be recycled in blocksDB_create.py
"""
# most ethereum clients return block timestamps as whole seconds:
timeunits = 1.0
# quorum raft consensus ... returns not seconds but nanoseconds?
if CONSENSUS=="raft": timeunits = 1000000000.0
# testrpc-py has odd timestamp units ;-)
# do check for updates: https://github.com/pipermerriam/eth-testrpc/issues/117
if NODENAME=="TestRPC": timeunits = 205.0
return timestamp / timeunits
def analyzeNewBlocks(blockNumber, newBlockNumber, txCount, start_time, peakTpsAv):
"""
iterate through all new blocks, add up number of transactions
print status line
"""
txCount_new = 0
for bl in range(blockNumber+1, newBlockNumber+1): # TODO check range again - shift by one?
# txCount_new += w3.eth.getBlockTransactionCount(bl)
blktx = getBlockTransactionCount(w3, bl)
txCount_new += blktx # TODO
ts_blockNumber = w3.eth.getBlock( blockNumber).timestamp
ts_newBlockNumber = w3.eth.getBlock(newBlockNumber).timestamp
ts_diff = ts_newBlockNumber - ts_blockNumber
blocktimeSeconds = timestampToSeconds(ts_diff, NODENAME, CONSENSUS)
try:
tps_current = txCount_new / blocktimeSeconds
except ZeroDivisionError:
# Odd: Parity seems to have a blocktime resolution of whole seconds??
# So if blocks come much faster (e.g. with instantseal),
# then they end up having a blocktime of zero lol.
# Then, set TPS_CURRENT to something wrong but syntactically correct.
tps_current = 0
txCount += txCount_new
elapsed = timeit.default_timer() - start_time
tpsAv = txCount / elapsed
if tpsAv > peakTpsAv:
peakTpsAv = tpsAv
verb = " is" if peakTpsAv==tpsAv else "was"
line = "block %d | new #TX %3d / %4.0f ms = " \
"%5.1f TPS_current | total: #TX %4d / %4.1f s = %5.1f TPS_average " \
"(peak %s %5.1f TPS_average)"
line = line % ( newBlockNumber, txCount_new, blocktimeSeconds * 1000,
tps_current, txCount, elapsed, tpsAv, verb, peakTpsAv)
print (line)
return txCount, peakTpsAv, tpsAv
def sendingEndedFiledate():
try:
when = os.path.getmtime(FILE_LAST_EXPERIMENT)
except FileNotFoundError:
when = 0
return when
def readInfofile(fn=FILE_LAST_EXPERIMENT):
with open(fn, "r") as f:
data = json.load(f)
return data
class CodingError(Exception):
pass
def getNearestEntry(myDict, myIndex):
"""
because
finalTpsAv = tpsAv[block_last]
can sometimes not be resolved, then choose
finalTpsAv = tpsAv[block_last+i]
    testing with increasing i, then decreasing i
"""
answer = myDict.get(myIndex, None)
if answer:
return answer
maxIndex,minIndex = max(myDict.keys()), min(myDict.keys())
# first look later:
i = myIndex
while not answer:
i += +1
if i>maxIndex:
break
answer = myDict.get(i, None)
# then look earlier:
i=myIndex
while not answer:
i += -1
if i<minIndex:
raise CodingError("Ouch, this should never happen. Info: len(myDict)=%d myIndex=%d" %(len(myDict), myIndex))
answer = myDict.get(i, None)
return answer
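# e.g. getNearestEntry({10: 1.0, 12: 3.0}, 11) returns 3.0 (later indices are tried first, then earlier ones)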
def measurement(blockNumber, pauseBetweenQueries=0.3,
RELAXATION_ROUNDS=3, empty_blocks_at_end=EMPTY_BLOCKS_AT_END):
"""
when a (or more) new block appeared,
add them to the total, and print a line.
"""
whenBefore = sendingEndedFiledate()
# the block we had been waiting for already contains the first transaction/s
    # N.B.: slight inaccuracy of time measurement, because it is not measured how long those needed
# txCount=w3.eth.getBlockTransactionCount(blockNumber)
txCount=getBlockTransactionCount(w3, blockNumber)
start_time = timeit.default_timer()
start_epochtime = time.time()
# TODO: perhaps additional to elapsed system time, show blocktime?
print('starting timer, at block', blockNumber, 'which has ',
txCount,' transactions; at epochtime', start_epochtime)
peakTpsAv = 0
counterStart, blocknumberEnd = 0, -1
tpsAv = {} # memorize all of them, so we can return value at 'block_last'
while(True):
newBlockNumber=w3.eth.blockNumber
if(blockNumber!=newBlockNumber): # when a new block appears:
args = (blockNumber, newBlockNumber, txCount, start_time, peakTpsAv)
txCount, peakTpsAv, tpsAv[newBlockNumber] = analyzeNewBlocks(*args)
blockNumber = newBlockNumber
# for the first 3 rounds, always reset the peakTpsAv again!
if counterStart < RELAXATION_ROUNDS:
peakTpsAv=0
counterStart += 1
# send.py --> store_experiment_data() is called AFTER last tx was mined.
# THEN do another 10 empty blocks ...
# only THEN end this:
# if AUTOSTOP_TPS and blocknumberEnd==-1 and sendingEndedFiledate()!=whenBefore:
if AUTOSTOP_TPS and sendingEndedFiledate()!=whenBefore:
print ("Received signal from send.py = updated INFOFILE.")
block_last = readInfofile()['send']['block_last']
# finalTpsAv = tpsAv[block_last]
finalTpsAv = getNearestEntry(myDict=tpsAv, myIndex=block_last)
break
# finalTpsAv = tpsAv
# blocknumberEnd = newBlockNumber + empty_blocks_at_end
# print ("The end is nigh ... after blocknumber", blocknumberEnd)
# if NODETYPE=="TestRPC":
# break # no empty blocks in TestRPC
# if blocknumberEnd>0 and newBlockNumber > blocknumberEnd:
# break
time.sleep(pauseBetweenQueries) # do not query too often; as little side effect on node as possible
# print ("end") # N.B.: it never gets here !
txt = "Experiment ended! Current blocknumber = %d"
txt = txt % (w3.eth.blockNumber)
print (txt)
return peakTpsAv, finalTpsAv, start_epochtime
def addMeasurementToFile(peakTpsAv, finalTpsAv, start_epochtime, fn=FILE_LAST_EXPERIMENT):
with open(fn, "r") as f:
data = json.load(f)
data["tps"]={}
data["tps"]["peakTpsAv"] = peakTpsAv
data["tps"]["finalTpsAv"] = finalTpsAv
data["tps"]["start_epochtime"] = start_epochtime
with open(fn, "w") as f:
json.dump(data, f)
if __name__ == '__main__':
global w3, NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID
w3, chainInfos = web3connection(RPCaddress=RPCaddress2, account=None)
NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID = chainInfos
blockNumber_before = w3.eth.blockNumber
print ("\nBlock ",blockNumber_before," - waiting for something to happen")
loopUntil_NewContract()
blocknumber_start_here = w3.eth.blockNumber
print ("\nblocknumber_start_here =", blocknumber_start_here)
peakTpsAv, finalTpsAv, start_epochtime = measurement( blocknumber_start_here )
addMeasurementToFile(peakTpsAv, finalTpsAv, start_epochtime, FILE_LAST_EXPERIMENT)
print ("Updated info file:", FILE_LAST_EXPERIMENT, "THE END.")
|
the-stack_0_5175 | from yt.fields.field_info_container import FieldInfoContainer
from yt.fields.magnetic_field import setup_magnetic_field_aliases
from yt.fields.species_fields import add_species_field_by_density, setup_species_fields
from yt.frontends.gadget.fields import GadgetFieldInfo
from yt.frontends.sph.fields import SPHFieldInfo
metal_elements = ["He", "C", "N", "O", "Ne", "Mg", "Si", "S", "Ca", "Fe"]
class GizmoFieldInfo(GadgetFieldInfo):
# The known fields list is according to the GIZMO User Guide. See
# http://www.tapir.caltech.edu/~phopkins/Site/GIZMO_files/gizmo_documentation.html#snaps-reading
known_particle_fields = (
("Coordinates", ("code_length", ["particle_position"], None)),
("Velocities", ("code_velocity", ["particle_velocity"], None)),
("ParticleIDs", ("", ["particle_index"], None)),
("Masses", ("code_mass", ["particle_mass"], None)),
("InternalEnergy", ("code_specific_energy", ["specific_thermal_energy"], None)),
("Density", ("code_mass / code_length**3", ["density"], None)),
("SmoothingLength", ("code_length", ["smoothing_length"], None)),
("ElectronAbundance", ("", [], None)),
("NeutralHydrogenAbundance", ("", [], None)),
("StarFormationRate", ("Msun / yr", [], None)),
("Metallicity", ("code_metallicity", ["metallicity"], None)),
("Metallicity_00", ("", ["metallicity"], None)),
("Metallicity_01", ("", ["He_metallicity"], None)),
("Metallicity_02", ("", ["C_metallicity"], None)),
("Metallicity_03", ("", ["N_metallicity"], None)),
("Metallicity_04", ("", ["O_metallicity"], None)),
("Metallicity_05", ("", ["Ne_metallicity"], None)),
("Metallicity_06", ("", ["Mg_metallicity"], None)),
("Metallicity_07", ("", ["Si_metallicity"], None)),
("Metallicity_08", ("", ["S_metallicity"], None)),
("Metallicity_09", ("", ["Ca_metallicity"], None)),
("Metallicity_10", ("", ["Fe_metallicity"], None)),
("ArtificialViscosity", ("", [], None)),
("MagneticField", ("code_magnetic", ["particle_magnetic_field"], None)),
("DivergenceOfMagneticField", ("code_magnetic / code_length", [], None)),
("StellarFormationTime", ("", [], None)),
# "StellarFormationTime" has different meanings in (non-)cosmological
# runs, so units are left blank here.
("BH_Mass", ("code_mass", [], None)),
("BH_Mdot", ("code_mass / code_time", [], None)),
("BH_Mass_AlphaDisk", ("code_mass", [], None)),
)
def __init__(self, *args, **kwargs):
super(SPHFieldInfo, self).__init__(*args, **kwargs)
if ("PartType0", "Metallicity_00") in self.field_list:
self.nuclei_names = metal_elements
self.species_names = ["H_p0", "H_p1"] + metal_elements
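            # H_p0 / H_p1 are neutral and ionised hydrogen; the metal species mirror
            # the per-element Metallicity_01..Metallicity_10 columns (He through Fe).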
def setup_particle_fields(self, ptype):
FieldInfoContainer.setup_particle_fields(self, ptype)
if ptype in ("PartType0",):
self.setup_gas_particle_fields(ptype)
setup_species_fields(self, ptype)
if ptype in ("PartType4",):
self.setup_star_particle_fields(ptype)
def setup_gas_particle_fields(self, ptype):
super().setup_gas_particle_fields(ptype)
def _h_p0_density(field, data):
x_H = 1.0 - data[(ptype, "He_metallicity")] - data[(ptype, "metallicity")]
return (
x_H
* data[(ptype, "density")]
* data[(ptype, "NeutralHydrogenAbundance")]
)
self.add_field(
(ptype, "H_p0_density"),
sampling_type="particle",
function=_h_p0_density,
units=self.ds.unit_system["density"],
)
add_species_field_by_density(self, ptype, "H")
def _h_p1_density(field, data):
x_H = 1.0 - data[(ptype, "He_metallicity")] - data[(ptype, "metallicity")]
return (
x_H
* data[(ptype, "density")]
* (1.0 - data[(ptype, "NeutralHydrogenAbundance")])
)
self.add_field(
(ptype, "H_p1_density"),
sampling_type="particle",
function=_h_p1_density,
units=self.ds.unit_system["density"],
)
add_species_field_by_density(self, ptype, "H_p1")
def _nuclei_mass_density_field(field, data):
species = field.name[1][: field.name[1].find("_")]
return data[ptype, "density"] * data[ptype, f"{species}_metallicity"]
for species in ["H", "H_p0", "H_p1"]:
for suf in ["_density", "_number_density"]:
field = f"{species}{suf}"
self.alias(("gas", field), (ptype, field))
if (ptype, "ElectronAbundance") in self.field_list:
def _el_number_density(field, data):
return (
data[ptype, "ElectronAbundance"] * data[ptype, "H_number_density"]
)
self.add_field(
(ptype, "El_number_density"),
sampling_type="particle",
function=_el_number_density,
units=self.ds.unit_system["number_density"],
)
self.alias(("gas", "El_number_density"), (ptype, "El_number_density"))
for species in self.nuclei_names:
self.add_field(
(ptype, f"{species}_nuclei_mass_density"),
sampling_type="particle",
function=_nuclei_mass_density_field,
units=self.ds.unit_system["density"],
)
for suf in ["_nuclei_mass_density", "_metallicity"]:
field = f"{species}{suf}"
self.alias(("gas", field), (ptype, field))
def _metal_density_field(field, data):
return data[ptype, "metallicity"] * data[ptype, "density"]
self.add_field(
(ptype, "metal_density"),
sampling_type="local",
function=_metal_density_field,
units=self.ds.unit_system["density"],
)
self.alias(("gas", "metal_density"), (ptype, "metal_density"))
magnetic_field = "MagneticField"
if (ptype, magnetic_field) in self.field_list:
setup_magnetic_field_aliases(self, ptype, magnetic_field)
def setup_star_particle_fields(self, ptype):
def _creation_time(field, data):
if data.ds.cosmological_simulation:
a_form = data["StellarFormationTime"]
z_form = 1 / a_form - 1
creation_time = data.ds.cosmology.t_from_z(z_form)
else:
t_form = data["StellarFormationTime"]
creation_time = data.ds.arr(t_form, "code_time")
return creation_time
self.add_field(
(ptype, "creation_time"),
sampling_type="particle",
function=_creation_time,
units=self.ds.unit_system["time"],
)
def _age(field, data):
return data.ds.current_time - data["creation_time"]
self.add_field(
(ptype, "age"),
sampling_type="particle",
function=_age,
units=self.ds.unit_system["time"],
)
|
the-stack_0_5178 | import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support import import_helper
from test.support import threading_helper
from test.support.script_helper import assert_python_ok
import functools
py_functools = import_helper.import_fresh_module('functools',
blocked=['_functools'])
c_functools = import_helper.import_fresh_module('functools')
decimal = import_helper.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
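        # tuple/dict subclasses passed to __setstate__ are stored (and later
        # used) as plain tuple/dict.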
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
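        # A partial whose func is itself cannot be pickled (RecursionError);
        # self-references inside args or keywords survive the round trip.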
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
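        # repr() must tolerate a non-string key smuggled into p.keywords,
        # even though calling the partial then fails with TypeError.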
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
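    # partialmethod stores args/keywords like partial, but as a descriptor it
    # prepends the bound instance (or class, for classmethods) at call time.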
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
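    # update_wrapper copies the WRAPPER_ASSIGNMENTS attributes, merges the
    # WRAPPER_UPDATES mappings, and records the wrapped callable in __wrapped__.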
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
        self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be an empty sequence when no initial value is given
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
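        # Key objects returned by cmp_to_key are intentionally unhashable.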
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
@support.cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
support.check_disallow_instantiation(
self, type(c_functools.cmp_to_key(None))
)
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
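        # Both the hand-written and the total_ordering-injected comparison
        # methods must pickle by reference and unpickle to the same object.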
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
def test_total_ordering_for_metaclasses_issue_44605(self):
@functools.total_ordering
class SortableMeta(type):
def __new__(cls, name, bases, ns):
return super().__new__(cls, name, bases, ns)
def __lt__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ < other.__name__
def __eq__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ == other.__name__
class B(metaclass=SortableMeta):
pass
class A(metaclass=SortableMeta):
pass
self.assertTrue(A < B)
self.assertFalse(A > B)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            # cache misses occur on the first 7, the first 9, the first 8, and the final 7
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
        # The C version of the lru_cache was not checking to see if
        # the user function call had already modified the cache
        # (this arises in recursive calls and in multi-threading).
        # This caused the cache to have orphan links not referenced
        # by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__
        # only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
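        # f(1, 2) and f((1, 2)) must map to distinct cache entries.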
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
        # wasn't hashable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
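        # With typed=True, 3 and 3.0 (and their keyword forms) are cached
        # separately, so the eight calls below give four misses and four hits.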
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: Why can these counts be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
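        # The cache is shared by all instances; self is part of the key, so
        # b (which equals a) reuses a's entries while c gets its own.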
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
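        # lru_cache wrappers must be weak-referenceable and collectable once
        # the decorated function or enclosing class is deleted.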
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
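        # Dispatch follows the MRO: C(A) falls back to the A implementation,
        # and D(C, B) picks B because B precedes A in D's MRO.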
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
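        # Implementations registered on ABCs apply to their (virtual) subclasses;
        # each later, more specific registration takes precedence.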
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
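        # The dispatch cache is normally a WeakKeyDictionary; swap in a tracing
        # dict to verify that register() clears it and that ABC registrations
        # invalidate it.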
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(metaclass=abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.__dict__['add'].__isabstractmethod__)
with self.assertRaises(TypeError):
Abstract()
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_staticmethod_type_ann_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register
@staticmethod
def _(arg: int):
return isinstance(arg, int)
@t.register
@staticmethod
def _(arg: str):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_type_ann_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register
@classmethod
def _(cls, arg: int):
return cls("int")
@t.register
@classmethod
def _(cls, arg: str):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_method_wrapping_attributes(self):
class A:
@functools.singledispatchmethod
def func(self, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@classmethod
def cls_func(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@staticmethod
def static_func(arg: int) -> str:
"""My function docstring"""
return str(arg)
for meth in (
A.func,
A().func,
A.cls_func,
A().cls_func,
A.static_func,
A().static_func
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(A.func.__name__, 'func')
self.assertEqual(A().func.__name__, 'func')
self.assertEqual(A.cls_func.__name__, 'cls_func')
self.assertEqual(A().cls_func.__name__, 'cls_func')
self.assertEqual(A.static_func.__name__, 'static_func')
self.assertEqual(A().static_func.__name__, 'static_func')
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
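    # A minimal sketch, not one of the tests above, of the workaround that the
    # comment in test_invalid_registrations describes: dispatch on a runtime
    # class from collections.abc instead of a typing generic, and check the
    # element types inside the implementation by hand.
    #
    #   import collections.abc
    #   import functools
    #
    #   @functools.singledispatch
    #   def describe(arg):
    #       return "base"
    #
    #   @describe.register
    #   def _(arg: collections.abc.Iterable):
    #       # typing.Iterable[str] cannot be dispatched on at runtime, so the
    #       # element check happens here instead
    #       if all(isinstance(x, str) for x in arg):
    #           return "iterable of strings"
    #       return "iterable"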
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with threading_helper.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
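# A small self-contained sketch of the compute-once behaviour exercised by
# TestCachedProperty above; it uses the stdlib functools.cached_property and a
# hypothetical Report class introduced only for illustration.
def _cached_property_sketch():
    import functools

    class Report:
        def __init__(self):
            self.calls = 0

        @functools.cached_property
        def total(self):
            # runs on first access only; the result is then stored in __dict__
            self.calls += 1
            return 42

    report = Report()
    assert report.total == 42 and report.total == 42
    assert report.calls == 1  # the getter executed exactly once
    return report.calls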
if __name__ == '__main__':
unittest.main()
the-stack_0_5179
import PIL
import numpy as np
from datetime import datetime
from django.conf import settings
import anodos.tools
import swarm.models
import pflops.models
import distributors.models
import swarm.workers.worker
class Worker(swarm.workers.worker.Worker):
name = 'Service'
def __init__(self):
self.count_of_products = 0
self.count_of_parties = 0
self.count_of_parameters = 0
self.count_of_images = 0
self.count_of_urls = 0
self.message = None
super().__init__()
def run(self):
if self.command == 'info':
            print('Products in PFLOPS:',
                  pflops.models.Product.objects.all().count())
            print('Products not yet transferred from distributors:',
                  distributors.models.Product.objects.filter(to_pflops__isnull=True).count())
            print('Products transferred from distributors:',
                  distributors.models.Product.objects.filter(to_pflops__isnull=False).count())
elif self.command == 'update_products':
            # Update the products
            self.update_products()
            # Update prices and stock availability
            self.update_prices_and_quantities()
            # Prepare the notification
            self.message = f'- products: {self.count_of_products};\n' \
                           f'- parties: {self.count_of_parties}.'
elif self.command == 'update_parameters':
            # Specifications (parameters)
            self.update_parameters()
            # Prepare the notification
            self.message = f'- parameters: {self.count_of_parameters}.'
elif self.command == 'update_images':
            # Images
            self.update_images()
            # Prepare the notification
            self.message = f'- images: {self.count_of_images}.'
elif self.command == 'rewrite_products':
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
product.save()
elif self.command == 'rewrite_parameter_values':
ids_ = pflops.models.ParameterValue.objects.all().values('id')
for n, id_ in enumerate(ids_):
value = pflops.models.ParameterValue.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {value}')
value.save()
elif self.command == 'del_all_images':
pflops.models.ProductImage.objects.all().delete()
elif self.command == 'fix':
bad_name = 'Schneder Electric'
print(bad_name)
try:
distributor = distributors.models.Distributor.objects.get(name=bad_name)
print(distributor)
products = distributors.models.Product.objects.filter(distributor=distributor)
for product in products:
print(product)
product.delete()
vendors = distributors.models.Vendor.objects.filter(distributor=distributor)
for vendor in vendors:
                    print(vendor)
vendor.delete()
distributor.delete()
except distributors.models.Distributor.DoesNotExist:
                print('Nothing to clean up!')
try:
vendor = pflops.models.Vendor.objects.get(name=bad_name)
print(vendor)
products = pflops.models.Product.objects.filter(vendor=vendor)
for product in products:
print(product)
product.delete()
vendor.delete()
except pflops.models.Vendor.DoesNotExist:
                print('Nothing to clean up!')
elif self.command == 'update_sitemap':
self.update_sitemap()
            # Prepare the notification
            self.message = f'- URLs: {self.count_of_urls}.'
        else:
            print('Unknown command!')
if self.message:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}:\n'
f'{self.message}')
else:
anodos.tools.send(content=f'{self.name}: {self.command} finish at {self.delta()}.\n')
    def update_products(self):
        """ Copies the product entity into the final (pflops) catalogue """
ids_ = distributors.models.Product.objects.filter(vendor__to_pflops__isnull=False).values('id')
for n, id_ in enumerate(ids_):
product_ = distributors.models.Product.objects.get(id=id_['id'])
if product_.category is not None:
category = product_.category.to_pflops
else:
category = None
if product_.unit is not None:
unit = product_.unit.to_pflops
else:
unit = None
product = pflops.models.Product.objects.take(vendor=product_.vendor.to_pflops,
part_number=product_.part_number,
category=category,
name=product_.name,
short_name=product_.short_name,
name_rus=product_.name_rus,
name_other=product_.name_other,
description=product_.description,
warranty=product_.warranty,
ean_128=product_.ean_128,
upc=product_.upc,
pnc=product_.pnc,
hs_code=product_.hs_code,
gtin=product_.gtin,
tnved=product_.tnved,
traceable=product_.traceable,
weight=product_.weight,
width=product_.width,
height=product_.height,
depth=product_.depth,
volume=product_.volume,
multiplicity=product_.multiplicity,
unit=unit,
content=product_.content)
if product_.to_pflops != product:
product_.to_pflops = product
product_.save()
self.count_of_products += 1
print(f'{n + 1} of {len(ids_)} {product}')
def update_prices_and_quantities(self):
rub_ = distributors.models.Currency.objects.take(key="RUB")
rub = pflops.models.Currency.objects.take(key="RUB")
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
parties = distributors.models.Party.objects.filter(product__to_pflops=product)
price = None
quantity = 0
quantity_great_than = False
            # Prices
for party in parties:
if party.quantity:
if party.price_out_open:
if party.price_out_open.currency == rub_:
price = party.price_out_open.value
else:
price = float(party.price_out_open.value) * float(party.price_out_open.currency.rate) / \
float(party.price_out_open.currency.quantity)
break
elif party.price_in:
if party.price_in.currency == rub_:
price_ = float(party.price_in.value) * settings.MARGIN
else:
price_ = float(party.price_in.value) * float(party.price_in.currency.rate) / \
float(party.price_in.currency.quantity) * settings.MARGIN
if price is None or price_ < price:
price = price_
            # Quantity
for party in parties:
if party.quantity:
quantity += party.quantity
if party.quantity_great_than:
quantity_great_than = True
if price is not None:
price = pflops.models.Price.objects.create(value=price, currency=rub)
product.price = price
product.quantity = quantity
product.quantity_great_than = quantity_great_than
product.save()
print(f'{n + 1} of {len(ids_)} {product} | {product.quantity} | {product.price}')
self.count_of_parties += 1
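    # Worked example of the conversion above (comments only, with assumed
    # numbers): for a Currency row with rate=90 RUB per quantity=1 unit of the
    # foreign currency and settings.MARGIN = 1.2, an incoming price of 10.0
    # becomes 10.0 * 90 / 1 * 1.2 = 1080.0 RUB, while an open sell price
    # (price_out_open) is only converted to RUB without applying the margin.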
def update_parameters(self):
        # Remove garbage entries
        distributors.models.Parameter.objects.filter(distributor__isnull=True).delete()
        # Remove duplicates and malformed text
parameters = distributors.models.Parameter.objects.all()
for n, parameter in enumerate(parameters):
print(f'{n+1} of {len(parameters)} {parameter}')
if anodos.tools.fix_text(parameter.name) != parameter.name:
parameter.delete()
continue
parameters_ = distributors.models.Parameter.objects.filter(distributor=parameter.distributor,
name=parameter.name)
for m, parameter_ in enumerate(parameters_):
if m > 0:
parameter_.delete()
        # Iterate over all products
ids_ = pflops.models.Product.objects.all().values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
            # Choose the source product whose parameters will be copied into the final catalogue
max_parameters_count = -1
product_ = None
for p_ in distributors.models.Product.objects.filter(to_pflops=product):
parameters_count = distributors.models.ParameterValue.objects.filter(product=p_).count()
                if parameters_count > max_parameters_count:
                    max_parameters_count = parameters_count
                    product_ = p_
            # Collect the IDs of the current parameter values (to delete stale ones later)
parameter_values_ids_ = pflops.models.ParameterValue.objects.filter(product=product).values('id')
parameter_values_ids = set()
for parameter_values_id_ in parameter_values_ids_:
parameter_values_ids.add(str(parameter_values_id_['id']))
            # Copy the parameters
parameter_values_ = distributors.models.ParameterValue.objects.filter(product=product_)
for parameter_value_ in parameter_values_:
if parameter_value_.parameter is not None:
parameter = parameter_value_.parameter.to_pflops
else:
continue
if parameter_value_.unit is not None:
unit = parameter_value_.unit.to_pflops
else:
unit = None
                # TODO Add extra handling when choosing the unit of measurement
value = parameter_value_.value
parameter_value = pflops.models.ParameterValue.objects.take(product=product,
parameter=parameter,
value=value,
unit=unit)
if parameter_value is not None:
if str(parameter_value.id) in parameter_values_ids:
parameter_values_ids.remove(str(parameter_value.id))
self.count_of_parameters += 1
            # Delete outdated parameter values
for parameter_values_id in parameter_values_ids:
pflops.models.ParameterValue.objects.filter(id=parameter_values_id).delete()
def update_images(self):
        # Iterate over all products
ids_ = pflops.models.Product.objects.all().values('id')
# ids_ = pflops.models.Product.objects.filter(images_loaded__isnull=True).values('id')
for n, id_ in enumerate(ids_):
product = pflops.models.Product.objects.get(id=id_['id'])
print(f'{n + 1} of {len(ids_)} {product}')
self.update_images_of_product(product)
def update_images_of_product(self, product):
        # Build comparison vectors from the images already stored for this product
vs = []
images = pflops.models.ProductImage.objects.filter(product=product)
for image in images:
            # If the image file already exists
if image.file_name:
                # Load the image
try:
im = PIL.Image.open(image.file_name)
except FileNotFoundError:
continue
except PIL.UnidentifiedImageError:
continue
                # Compare the image with the ones collected so far
copy = False
thumbnail_ = im.resize((42, 42))
v_ = np.array(thumbnail_).reshape(42 * 42 * 4)
for v in vs:
r = np.dot(v, v_) / (np.linalg.norm(v) * np.linalg.norm(v_))
if r < 1.0e-11:
copy = True
                # If it is a duplicate
if copy is True:
image.delete()
else:
vs.append(v_)
        # Iterate over all source products from the distributors
for product_ in distributors.models.Product.objects.filter(to_pflops=product):
            # Iterate over all their images
images_ = distributors.models.ProductImage.objects.filter(product=product_)
for image_ in images_:
self.count_of_images += 1
                # Fetch (or create) the entity in the database
image = pflops.models.ProductImage.objects.take(product=product,
source_url=image_.source_url)
if image.file_name:
continue
                # Open the source image and check that it is large enough
try:
im = PIL.Image.open(image_.file_name)
except ValueError:
continue
except AttributeError:
continue
except PIL.UnidentifiedImageError:
continue
if im.size[0] < 450 and im.size[1] < 450:
im.close()
continue
                # Compute the sizes and offsets
size = max(im.size[0], im.size[1])
dx = (size - im.size[0]) // 2
dy = (size - im.size[1]) // 2
                # Create a new square image and scale it
try:
im_new = PIL.Image.new('RGBA', (size, size), '#00000000')
im_new.paste(im, (dx, dy))
im_new = im_new.resize((600, 600))
except SyntaxError:
im.close()
im_new.close()
image.delete()
continue
except OSError:
im.close()
im_new.close()
image.delete()
continue
                # Compare the image with the ones collected so far
copy = False
thumbnail_ = im_new.resize((42, 42))
v_ = np.array(thumbnail_).reshape(42*42*4)
for v in vs:
r = np.dot(v, v_) / (np.linalg.norm(v) * np.linalg.norm(v_))
if r < 1.0e-12:
copy = True
if copy is True:
im.close()
im_new.close()
image.delete()
else:
vs.append(v_)
image.file_name = f'{settings.MEDIA_ROOT}products/photos/{image.id}.png'
anodos.tools.create_directory_for_file(image.file_name)
im_new.save(image.file_name, "PNG")
image.save()
print(image)
im.close()
im_new.close()
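    # Sketch of the comparison used above (comments only, illustrative): every
    # accepted image is reduced to a flattened 42x42 RGBA thumbnail vector kept
    # in `vs`, and each new candidate is scored against all of them with
    #
    #   r = np.dot(v, v_) / (np.linalg.norm(v) * np.linalg.norm(v_))
    #
    # i.e. the cosine similarity of the two thumbnail vectors; the score is then
    # compared against a fixed threshold to decide whether the candidate is kept.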
def update_sitemap(self):
print('update_sitemap')
count_of_urls = 0
count_of_urlsets = 0
urls_in_urlset = 25000
urlsets_str = ''
urlset_str = ''
products = pflops.models.Product.objects.all()
for n, product in enumerate(products):
if product.url_xml:
urlset_str = f'{urlset_str}{product.url_xml}'
count_of_urls += 1
if (count_of_urls and count_of_urls % urls_in_urlset == 0) or n + 1 == len(products):
urlset_filename = f'{settings.STATIC_ROOT}sitemap/sitemap-{count_of_urlsets}.xml'
urlset_url = f'{settings.HOST}{settings.STATIC_URL}sitemap/sitemap-{count_of_urlsets}.xml'
urlsets_str = f'{urlsets_str}\n' \
f' <sitemap>\n' \
f' <loc>{urlset_url}</loc>\n' \
f' <lastmod>{str(datetime.now())}</lastmod>\n' \
f' </sitemap>\n'
count_of_urlsets += 1
urlset_str = f'<?xml version="1.0" encoding="UTF-8"?>\n' \
f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n' \
f'{urlset_str}' \
f'</urlset>\n'
anodos.tools.create_directory_for_file(urlset_filename)
urlset_file = open(urlset_filename, 'w')
urlset_file.write(urlset_str)
urlset_file.close()
print(urlset_filename)
urlset_str = ''
urlsets_str = f'<?xml version="1.0" encoding="UTF-8"?>\n' \
f'<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n' \
f'{urlsets_str}' \
f'</sitemapindex>\n'
urlsets_filename = f'{settings.STATIC_ROOT}sitemap/sitemap.xml'
anodos.tools.create_directory_for_file(urlsets_filename)
urlset_files = open(urlsets_filename, 'w')
urlset_files.write(urlsets_str)
urlset_files.close()
print(urlsets_filename)
self.count_of_urls = count_of_urls
the-stack_0_5180
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import pprint
import marshal
import warnings
import pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, dont_fail=False, **kwargs):
self._kwargs = kwargs
self._configure(kwargs, dont_fail=dont_fail)
    def _configure(self, options, dont_fail=False):
        """Configure the exporter by popping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses ``__init__`` methods)
"""
self.encoding = options.pop('encoding', None)
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.indent = options.pop('indent', None)
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = item.fields.keys()
else:
field_iter = item.keys()
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(dont_fail=True, **kwargs)
self.file = file
self._kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**self._kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict) + '\n'
self.file.write(to_bytes(data, self.encoding))
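# A minimal usage sketch (not part of Scrapy itself): every exporter in this
# module is driven through the same start/export/finish lifecycle. The dict item
# and the io.BytesIO buffer below are illustrative stand-ins for a real item and
# output file.
def _jsonlines_exporter_sketch():
    import io

    buffer = io.BytesIO()
    exporter = JsonLinesItemExporter(buffer)
    exporter.start_exporting()
    exporter.export_item({'name': 'example', 'price': 10})
    exporter.finish_exporting()
    return buffer.getvalue()  # one JSON object per line, e.g. b'{"name": "example", "price": 10}\n'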
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(dont_fail=True, **kwargs)
self.file = file
        # there is a small difference between the behaviour of JsonItemExporter.indent
# and ScrapyJSONEncoder.indent. ScrapyJSONEncoder.indent=None is needed to prevent
# the addition of newlines everywhere
json_indent = self.indent if self.indent is not None and self.indent > 0 else None
self._kwargs.setdefault('indent', json_indent)
self._kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**self._kwargs)
self.first_item = True
def _beautify_newline(self):
if self.indent is not None:
self.file.write(b'\n')
def start_exporting(self):
self.file.write(b"[")
self._beautify_newline()
def finish_exporting(self):
self._beautify_newline()
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',')
self._beautify_newline()
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict)
self.file.write(to_bytes(data, self.encoding))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
super().__init__(**kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.xg = XMLGenerator(file, encoding=self.encoding)
def _beautify_newline(self, new_item=False):
if self.indent is not None and (self.indent > 0 or new_item):
self.xg.characters('\n')
def _beautify_indent(self, depth=1):
if self.indent:
self.xg.characters(' ' * self.indent * depth)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
self._beautify_newline(new_item=True)
def export_item(self, item):
self._beautify_indent(depth=1)
self.xg.startElement(self.item_element, {})
self._beautify_newline()
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value, depth=2)
self._beautify_indent(depth=1)
self.xg.endElement(self.item_element)
self._beautify_newline(new_item=True)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value, depth):
self._beautify_indent(depth=depth)
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
self._beautify_newline()
for subname, value in serialized_value.items():
self._export_xml_field(subname, value, depth=depth+1)
self._beautify_indent(depth=depth)
elif is_listlike(serialized_value):
self._beautify_newline()
for value in serialized_value:
self._export_xml_field('value', value, depth=depth+1)
self._beautify_indent(depth=depth)
elif isinstance(serialized_value, str):
self.xg.characters(serialized_value)
else:
self.xg.characters(str(serialized_value))
self.xg.endElement(name)
self._beautify_newline()
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
super().__init__(dont_fail=True, **kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.include_headers_line = include_headers_line
self.stream = io.TextIOWrapper(
file,
line_buffering=False,
write_through=True,
encoding=self.encoding,
newline='' # Windows needs this https://github.com/scrapy/scrapy/issues/3034
)
self.csv_writer = csv.writer(self.stream, **self._kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_unicode(s, self.encoding)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
super().__init__(**kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
"""Exports items in a Python-specific binary format (see
:mod:`marshal`).
:param file: The file-like object to use for exporting the data. Its
``write`` method should accept :class:`bytes` (a disk file
opened in binary mode, a :class:`~io.BytesIO` object, etc)
"""
def __init__(self, file, **kwargs):
super().__init__(**kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
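# A short round-trip sketch for MarshalItemExporter (illustrative, not part of
# Scrapy): the docstring above only requires a binary file-like object, so an
# in-memory io.BytesIO buffer works and marshal.load() recovers the exported dict.
def _marshal_exporter_sketch():
    import io

    buffer = io.BytesIO()
    exporter = MarshalItemExporter(buffer)
    exporter.export_item({'name': 'example', 'price': 10})
    buffer.seek(0)
    return marshal.load(buffer)  # {'name': 'example', 'price': 10}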
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__(**kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""This is a base class for item exporters that extends
:class:`BaseItemExporter` with support for nested items.
It serializes items to built-in Python types, so that any serialization
library (e.g. :mod:`json` or msgpack_) can be used on top of it.
.. _msgpack: https://pypi.org/project/msgpack/
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
if not self.encoding:
self.encoding = 'utf-8'
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (str, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in value.items():
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
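# PythonItemExporter's docstring notes that its output consists of built-in
# Python types, so any serialization library can be layered on top. A hedged
# sketch (binary=False avoids the deprecated bytes output):
def _python_exporter_to_json_sketch():
    import json

    exporter = PythonItemExporter(binary=False)
    exported = exporter.export_item({'name': 'example', 'tags': ['a', 'b']})
    return json.dumps(exported)  # '{"name": "example", "tags": ["a", "b"]}'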
the-stack_0_5187
from __future__ import print_function
from PIL import Image
from os.path import join
import os
import torch.utils.data as data
from utils import download_url, check_integrity, list_dir, list_files
import torch
import torchvision
from torchvision import transforms
from sampler import RandSubClassSampler
class Omniglot(data.Dataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
puts it in root directory. If the zip files are already downloaded, they are not
downloaded again.
"""
folder = 'omniglot-py'
download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
zips_md5 = {
'images_background': '68d2efa1b9178cc56df9314c21c6e718',
'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
}
def __init__(self, root, background=True,
transform=None, target_transform=None,
download=False, deform=None):
self.root = join(os.path.expanduser(root), self.folder)
self.background = background
self.transform = transform
self.deform = deform
self.target_transform = target_transform
if download:
self.download()
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
for a in self._alphabets], [])
self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
for idx, character in enumerate(self._characters)]
self._flat_character_images = sum(self._character_images, [])
def __len__(self):
return len(self._flat_character_images)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode='r').convert('L')
image_d = image
if self.deform is not None:
image_d = self.deform(image_d)
if self.transform:
image = self.transform(image)
image_d = self.transform(image_d)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, image_d, character_class, index
def _check_integrity(self):
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):
return False
return True
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
filename = self._get_target_folder()
zip_filename = filename + '.zip'
url = self.download_url_prefix + '/' + zip_filename
download_url(url, self.root, zip_filename, self.zips_md5[filename])
print('Extracting downloaded file: ' + join(self.root, zip_filename))
with zipfile.ZipFile(join(self.root, zip_filename), 'r') as zip_file:
zip_file.extractall(self.root)
def _get_target_folder(self):
if self.background == 'images_background_train' or self.background == 'images_background_val':
return self.background
return 'images_background' if self.background else 'images_evaluation'
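# A minimal, hedged usage sketch of the dataset class above; the root path is an
# assumption and download=True fetches the official zip on first use. Each sample
# is the 4-tuple (image, deformed_image, character_class, index) returned by
# __getitem__.
def _omniglot_dataset_sketch():
    dataset = Omniglot(root='../data', background=True, download=True,
                       transform=transforms.Compose([transforms.Resize(32),
                                                     transforms.ToTensor()]))
    image, image_d, target, idx = dataset[0]
    return image.shape, target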
def Omniglot_loader(batch_size, num_workers=2, root='../data'):
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=True, background=True,
transform=transforms.Compose(
[transforms.RandomResizedCrop(32, (0.85, 1.)),
transforms.ToTensor(),
binary_flip,
normalize]
))
train_length = len(train_dataset)
    train_imgid2cid = [train_dataset[i][2] for i in range(train_length)]  # train_dataset[i] returns (img, img_d, cid, idx)
# Randomly select 20 characters from 964. By default setting (batch_size=100), each character has 5 images in a mini-batch.
train_sampler = RandSubClassSampler(
inds=range(train_length),
labels=train_imgid2cid,
cls_per_batch=20,
batch_size=batch_size,
num_batch=train_length//batch_size)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=train_sampler)
train_loader.num_classes = 964
test_dataset = Omniglot(
root=root, download=True, background=False,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
eval_length = len(test_dataset)
eval_imgid2cid = [test_dataset[i][2] for i in range(eval_length)]
eval_sampler = RandSubClassSampler(
inds=range(eval_length),
labels=eval_imgid2cid,
cls_per_batch=20,
batch_size=batch_size,
num_batch=eval_length // batch_size)
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=eval_sampler)
eval_loader.num_classes = 659
return train_loader, eval_loader
def Omniglot_bg_loader(batch_size, num_workers=2, train_cls_per_batch=20, test_cls_per_batch=20, root='../data'):
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=False, background='images_background_train',
transform=transforms.Compose(
[transforms.RandomResizedCrop(32, (0.85, 1.)),
transforms.ToTensor(),
binary_flip,
normalize]
))
if train_cls_per_batch is not None:
train_length = len(train_dataset)
train_imgid2cid = [train_dataset[i][2] for i in range(train_length)]
train_sampler = RandSubClassSampler(
inds=range(train_length),
labels=train_imgid2cid,
cls_per_batch=train_cls_per_batch,
batch_size=batch_size,
num_batch=train_length//batch_size)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=train_sampler)
else:
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
train_loader.num_classes = 964 - 169
test_dataset = Omniglot(
root=root, download=False, background='images_background_val',
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
if test_cls_per_batch is not None:
eval_length = len(test_dataset)
eval_imgid2cid = [test_dataset[i][2] for i in range(eval_length)]
eval_sampler = RandSubClassSampler(
inds=range(eval_length),
labels=eval_imgid2cid,
cls_per_batch=test_cls_per_batch,
batch_size=batch_size,
num_batch=eval_length // batch_size)
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, sampler=eval_sampler)
else:
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers)
eval_loader.num_classes = 169
return train_loader, eval_loader
def omniglot_alphabet_func(alphabet, background, root='../data'):
def create_alphabet_dataset(batch_size, num_workers=2):
# This dataset is only for unsupervised clustering
# train_dataset (with data augmentation) is used during the optimization of clustering criteria
# test_dataset (without data augmentation) is used after the clustering is converged
binary_flip = transforms.Lambda(lambda x: 1 - x)
normalize = transforms.Normalize((0.086,), (0.235,))
train_dataset = Omniglot(
root=root, download=True, background=background,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
),
deform=transforms.Compose([
transforms.RandomAffine(
degrees = (-5, 5),
translate = (0.1, 0.1),
scale = (0.8, 1.2),
shear = (-10, 10),
fillcolor = 255)
])
)
        # The following part depends on the internal implementation of the official Omniglot dataset loader
        # Only use the images which have the alphabet name in their path (_characters[cid])
valid_flat_character_images = [(imgname,cid) for imgname,cid in train_dataset._flat_character_images if alphabet in train_dataset._characters[cid]]
ndata = len(valid_flat_character_images) # The number of data after filtering
        train_imgid2cid = [valid_flat_character_images[i][1] for i in range(ndata)]  # each tuple valid_flat_character_images[i] is (img, cid)
cid_set = set(train_imgid2cid) # The labels are not 0..c-1 here.
cid2ncid = {cid:ncid for ncid,cid in enumerate(cid_set)} # Create the mapping table for New cid (ncid)
valid_characters = {cid2ncid[cid]:train_dataset._characters[cid] for cid in cid_set}
for i in range(ndata): # Convert the labels to make sure it has the value {0..c-1}
valid_flat_character_images[i] = (valid_flat_character_images[i][0],cid2ncid[valid_flat_character_images[i][1]])
# Apply surgery to the dataset
train_dataset._flat_character_images = valid_flat_character_images
train_dataset._characters = valid_characters
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
train_loader.num_classes = len(cid_set)
test_dataset = Omniglot(
root=root, download=True, background=background,
transform=transforms.Compose(
[transforms.Resize(32),
transforms.ToTensor(),
binary_flip,
normalize]
))
# Apply surgery to the dataset
test_dataset._flat_character_images = valid_flat_character_images # Set the new list to the dataset
test_dataset._characters = valid_characters
eval_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers)
eval_loader.num_classes = train_loader.num_classes
print('=> Alphabet %s has %d characters and %d images.'%(alphabet, train_loader.num_classes, len(train_dataset)))
return train_loader, eval_loader
return create_alphabet_dataset
omniglot_background_alphabets=[
'Alphabet_of_the_Magi',
'Gujarati',
'Anglo-Saxon_Futhorc',
'Hebrew',
'Arcadian',
'Inuktitut_(Canadian_Aboriginal_Syllabics)',
'Armenian',
'Japanese_(hiragana)',
'Asomtavruli_(Georgian)',
'Japanese_(katakana)',
'Balinese',
'Korean',
'Bengali',
'Latin',
'Blackfoot_(Canadian_Aboriginal_Syllabics)',
'Malay_(Jawi_-_Arabic)',
'Braille',
'Mkhedruli_(Georgian)',
'Burmese_(Myanmar)',
'N_Ko',
'Cyrillic',
'Ojibwe_(Canadian_Aboriginal_Syllabics)',
'Early_Aramaic',
'Sanskrit',
'Futurama',
'Syriac_(Estrangelo)',
'Grantha',
'Tagalog',
'Greek',
'Tifinagh'
]
omniglot_background_val_alphabets=[
'Alphabet_of_the_Magi',
'Japanese_(katakana)',
'Latin',
'Cyrillic',
'Grantha'
]
omniglot_evaluation_alphabets_mapping = {
'Malayalam':'Malayalam',
'Kannada':'Kannada',
'Syriac':'Syriac_(Serto)',
'Atemayar_Qelisayer':'Atemayar_Qelisayer',
'Gurmukhi':'Gurmukhi',
'Old_Church_Slavonic':'Old_Church_Slavonic_(Cyrillic)',
'Manipuri':'Manipuri',
'Atlantean':'Atlantean',
'Sylheti':'Sylheti',
'Mongolian':'Mongolian',
'Aurek':'Aurek-Besh',
'Angelic':'Angelic',
'ULOG':'ULOG',
'Oriya':'Oriya',
'Avesta':'Avesta',
'Tibetan':'Tibetan',
'Tengwar':'Tengwar',
'Keble':'Keble',
'Ge_ez':'Ge_ez',
'Glagolitic':'Glagolitic'
}
# Create the functions to access the individual alphabet dataset in Omniglot
for funcName, alphabetStr in omniglot_evaluation_alphabets_mapping.items():
locals()['Omniglot_eval_' + funcName] = omniglot_alphabet_func(alphabet=alphabetStr, background=False)
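# The loop above injects one loader factory per evaluation alphabet into the
# module namespace, e.g. Omniglot_eval_Kannada. A hedged usage sketch (the batch
# size is arbitrary; the name resolves at call time because it is created
# dynamically via locals()):
def _alphabet_loader_sketch():
    train_loader, eval_loader = Omniglot_eval_Kannada(batch_size=32)
    return train_loader.num_classes  # number of characters in that alphabet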
def show_batch(inp, title=None):
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Qt5Agg")
"""Show batch"""
inp = inp.numpy().transpose((1, 2, 0))
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.show()
    input()
if __name__ == '__main__':
import numpy as np
train_loader, eval_loader = Omniglot_loader(batch_size=10, num_workers=2, root='./data_shallow14/datasets')
print('len', len(train_loader.dataset), len(eval_loader.dataset))
img, img_d, target, idx = next(iter(train_loader))
print(target, idx)
print(len(np.unique(target)))
out = torchvision.utils.make_grid(img_d)
show_batch(out, title=target)
the-stack_0_5188
# -*- coding: utf-8 -*-
"""Cisco DNA Center ComplianceDetailsOfDevice data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0(object):
"""ComplianceDetailsOfDevice request schema definition."""
def __init__(self):
super(JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"deviceUuid": {
"type": "string"
},
"response": {
"items": {
"properties": {
"additionalDataURL": {
"type": "string"
},
"category": {
"type": "string"
},
"complianceType": {
"type": "string"
},
"deviceUuid": {
"type": "string"
},
"displayName": {
"type": "string"
},
"lastSyncTime": {
"type": "string"
},
"lastUpdateTime": {
"type": "string"
},
"message": {
"type": "string"
},
"sourceInfoList": {
"items": {
"properties": {
"appName": {
"type": "string"
},
"businessKey": {
"properties": {
"businessKeyAttributes": {
"type": "string"
},
"otherAttributes": {
"properties": {
"cfsAttributes": {
"type": "string"
},
"name": {
"type": "string"
}
},
"type": "object"
},
"resourceName": {
"type": "string"
}
},
"type": "object"
},
"count": {
"type": "number"
},
"diffList": {
"items": {
"properties": {
"businessKey": {
"type": "string"
},
"configuredValue": {
"type": "string"
},
"displayName": {
"type": "string"
},
"extendedAttributes": {
"type": "string"
},
"intendedValue": {
"type": "string"
},
"moveFromPath": {
"type": "string"
},
"op": {
"type": "string"
},
"path": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"displayName": {
"type": "string"
},
"licenseAppName": {
"type": "string"
},
"name": {
"type": "string"
},
"nameWithBusinessKey": {
"type": "string"
},
"networkProfileName": {
"type": "string"
},
"provisioningArea": {
"type": "string"
},
"sourceEnum": {
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"state": {
"type": "string"
},
"status": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
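# A hedged usage sketch (not part of the generated SDK module): the validator is
# instantiated once, and validate() either returns silently or raises
# MalformedRequest when the payload does not match the schema above.
def _validator_usage_sketch():
    validator = JSONSchemaValidatorB70E1B6A2F51A59690669A4B2Fd3F0()
    validator.validate({'deviceUuid': 'abc', 'version': '1.0', 'response': []})
    try:
        validator.validate({'response': 'not-a-list'})
    except MalformedRequest as err:
        return str(err)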
the-stack_0_5190
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=arguments-differ
"""Contains a (slow) Python simulator.
It simulates a qasm quantum circuit (an experiment) that has been compiled
to run on the simulator. It is exponential in the number of qubits.
The simulator is run using
.. code-block:: python
QasmSimulatorPy().run(qobj)
Where the input is a Qobj object and the output is a BasicAerJob object, which can
later be queried for the Result object. The result will contain a 'memory' data
field, which is a result of measurements for each shot.
"""
import uuid
import time
import logging
from math import log2
from collections import Counter
import numpy as np
from qiskit.util import local_hardware_info
from qiskit.providers.models import QasmBackendConfiguration
from qiskit.result import Result
from qiskit.providers import BaseBackend
from qiskit.providers.basicaer.basicaerjob import BasicAerJob
from .exceptions import BasicAerError
from .basicaertools import single_gate_matrix
from .basicaertools import cx_gate_matrix
from .basicaertools import einsum_vecmul_index
logger = logging.getLogger(__name__)
class QasmSimulatorPy(BaseBackend):
"""Python implementation of a qasm simulator."""
MAX_QUBITS_MEMORY = int(log2(local_hardware_info()['memory'] * (1024 ** 3) / 16))
DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': '2.0.0',
'n_qubits': min(24, MAX_QUBITS_MEMORY),
'url': 'https://github.com/Qiskit/qiskit-terra',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'coupling_map': None,
'description': 'A python simulator for qasm experiments',
'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'unitary'],
'gates': [
{
'name': 'u1',
'parameters': ['lambda'],
'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'
},
{
'name': 'u2',
'parameters': ['phi', 'lambda'],
'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'
},
{
'name': 'u3',
'parameters': ['theta', 'phi', 'lambda'],
'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'
},
{
'name': 'cx',
'parameters': ['c', 't'],
'qasm_def': 'gate cx c,t { CX c,t; }'
},
{
'name': 'id',
'parameters': ['a'],
'qasm_def': 'gate id a { U(0,0,0) a; }'
},
{
'name': 'unitary',
'parameters': ['matrix'],
'qasm_def': 'unitary(matrix) q1, q2,...'
}
]
}
DEFAULT_OPTIONS = {
"initial_statevector": None,
"chop_threshold": 1e-15
}
# Class level variable to return the final state at the end of simulation
# This should be set to True for the statevector simulator
SHOW_FINAL_STATE = False
def __init__(self, configuration=None, provider=None):
super().__init__(configuration=(
configuration or QasmBackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),
provider=provider)
# Define attributes in __init__.
self._local_random = np.random.RandomState()
self._classical_memory = 0
self._classical_register = 0
self._statevector = 0
self._number_of_cmembits = 0
self._number_of_qubits = 0
self._shots = 0
self._memory = False
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
self._qobj_config = None
# TEMP
self._sample_measure = False
def _add_unitary(self, gate, qubits):
"""Apply an N-qubit unitary matrix.
Args:
gate (matrix_like): an N-qubit unitary matrix
qubits (list): the list of N-qubits.
"""
# Get the number of qubits
num_qubits = len(qubits)
# Compute einsum index string for 1-qubit matrix multiplication
indexes = einsum_vecmul_index(qubits, self._number_of_qubits)
# Convert to complex rank-2N tensor
gate_tensor = np.reshape(np.array(gate, dtype=complex),
num_qubits * [2, 2])
# Apply matrix multiplication
self._statevector = np.einsum(indexes, gate_tensor, self._statevector,
dtype=complex, casting='no')
def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
# Compute einsum index string for 1-qubit matrix multiplication
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
# Else outcome was '1'
return '1', probabilities[1]
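    # Worked example for the method above (comments only): for the two-qubit
    # state (|00> + |11>)/sqrt(2) stored as a rank-2 tensor, measuring qubit 0
    # sums |amplitude|**2 over the remaining axis, giving probabilities
    # [0.5, 0.5]; an outcome is then drawn from that distribution and returned
    # together with its probability so _add_qasm_measure can renormalize.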
def _add_sample_measure(self, measure_params, num_samples):
"""Generate memory samples from current statevector.
Args:
measure_params (list): List of (qubit, cmembit) values for
measure instructions to sample.
num_samples (int): The number of memory samples to generate.
Returns:
list: A list of memory values in hex format.
"""
# Get unique qubits that are actually measured and sort in
# ascending order
measured_qubits = sorted(list({qubit for qubit, cmembit in measure_params}))
num_measured = len(measured_qubits)
# We use the axis kwarg for numpy.sum to compute probabilities
# this sums over all non-measured qubits to return a vector
# of measure probabilities for the measured qubits
axis = list(range(self._number_of_qubits))
for qubit in reversed(measured_qubits):
# Remove from largest qubit to smallest so list position is correct
# with respect to position from end of the list
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.reshape(np.sum(np.abs(self._statevector) ** 2,
axis=tuple(axis)),
2 ** num_measured)
# Generate samples on measured qubits as ints with qubit
# position in the bit-string for each int given by the qubit
# position in the sorted measured_qubits list
samples = self._local_random.choice(range(2 ** num_measured),
num_samples, p=probabilities)
# Convert the ints to bitstrings
memory = []
for sample in samples:
classical_memory = self._classical_memory
for qubit, cmembit in measure_params:
pos = measured_qubits.index(qubit)
qubit_outcome = int((sample & (1 << pos)) >> pos)
membit = 1 << cmembit
classical_memory = (classical_memory & (~membit)) | (qubit_outcome << cmembit)
value = bin(classical_memory)[2:]
memory.append(hex(int(value, 2)))
return memory
def _add_qasm_measure(self, qubit, cmembit, cregbit=None):
"""Apply a measure instruction to a qubit.
Args:
qubit (int): qubit is the qubit measured.
cmembit (int): is the classical memory bit to store outcome in.
cregbit (int, optional): is the classical register bit to store outcome in.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update classical state
membit = 1 << cmembit
self._classical_memory = (self._classical_memory & (~membit)) | (int(outcome) << cmembit)
if cregbit is not None:
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
# update quantum state
if outcome == '0':
update_diag = [[1 / np.sqrt(probability), 0], [0, 0]]
else:
update_diag = [[0, 0], [0, 1 / np.sqrt(probability)]]
# update classical state
self._add_unitary(update_diag, [qubit])
def _add_qasm_reset(self, qubit):
"""Apply a reset instruction to a qubit.
Args:
qubit (int): the qubit being reset
This is done by doing a simulating a measurement
outcome and projecting onto the outcome state while
renormalizing.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update quantum state
if outcome == '0':
update = [[1 / np.sqrt(probability), 0], [0, 0]]
self._add_unitary(update, [qubit])
else:
update = [[0, 1 / np.sqrt(probability)], [0, 0]]
self._add_unitary(update, [qubit])
def _validate_initial_statevector(self):
"""Validate an initial statevector"""
# If initial statevector isn't set we don't need to validate
if self._initial_statevector is None:
return
# Check statevector is correct length for number of qubits
length = len(self._initial_statevector)
required_dim = 2 ** self._number_of_qubits
if length != required_dim:
raise BasicAerError('initial statevector is incorrect length: ' +
'{} != {}'.format(length, required_dim))
def _set_options(self, qobj_config=None, backend_options=None):
"""Set the backend options for all experiments in a qobj"""
# Reset default options
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
if backend_options is None:
backend_options = {}
# Check for custom initial statevector in backend_options first,
# then config second
if 'initial_statevector' in backend_options:
self._initial_statevector = np.array(backend_options['initial_statevector'],
dtype=complex)
elif hasattr(qobj_config, 'initial_statevector'):
self._initial_statevector = np.array(qobj_config.initial_statevector,
dtype=complex)
if self._initial_statevector is not None:
# Check the initial statevector is normalized
norm = np.linalg.norm(self._initial_statevector)
if round(norm, 12) != 1:
raise BasicAerError('initial statevector is not normalized: ' +
'norm {} != 1'.format(norm))
# Check for custom chop threshold
# Replace with custom options
if 'chop_threshold' in backend_options:
self._chop_threshold = backend_options['chop_threshold']
elif hasattr(qobj_config, 'chop_threshold'):
self._chop_threshold = qobj_config.chop_threshold
def _initialize_statevector(self):
"""Set the initial statevector for simulation"""
if self._initial_statevector is None:
# Set to default state of all qubits in |0>
self._statevector = np.zeros(2 ** self._number_of_qubits,
dtype=complex)
self._statevector[0] = 1
else:
self._statevector = self._initial_statevector.copy()
# Reshape to rank-N tensor
self._statevector = np.reshape(self._statevector,
self._number_of_qubits * [2])
def _get_statevector(self):
"""Return the current statevector"""
vec = np.reshape(self._statevector, 2 ** self._number_of_qubits)
vec[abs(vec) < self._chop_threshold] = 0.0
return vec
def _validate_measure_sampling(self, experiment):
"""Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
"""
# If shots=1 we should disable measure sampling.
# This is also required for statevector simulator to return the
# correct final statevector without silently dropping final measurements.
if self._shots <= 1:
self._sample_measure = False
return
# Check for config flag
if hasattr(experiment.config, 'allows_measure_sampling'):
self._sample_measure = experiment.config.allows_measure_sampling
# If flag isn't found do a simple test to see if a circuit contains
# no reset instructions, and no gates instructions after
# the first measure.
else:
measure_flag = False
for instruction in experiment.instructions:
# If circuit contains reset operations we cannot sample
if instruction.name == "reset":
self._sample_measure = False
return
# If circuit contains a measure option then we can
# sample only if all following operations are measures
if measure_flag:
# If we find a non-measure instruction
# we cannot do measure sampling
if instruction.name not in ["measure", "barrier", "id", "u0"]:
self._sample_measure = False
return
elif instruction.name == "measure":
measure_flag = True
# If we made it to the end of the circuit without returning
# measure sampling is allowed
self._sample_measure = True
def run(self, qobj, backend_options=None):
"""Run qobj asynchronously.
Args:
qobj (Qobj): payload of the experiment
backend_options (dict): backend options
Returns:
BasicAerJob: derived from BaseJob
Additional Information:
backend_options: Is a dict of options for the backend. It may contain
* "initial_statevector": vector_like
The "initial_statevector" option specifies a custom initial
initial statevector for the simulator to be used instead of the all
zero state. This size of this vector must be correct for the number
of qubits in all experiments in the qobj.
Example::
backend_options = {
"initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
}
"""
self._set_options(qobj_config=qobj.config,
backend_options=backend_options)
job_id = str(uuid.uuid4())
job = BasicAerJob(self, job_id, self._run_job, qobj)
job.submit()
return job
def _run_job(self, job_id, qobj):
"""Run experiments in qobj
Args:
job_id (str): unique id for the job.
qobj (Qobj): job description
Returns:
Result: Result object
"""
self._validate(qobj)
result_list = []
self._shots = qobj.config.shots
self._memory = getattr(qobj.config, 'memory', False)
self._qobj_config = qobj.config
start = time.time()
for experiment in qobj.experiments:
result_list.append(self.run_experiment(experiment))
end = time.time()
result = {'backend_name': self.name(),
'backend_version': self._configuration.backend_version,
'qobj_id': qobj.qobj_id,
'job_id': job_id,
'results': result_list,
'status': 'COMPLETED',
'success': True,
'time_taken': (end - start),
'header': qobj.header.to_dict()}
return Result.from_dict(result)
def run_experiment(self, experiment):
"""Run an experiment (circuit) and return a single experiment result.
Args:
experiment (QobjExperiment): experiment from qobj experiments list
Returns:
dict: A result dictionary which looks something like::
{
"name": name of this experiment (obtained from qobj.experiment header)
"seed": random seed used for simulation
"shots": number of shots used in the simulation
"data":
{
"counts": {'0x9: 5, ...},
"memory": ['0x9', '0xF', '0x1D', ..., '0x9']
},
"status": status string for the simulation
"success": boolean
"time_taken": simulation time of this single experiment
}
Raises:
BasicAerError: if an error occurred.
"""
start = time.time()
self._number_of_qubits = experiment.config.n_qubits
self._number_of_cmembits = experiment.config.memory_slots
self._statevector = 0
self._classical_memory = 0
self._classical_register = 0
self._sample_measure = False
# Validate the dimension of initial statevector if set
self._validate_initial_statevector()
# Get the seed looking in circuit, qobj, and then random.
if hasattr(experiment.config, 'seed_simulator'):
seed_simulator = experiment.config.seed_simulator
elif hasattr(self._qobj_config, 'seed_simulator'):
seed_simulator = self._qobj_config.seed_simulator
else:
# For compatibility on Windows force dtype to be int32
# and set the maximum value to be (2 ** 31) - 1
seed_simulator = np.random.randint(2147483647, dtype='int32')
self._local_random.seed(seed=seed_simulator)
# Check if measure sampling is supported for current circuit
self._validate_measure_sampling(experiment)
# List of final counts for all shots
memory = []
# Check if we can sample measurements, if so we only perform 1 shot
# and sample all outcomes from the final state vector
if self._sample_measure:
shots = 1
# Store (qubit, cmembit) pairs for all measure ops in circuit to
# be sampled
measure_sample_ops = []
else:
shots = self._shots
for _ in range(shots):
self._initialize_statevector()
# Initialize classical memory to all 0
self._classical_memory = 0
self._classical_register = 0
for operation in experiment.instructions:
conditional = getattr(operation, 'conditional', None)
if isinstance(conditional, int):
conditional_bit_set = (self._classical_register >> conditional) & 1
if not conditional_bit_set:
continue
elif conditional is not None:
mask = int(operation.conditional.mask, 16)
if mask > 0:
value = self._classical_memory & mask
while (mask & 0x1) == 0:
mask >>= 1
value >>= 1
if value != int(operation.conditional.val, 16):
continue
# Check if single gate
if operation.name == 'unitary':
qubits = operation.qubits
gate = operation.params[0]
self._add_unitary(gate, qubits)
elif operation.name in ('U', 'u1', 'u2', 'u3'):
params = getattr(operation, 'params', None)
qubit = operation.qubits[0]
gate = single_gate_matrix(operation.name, params)
self._add_unitary(gate, [qubit])
# Check if CX gate
elif operation.name in ('id', 'u0'):
pass
elif operation.name in ('CX', 'cx'):
qubit0 = operation.qubits[0]
qubit1 = operation.qubits[1]
gate = cx_gate_matrix()
self._add_unitary(gate, [qubit0, qubit1])
# Check if reset
elif operation.name == 'reset':
qubit = operation.qubits[0]
self._add_qasm_reset(qubit)
# Check if barrier
elif operation.name == 'barrier':
pass
# Check if measure
elif operation.name == 'measure':
qubit = operation.qubits[0]
cmembit = operation.memory[0]
cregbit = operation.register[0] if hasattr(operation, 'register') else None
if self._sample_measure:
# If sampling measurements record the qubit and cmembit
# for this measurement for later sampling
measure_sample_ops.append((qubit, cmembit))
else:
# If not sampling perform measurement as normal
self._add_qasm_measure(qubit, cmembit, cregbit)
elif operation.name == 'bfunc':
mask = int(operation.mask, 16)
relation = operation.relation
val = int(operation.val, 16)
cregbit = operation.register
cmembit = operation.memory if hasattr(operation, 'memory') else None
compared = (self._classical_register & mask) - val
if relation == '==':
outcome = (compared == 0)
elif relation == '!=':
outcome = (compared != 0)
elif relation == '<':
outcome = (compared < 0)
elif relation == '<=':
outcome = (compared <= 0)
elif relation == '>':
outcome = (compared > 0)
elif relation == '>=':
outcome = (compared >= 0)
else:
raise BasicAerError('Invalid boolean function relation.')
# Store outcome in register and optionally memory slot
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
if cmembit is not None:
membit = 1 << cmembit
self._classical_memory = \
(self._classical_memory & (~membit)) | (int(outcome) << cmembit)
else:
backend = self.name()
err_msg = '{0} encountered unrecognized operation "{1}"'
raise BasicAerError(err_msg.format(backend, operation.name))
# Add final creg data to memory list
if self._number_of_cmembits > 0:
if self._sample_measure:
# If sampling we generate all shot samples from the final statevector
memory = self._add_sample_measure(measure_sample_ops, self._shots)
else:
# Turn classical_memory (int) into bit string and pad zero for unused cmembits
outcome = bin(self._classical_memory)[2:]
memory.append(hex(int(outcome, 2)))
# Add data
data = {'counts': dict(Counter(memory))}
# Optionally add memory list
if self._memory:
data['memory'] = memory
# Optionally add final statevector
if self.SHOW_FINAL_STATE:
data['statevector'] = self._get_statevector()
# Remove empty counts and memory for statevector simulator
if not data['counts']:
data.pop('counts')
if 'memory' in data and not data['memory']:
data.pop('memory')
end = time.time()
return {'name': experiment.header.name,
'seed_simulator': seed_simulator,
'shots': self._shots,
'data': data,
'status': 'DONE',
'success': True,
'time_taken': (end - start),
'header': experiment.header.to_dict()}
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas."""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise BasicAerError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
for experiment in qobj.experiments:
name = experiment.header.name
if experiment.config.memory_slots == 0:
logger.warning('No classical registers in circuit "%s", '
'counts will be empty.', name)
elif 'measure' not in [op.name for op in experiment.instructions]:
logger.warning('No measurements in circuit "%s", '
'classical register will remain all zeros.', name)
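# Minimal standalone sketch (an illustrative addition, not part of the original
# module): how run_experiment above normalizes a conditional mask before
# comparing the masked classical memory against the expected value.
if __name__ == '__main__':
    classical_memory = 0b0100
    mask, expected = 0b1100, 0b01
    value = classical_memory & mask
    while (mask & 0x1) == 0:
        # Shift both mask and value right until the mask's lowest set bit
        # reaches position 0, mirroring the loop in run_experiment.
        mask >>= 1
        value >>= 1
    print(value == expected)  # prints True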
|
the-stack_0_5191 | # THIS IS PART 2 SINCE I SKIPPED BADLIBS!
from random import randint
import sys
guess_this_number = randint(1,10)
guess = 0
guesses = 0
clue = ""
first_round = True
while guess != guess_this_number:
if first_round == True:
guess = int(input("Enter an integer number: "))
first_round = False
else:
print("- - - - - - - - - - - - - - - -")
print(guess, clue)
guess = int(input("Guess again: "))
if (guess < guess_this_number):
clue = "is too low!"
else:
clue = "is too high!"
guesses += 1
print("- - - - - - - - - - - - - - - - - - - - - - - -")
print(guess_this_number, "is correct and you made it in ", guesses, " guesses!")
print("- - - - - - - - - - - - - - - - - - - - - - - -")
|
the-stack_0_5192 | import os
import subprocess
import sys
from functools import partial
from sofa_config import *
from sofa_print import *
def sofa_viz(cfg):
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print_warning(
'If your rendering timeline is slow, please try \033[4msofa report --plot_ratio=10\033[24m to downsample scatter points,')
print_warning('and then \033[4msofa viz\033[24m to see the downsampled results.')
print_hint('SOFA Visualization is listening on port \033[4m\033[97mhttp://localhost:%d\033[24m\033[0m\033[24m' % (cfg.viz_port) )
print_hint('To change port, please run command: \033[4msofa viz --viz_port=PortNumber\033[24m')
print_hint('Please open your browser to start profiling.')
print_hint('After profiling, please enter Ctrl+C to exit.')
os.system(
'cd %s && python3.6 -m http.server %d 2>&1 1> /dev/null; cd -' %
(cfg.logdir,cfg.viz_port))
|
the-stack_0_5193 | import traceback
from queue import Empty
from queue import Queue
from threading import Lock, Thread
from .promise import Promise
class Task(object):
"""
Task runs a python function `target` when called.
"""
def __init__(self, target, *args, **kwargs):
"""Initialize the Task object."""
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self.target(*self.args, **self.kwargs)
class TaskQueue(Thread):
"""
A background thread to start all queued processes one after another.
"""
def __init__(self):
super().__init__(daemon=True)
self.queue = Queue()
self.active_task = None
self.running = False
self._block = Lock()  # guards access to self.active_task
def __del__(self):
self.running = False
def execute(self, task):
self.queue.put(task)
def cancel_all(self):
try:
while not self.queue.empty():
self.queue.get_nowait()
self.queue.task_done()
except Empty:
pass
def busy(self):
result = False
with self._block:
result = self.active_task is not None
return result
def run(self):
self.running = True
while self.running:
task = self.queue.get()
with self._block:
self.active_task = task
try:
task.run()
except:
traceback.print_exc()
finally:
self.queue.task_done()
with self._block:
self.active_task = None
_tasks = TaskQueue()
_tasks.start()
def busy():
return _tasks.busy()
def execute_async(func, *args, **kwargs):
return Promise(lambda resolve_fn: _tasks.execute(
Task(func, resolve_fn, *args, **kwargs)))
def cancel_all():
_tasks.cancel_all()
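# Usage sketch (illustrative, not part of the original module): execute_async
# prepends the promise's resolve callback to the wrapped function's arguments,
# so the target is expected to call it with its result, e.g.
#
#   def work(resolve, value):
#       resolve(value * 2)             # hand the result back to the Promise
#
#   promise = execute_async(work, 21)  # runs on the background TaskQueue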
|
the-stack_0_5195 | #coding:utf-8
import pyglet
window = pyglet.window.Window()
label = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=36,
x=window.width//2, y=window.height//2,
anchor_x='center', anchor_y='center')
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.app.run() |
the-stack_0_5196 |
from env import *
from replayBuffer import *
from params import *
env = HyperGraphEnv()
tf_env = TFPyEnvironment(env)
#hypermaramters
fc_layer_params=[64,64,64,64,64,64]
q_net = QRnnNetwork(tf_env.observation_spec(), tf_env.action_spec(), lstm_size=(16,))
q_net_2 = q_net = QNetwork(
tf_env.observation_spec(),
tf_env.action_spec(),
fc_layer_params=fc_layer_params)
#agent
train_step = tf.Variable(0)
#optimizer = tf.keras.optimizers.RMSprop(learning_rate=2.5e-4, rho=0.95, momentum=0.0, epsilon=0.00001, centered= True)
optimizer = tf.keras.optimizers.Adam(lr=0.0001)
decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate = 1.0,
decay_steps = 25000,
end_learning_rate = 0.03
)
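# Illustrative note: PolynomialDecay defaults to power=1.0, so this schedule
# drops epsilon linearly from 1.0 to 0.03 over the first 25000 train steps and
# then holds it at 0.03.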
tf_agent = DqnAgent(tf_env.time_step_spec(),
tf_env.action_spec(),
q_network=q_net_2,
optimizer = optimizer,
td_errors_loss_fn = tf.keras.losses.Huber(reduction="none"),
train_step_counter = train_step,
target_update_period = 100,
epsilon_greedy = lambda : decay_fn(train_step))
tf_agent.initialize()
#replay buffer
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
#data_spec = agent.collect_data_spec,
data_spec = tf_agent.collect_data_spec,
batch_size = tf_env.batch_size,
max_length = replay_buffer_capacity
)
replay_buffer_observer = replay_buffer.add_batch
collect_driver = DynamicEpisodeDriver(
tf_env,
tf_agent.collect_policy,
observers=[replay_buffer_observer] + train_metrics,
num_episodes=2)
initial_collect_policy = RandomTFPolicy(tf_env.time_step_spec(),
tf_env.action_spec())
init_driver = DynamicEpisodeDriver(
tf_env,
initial_collect_policy,
observers=[replay_buffer.add_batch, ShowProgress(20000)],
num_episodes=1000)
final_time_step, final_policy_state = init_driver.run()
tf.random.set_seed(9) # chosen to show an example of trajectory at the end of an episode
trajectories, buffer_info = next(iter(replay_buffer.as_dataset(
sample_batch_size=2,
num_steps= 2,
single_deterministic_pass=False)))
time_steps, action_steps, next_time_steps = to_transition(trajectories)
time_steps.observation.shape
dataset = replay_buffer.as_dataset(
sample_batch_size=64,
num_steps= 2,
num_parallel_calls=3).prefetch(3)
collect_driver.run = function(collect_driver.run)
tf_agent.train = function(tf_agent.train)
def train_agent(n_iterations):
time_step = None
policy_state = tf_agent.collect_policy.get_initial_state(tf_env.batch_size)
iterator = iter(dataset)
for iteration in range(n_iterations):
time_step, policy_state = collect_driver.run(time_step, policy_state)
trajectories, buffer_info = next(iterator)
train_loss = tf_agent.train(trajectories)
print("\r{} loss:{:.5f}".format(
iteration, train_loss.loss.numpy()), end="")
if iteration % 100 == 0:
log_metrics(train_metrics)
train_agent(n_iterations=500000)
|
the-stack_0_5198 | #!/usr/bin/env python
'''
Some index FASTQ files have a mismatched number of quality-line characters: some have an
extra character; others seem to have only a single character.
This script truncates quality lines that are longer than the sequence line and pads quality
lines that are shorter than the sequence line.
author : scott w olesen <[email protected]>
'''
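# Worked example (illustrative): with the default fill character 'F', a record
# whose sequence line has 8 bases but whose quality line is 'IIIII' (5 chars)
# is padded to 'IIIIIFFF', while a 9-character quality line is truncated to 8.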
import argparse, sys, os, itertools
sys.path.append(os.path.normpath(os.path.abspath(__file__) + '/../..'))
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser(description='correct quality line length')
parser.add_argument('fastq', help='input barcode fastq')
parser.add_argument('-z', '--fill_char', default='F', help='fill character (default: F)')
parser.add_argument('-o', '--output', default=sys.stdout, type=argparse.FileType('w'), help='output fastq (default: stdout)')
args = parser.parse_args()
with open(args.fastq) as f:
for four_lines in itertools.izip(*[iter(f)]*4):
at_line, seq_line, plus_line, quality_line = [l.rstrip() for l in four_lines]
ls = len(seq_line)
lq = len(quality_line)
if lq < ls:
quality_line = quality_line.ljust(len(seq_line), args.fill_char)
elif lq > ls:
quality_line = quality_line[0: ls]
args.output.write("\n".join([at_line, seq_line, plus_line, quality_line]) + "\n") |
the-stack_0_5199 | from typing import Optional
import os
from fastapi import FastAPI
app = FastAPI()
# multiple path parameters.
@app.get("/users/{user_id}/items/{item_id}")
async def read_user_item(
user_id: int,
item_id: str,
q: Optional[str] = None,
short: bool = False
):
# http://127.0.0.1:11111/users/1/items/bladeoftheruinedking?q=lifesteal&short=no
item = {"item_id": item_id, "owner_id": user_id}
if q:
item.update({"q": q})
if not short:
item.update(
{"description": "This is an amazing item that has a long description"}
)
return item
# required query parameter
@app.get("/items/{item_id}")
async def read_item(item_id: str, needy: str):
# this throws an error saying that the needy query parameter is required
# http://127.0.0.1:11111/items/1
# after filling the needy parameter it works.
# http://127.0.0.1:11111/items/1?needy=filled
item = {"item_id": item_id, "needy": needy}
return item
if __name__ == '__main__':
print(f'INFO: Starting the FASTAPI server...')
print(f'INFO: DOCS on: http://127.0.0.1:11111/docs')
os.system(f"uvicorn {(__file__.split('/')[-1]).split('.')[0]}:app --host 127.0.0.1 --port 11111")
|
the-stack_0_5201 | import platform
import torch
#
from utils.dataset import train_data, test_data
from utils.model import SimpleLinear, SimpleCNN
from train import train
from test import test
if __name__ == '__main__':
# == Setting ==
device = torch.device('cpu')
# == Model ==
model = SimpleCNN()
model = model.to(device)
# == optimizer ==
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters())
# == Main Loop ==
max_acc = 0
max_epoch = 1
# first epoch
# test(model, test_data, device=device)
for epoch in range(1, max_epoch + 1):
train(model, train_data, epoch, criterion, optimizer, device=device)
acc = test(model, test_data, device=device)
if acc > max_acc:
max_acc = acc
torch.save(model.state_dict(), 'checkpoints/best_model.pt')
print('==========Max Acc: {}=========='.format(max_acc))
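# Illustrative follow-up (not part of the original script): the best weights
# saved above can be restored later with, e.g.:
#   model = SimpleCNN()
#   model.load_state_dict(torch.load('checkpoints/best_model.pt'))
#   model.eval()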
|
the-stack_0_5202 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
class inputFileGenerator(object):
"""
Generate input file for Abaqus.
Unit system:
Length: m
Force: N
Pressure: Pa
"""
def __init__(self, data_file_name, write_path, material_type, fix_indices_list, node_variable_name, elem_variable_name, user_prescribed_force_field=[]):
"""
Initialize parameters.
Parameters:
----------
data_file_name: String.
The file path of information of node, element, etc.
write_path: String.
The path to write the inp file.
material_type: String.
The type of material.
Used to indicate whether to consider material nonlinearity.
fix_indices_list: List of ints.
The node indices to be fixed.
node_variable_name: String.
The variable name of the nodes matrix in the data file.
elem_variable_name: String.
The variable name of the elements matrix in the data file.
user_prescribed_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: nSurfI x 3.
Default: [].
"""
# Data & Variables.
self.data_file_name = data_file_name
self.data_mat = scipy.io.loadmat(self.data_file_name)
self._surface_mat = self.data_mat["FaceI"]
self._surface_nodes = self.data_mat["idxSurfI"]
self._surface_nodes_num = self.data_mat["nSurfI"][0,0]
self._outer_surface_regionNum = 1 # Int. The region number of outer surface.
self._outer_surface_nodes_list = self._extractOuterSurfaceNodes(self.data_mat["faces"], self._outer_surface_regionNum) # List of sorted ints. The indices of outer surface nodes. Indexed from 1.
self._outer_surface_nodes_num = len(self._outer_surface_nodes_list)
self._triangle_nodes_list = []
self._coupled_list = []
self._node_variable_name = node_variable_name
self._elem_variable_name = elem_variable_name
self._inputFile_lines_total = []
self.writePath = write_path
self._modulus = 1e7 # Young's modulus. Unit: Pa. Default: 1e7.
self._poisson_ratio = 0.48 # Poisson's ratio. Linear elastic default: 0.3; neo-Hookean default: 0.48.
self._isCoupleOn = False # Boolean. True: use coupling constraint; False: do not use coupling constraint. Must not turn on if applying Laplacian smoothing.
self._coupling_type = "Kinematic" # String. "Kinematic" / "Distributing".
self._coupling_neighbor_layers = 1 # How deep does the neighborhood searching go. Default: 1.
self._isLaplacianSmoothingOn = True # Boolean. True: use laplacian smoothing. False: do not use laplacian smoothing.
self._laplacian_variable_name = "laplacianMatrixI3"
self._massMatrix_variable_name = "massMatrixI3"
self._laplacian_iter_num = 20 # Default: 3.
self._smoothing_rate = 0.1 # Default: 0.1 (Previous: 1e-4).
self.loads_num = 3 # For initial testing.
self._load_sampling_style = "gaussian" # String. Indicating the type of random sampling for force components. "uniform" / "gaussian".
self._load_scale = (0.0, 10.0) # Absolute range of the force for uniform sampling. Case and BC specific. (min, max). Unit: N.
self._gaussian_params = (12.0, 2.4) # Mean and deviation of the force for Gaussian sampling. Case and BC specific. (mean, deviation). Unit: N.
self._load_params_tuple = None
self._initial_force_component_vector = [] # List of floats. Default: []. Example: [5., 5., 5.].
self.autoIncrementNum = 5000 # Int. The maximum increment number of the AutoSolver.
self.initIncrem = 0.001 # Float. The initial length of the increment (for fixed-step, this is also the length per increm).
self.minIncrem = 1e-20 # Float. The minimum increment length for the AutoSolver (useless for the StaticSolver).
self.maxIncrem = 1.0 # Float. The maximum increment length for the AutoSolver (useless for the StaticSolver).
self.totalTime = 1.0 # Float. The total time for one simulation step.
self.frameNum = 1 # Int. The number of frames intending to extract from the nodal file.
# ================== Load sampling variables ================== #
if self._isCoupleOn: self._couple_region_num = self.loads_num
else: self._couple_region_num = 0
if self._load_sampling_style == "gaussian": self._load_params_tuple = self._gaussian_params
elif self._load_sampling_style == "uniform": self._load_params_tuple = self._load_scale
else:
self._load_sampling_style = "uniform"
self._load_params_tuple = self._load_scale
# ============================================================= #
# Header.
self._header = ["*Heading"]
# Part definition.
self._part_name = "part-1"
self._material_name = "tissue"
self._part_initial = ["*Part, name={}".format(self._part_name)] # Total list of Part definition.
self._node = ["*Node"]
self._elem = ["*Element, type=C3D10"] # Nonlinear tetrahedron. http://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node33.html#tennode.
self._nset_all = []
self._elset_all = []
self._section = ["*Solid Section, elset=allElems, material={}".format(self._material_name),
","]
self._part_end = ["*End Part"]
self._new_node_list = []
self._new_node_dict = {}
self._node_num = None
self._orig_node_num = None
self._elem_num = None
self._part = self.generatePart()
# Load settings.
self._loads_nset_name_list = []
self._rf_name_list = []
self._rf_nset_name_list = []
self._rf_nsets = []
self._load_nsets = [] # Nset definition of loads.
self._load = self.generateLoadSetting()
# Assembly definition.
self._assembly_name = "assembly-1"
self._instance_name = "instance-1"
self._assembly_initial = ["*Assembly, name={}".format(self._assembly_name)] # Total list of Assembly definition.
self._instance = ["*Instance, name={}, part={}".format(self._instance_name, self._part_name),
"*End Instance"]
self._ref_nodes_list = []
self._fix_nset_name = "fix"
self._fix_indices_list = fix_indices_list
self._fix_nset = self.generateNset(self._fix_indices_list, self._fix_nset_name, self._instance_name) # Nset definition of fix BC.
self._loads_posi_indices_list = self._generateLoadPositions(self.loads_num, self._fix_indices_list) # Generate load positions. Randomly. For fixed mode: style="fix", input_posi_indices_list=[415, 470, 107].
self._laplacian_initial_loads_posi = None # List. Containing the original position of concentrated forces.
self._laplacian_force_field = None # 2D Array of floats. Size: nSurfI * 3. The force field on the outer surface.
self._user_prescribed_force_field = user_prescribed_force_field # List of floats. Size: nSurfI * 3. The prescribed force field on the outer surface. Default: [].
self._surface_list = []
self._coupling_list = []
self._nset_boundary = [] # All nsets definitions in assembly. Boundary conditions
self._assembly_end = ["*End Assembly"]
self._assembly = self.generateAssembly()
# Material.
self.material_type = material_type # String. Indicate material type. "linear"/"neo_hookean_fitting"/"neo_hookean_solid".
self._material_def_file_name = "" # Default: "". If there is a file of stress strain definition, please specify here (must not be "").
self._material = self.generateMaterial(self.material_type)
# Boundary condition.
self._boundary_initial = ["*Boundary"]
self._boundary = self.generateBoundaryCondition_fixAll()
# Step settings.
self.freq = int(self.autoIncrementNum / self.frameNum) # Int. The data frame extraction frequency (also refers to the number of increments. Extract one frame per "self.freq" increments). Especially for StaticSolver case.
self._step = ["*Step, name=step-1, nlgeom=YES, inc={}".format(self.autoIncrementNum),
"*Static",
"{}, {}, {}, {}".format(self.initIncrem, self.totalTime,
self.minIncrem, self.maxIncrem)] # Auto solver.
self._step_end = ["*End Step"]
# Rest settings.
self._restart = ["*Restart, write, frequency=0"]
self._output = ["*Output, field, variable=PRESELECT",
"*Output, history, variable=PRESELECT"]
self._fil = ["*FILE FORMAT, ASCII",
"*node file, frequency={}".format(self.freq),
"U, COORD",
"*El file, frequency={}".format(self.freq),
"S, COORD"]
self._resSettings = self._restart + self._output + self._fil
def readFile(self, read_path):
"""
Read files from specific path.
Parameters:
----------
read_path: String.
Path of the original inp file.
Return:
----------
lines: List of strings.
The list of lines from the file.
"""
with open(read_path, "rt") as f: lines = f.read().splitlines()
return lines
def writeFile(self, write_status):
"""
Write 'self.write_lines' into a new inp file.
Parameters:
----------
write_status: String.
"Normal" / "Fast".
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
"""
if write_status == "Normal":
self._inputFile_lines_total = (self._header + self._part + self._assembly +
self._material + self._boundary + self._step +
self._load + self._resSettings + self._step_end)
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
elif write_status == "Fast":
self._inputFile_lines_total = self._header + self._part
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
else:
self.writeFile("Normal")
def generatePart(self):
"""
Generate part definition.
Returns:
----------
The list collection of all sub-definition lists, including:
part_initial: header part of "Part definition".
node: Node definition.
elem: Element definition.
elset_all: The elset containing all elements. For material definition specifically.
section: Section definition.
part_end: The endline of "Part definition".
"""
self.generateNodes(self.data_mat[self._node_variable_name], self._node)
self.generateElements(self.data_mat[self._elem_variable_name], self._elem)
self.nonlinearization()
# Generate all element elset.
allElem_list, allElem_list_name = [], "allElems"
for i in range(len(self._elem[1:])): allElem_list.append(str(i+1))
self._elset_all = self.generateElset(allElem_list, allElem_list_name)
# Generate Section.
self._section = self.generateSection(allElem_list_name, self._material_name)
# Collection.
return (self._part_initial + self._node + self._elem + self._elset_all +
self._section + self._part_end)
def generateNodes(self, node_mat, target_node_list, specified_indices_list=[]):
"""
Generate nodes information.
Parameters:
----------
node_mat: 2D Array of ints.
The matrix containing the coordinates of the nodes to-be-defined under "*Node".
targer_node_list: List of strings.
The definition of node list.
specified_indices_list (optional): List of ints.
List the indices of the input node list, following the exact order of the node_mat.
Default: [].
"""
for i in range(node_mat.shape[0]):
if specified_indices_list == []: node_list_temp = ["{}".format(i+1)]
else: node_list_temp = ["{}".format(specified_indices_list[i])]
node_list_temp += [str(coord) for coord in list(node_mat[i,:])]
target_node_list.append(', '.join(node_list_temp))
def _extractOuterSurfaceNodes(self, faces_def_matrix, outer_surface_regionNum):
"""
Extract the nodes on the outer surface of the geometry (for force application in next step)
Parameters:
----------
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
outer_surface_nodes_list: List of ints.
The indices of nodes on the outer surface. Indexed from 1. Sorted.
"""
outer_surface_nodes_list = []
for i in range(faces_def_matrix.shape[0]):
if faces_def_matrix[i,0] == outer_surface_regionNum: # The region number of outer surface.
outer_surface_nodes_list += [int(ind) for ind in faces_def_matrix[i,1:]] # Indexed from 1.
outer_surface_nodes_list = list(set(outer_surface_nodes_list))
outer_surface_nodes_list.sort()
return outer_surface_nodes_list
def generateElements(self, elem_mat, target_elem_list, specified_indices_list=[]):
"""
Generate elements information.
Parameters:
----------
elem_mat: 2D Array of ints.
The matrix containing the indices of each element to-be-defined under "*Element".
targer_elem_list: List of strings.
The definition of element list.
specified_indices_list (optional): List of ints.
List the indices of the input element list, following the exact order of the elem_mat.
Default: [].
"""
for i in range(elem_mat.shape[0]):
if specified_indices_list == []: elem_list_temp = ["{}".format(i+1)]
else: elem_list_temp = ["{}".format(specified_indices_list[i])]
elem_line_temp = [str(ind) for ind in list(elem_mat[i,:])]
# Make sure the order of nodes for tetrahedron definition is counter-clockwise, otherwise resulting in negative volume.
ind_temp = elem_line_temp[1]
elem_line_temp[1] = elem_line_temp[2]
elem_line_temp[2] = ind_temp
elem_list_temp += elem_line_temp
target_elem_list.append(', '.join(elem_list_temp))
def generateNset(self, node_list, nset_name, instance_name=None):
"""
Generate node set information.
Parameters:
----------
node_list: List of ints.
The list of nodes to be contained in the node list.
nset_name: String.
The name of the to-be-defined node list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
nset: List of strings.
The definition of a specific nset.
"""
if instance_name == None: nset = ["*Nset, nset={}".format(nset_name)]
else: nset = ["*Nset, nset={}, instance={}".format(nset_name, instance_name)]
nset_line_temp, nset_string_temp = [], None
for i, ind in enumerate(node_list):
nset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
nset_line_temp, nset_string_temp = [], None
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
return nset
def generateElset(self, elem_list, elset_name, instance_name=None):
"""
Generate element set information.
Parameters:
----------
elem_list: List of ints.
The list of elements to be contained in the element list.
elset_name: String.
The name of the to-be-defined element list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
elset: List of strings.
The definition of a specific elset.
"""
if instance_name == None: elset = ["*Elset, elset={}".format(elset_name)]
else: elset = ["*Elset, elset={}, instance={}".format(elset_name, instance_name)]
elset_line_temp, elset_string_temp = [], None
for i, ind in enumerate(elem_list):
elset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
elset_line_temp, elset_string_temp = [], None
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
return elset
def generateSection(self, elset_name, material_name):
"""
Generate section information.
Parameters:
----------
elset_name: String.
The name of the elset to be assigned a section.
material_name: String.
The name of defined material.
Returns:
----------
section: List of strings.
The definition of section.
"""
section = ["*Solid Section, elset={}, material={}".format(elset_name, material_name),
","]
return section
def generateMaterial(self, material_type):
"""
Generate lines for material definition.
Parameters:
----------
material_type: String.
Indicate what type of material is used.
Returns:
----------
material_lines: List of lines.
The lines of material definition.
"""
material_lines = ["*Material, name={}".format(self._material_name)]
if material_type == "neo_hookean_fitting":
stress_strain_lines = self._generateNeoHookeanFitting(self._modulus, (-0.3, 0.3), file_name=self._material_def_file_name)
material_lines += ["*Hyperelastic, neo hooke, test data input, poisson={}".format(self._poisson_ratio),
"*Uniaxial Test Data"]
material_lines += stress_strain_lines
elif material_type == "neo_hookean_solid":
c10 = self._modulus / (4 * (1 + self._poisson_ratio))
d1 = 6 * (1 - 2 * self._poisson_ratio) / self._modulus
material_lines += ["*Hyperelastic, neo hooke",
"{}, {}".format(c10, d1)]
elif material_type == "linear":
material_lines += ["*Elastic",
"{}, {}".format(self._modulus, self._poisson_ratio)]
else: material_lines = self.generateMaterial("linear")
return material_lines
def _generateNeoHookeanFitting(self, modulus, strain_range, file_name=""):
"""
Import/Generate stress strain data for neo-Hookean material fitting.
Parameters:
----------
modulus: Float.
The elastic modulus of material.
strain_range: Tuple of floats.
Range for strain interpolation.
file_name (optional): String.
The name of stress strain data definition file.
Default: "".
Returns:
----------
stress_strain_lines: List of strings.
The lines of stress strain data.
"""
if file_name != "": return self.readFile(file_name)
else:
"""
Assumptions of neo-Hookean formulation:
Incompressible (Poisson's ratio = ~0.5, small deformation).
Undergoing uniaxial loading.
Formulation: sigma = 2*C*(stretch - 1/(stretch^2)).
E = 6*C.
"""
strain_data = np.linspace(strain_range[0], strain_range[1], 100)
stretch_data = strain_data + 1.0
stress_data = (self._modulus / 3.0) * (stretch_data - 1.0 / stretch_data**2) # Formulation.
stress_strain_lines = []
for i in range(len(stress_data)):
stress_strain_lines.append("%.6f, %.6f" % (stress_data[i], strain_data[i]))
return stress_strain_lines
def _generateLoadPositions(self, loads_num, fix_indices_list, style="random", input_posi_indices_list=[]):
"""
Randomly generate positions of the load.
Parameters:
----------
loads_num: Int.
Number of loads.
fix_indices_list: List of ints.
Indices of fixed nodes.
style (optional): String.
Indicate how to generate initial load positions.
"random" / "fix":
"random": Randomly generate load positions.
"fix": Use the user input of initial load position indices.
Default: "random".
input_posi_indices_list (optional): List of ints.
User input of initial load positions indices list.
Indexed from 1.
Default: [].
Returns:
----------
loads_posi_indices_list: List of ints.
Picked indices for load application positions.
"""
if style == "random":
loads_posi_indices_list = []
for i in range(loads_num):
while(True):
load_posi_index_temp = random.choice(self._outer_surface_nodes_list) # Randomly choose an outer surface node to apply load F(x, y, z). Indexed from 1.
if load_posi_index_temp not in fix_indices_list: break # The randomly generated index cannot be one of the fixed nodes.
loads_posi_indices_list.append(load_posi_index_temp)
return loads_posi_indices_list
elif style == "fix": return input_posi_indices_list
else: return self._generateLoadPositions(loads_num, fix_indices_list)
def _generateLoadValues(self, output_dimension, load_scale, sampling_style="uniform"):
"""
Randomly generate force values for load component definition.
Using function: numpy.random.rand().
Parameters:
----------
output_dimension: Tuple of ints.
The shape of output random array.
Size: 2*1. (dim1, dim2).
load_scale: Tuple of floats.
Size: 2*1. (min_laod, max_laod) / (mean, deviation).
sampling_style (optional): String.
Indicating the type of sampling.
"uniform": uniform distribution.
"gaussian": Gaussian distribution.
Default: "uniform".
Returns:
----------
load_result: Array of floats.
Size: output_dimension.
"""
if sampling_style == "uniform":
load_result = (np.random.rand(output_dimension[0], output_dimension[1]) * 2 - 1) * abs(load_scale[1] - load_scale[0])
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if load_value_temp < 0: load_result[index] -= self._load_scale[0]
else: load_result[index] += self._load_scale[0]
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
elif sampling_style == "gaussian":
mean, deviation = load_scale[0], load_scale[1]
load_result = np.random.normal(mean, deviation, size=output_dimension)
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if np.random.rand() <= 0.5: load_result[index] *= -1
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
else: load_result = self._generateLoadValues(output_dimension, load_scale)
return load_result
def generateAssembly(self):
"""
Generate assembly definition.
Returns:
----------
The list collection of all sub-definition lists, including:
assenbly_initial: Header of the assembly definition.
instance: The instance definition.
nset_boundary: The definition of BC related node set.
asssenbly_end: The endline of assembly definition.
"""
# Generate "self.loads_num" nsets, each of which has 1 node.
if self._isCoupleOn:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
ref_name_temp = "rf-{}".format(i+1)
ref_nset_name_temp = "rf-{}-nset".format(i+1)
self._rf_name_list.append(ref_name_temp)
self._rf_nset_name_list.append(ref_nset_name_temp)
# Generate assembly node definitions for reference points.
ref_node_list_temp = ["*Node"]
ref_pt_coord_list_temp = [float(item) for item in self._node[load_posi_index_temp].split(',')[1:]]
self.generateNodes(np.array(ref_pt_coord_list_temp).astype(float).reshape(1,-1), ref_node_list_temp,
specified_indices_list=[i+1])
self._ref_nodes_list += copy.deepcopy(ref_node_list_temp)
rf_nset_list_temp = self._findCouplingNodes(load_posi_index_temp, self._coupling_neighbor_layers)
# Generate reference point node sets.
self._load_nsets += self.generateNset([i+1], ref_name_temp)
# Generate coupling constraint node sets.
self._rf_nsets += self.generateNset(rf_nset_list_temp, ref_nset_name_temp,
self._instance_name)
self.generateCoupling()
else:
if self._isLaplacianSmoothingOn:
force_vector_temp = np.zeros(shape=(3*self._surface_nodes_num, 1))
self._laplacian_initial_loads_posi = copy.deepcopy(self._loads_posi_indices_list)
if self._initial_force_component_vector == []:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = self._generateLoadValues((3,1), self._load_params_tuple,
sampling_style=self._load_sampling_style)
else:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = np.array(self._initial_force_component_vector).astype(float).reshape(3,1)
laplacian_matrix, mass_matrix = self.data_mat[self._laplacian_variable_name], self.data_mat[self._massMatrix_variable_name]
force_vector_new = self._laplacianSmoothing(force_vector_temp, laplacian_matrix, mass_matrix, iter_num=self._laplacian_iter_num,
smoothing_rate=self._smoothing_rate, laplacian_force_field=self._user_prescribed_force_field) # Size: (nSurfI x 3)*1. Fix force value: initial_BC_state="fix" (not recommended).
self._laplacian_force_field = force_vector_new.reshape(-1,3)
self._loads_posi_indices_list = copy.deepcopy([(list(force_vector_new).index(item)//3)+1 for item in list(force_vector_new) if item != 0]) # Indexed from 1.
self._loads_posi_indices_list = list(set(self._loads_posi_indices_list))
self._loads_posi_indices_list.sort()
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
self._load_nsets += self.generateNset(self._laplacian_initial_loads_posi, "Orig_loads_posi", self._instance_name)
self._load = self.generateLoadSetting(force_list=list(force_vector_new.reshape(-1,1)))
else:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
# Concatenate assembly subparts.
self._nset_boundary = self._nset_boundary + self._load_nsets + self._rf_nsets + self._fix_nset + self._surface_list + self._coupling_list
return (self._assembly_initial + self._instance + self._ref_nodes_list + self._nset_boundary + self._assembly_end)
def generateCoupling(self):
"""
Generate coupling constriants for concentrated forces application.
"""
for index, rf_name in enumerate(self._rf_nset_name_list):
self._surface_list += ["*Surface, type=NODE, name={}_CNS_, internal".format(rf_name),
"{}, 1.".format(rf_name)]
self._coupling_list += ["*Coupling, constraint name={}, ref node={}, surface={}_CNS_".format(self._rf_name_list[index],
self._rf_name_list[index],
rf_name),
"*{}".format(self._coupling_type)]
def _findCouplingNodes(self, rf_node_ind, neighbor_layers):
"""
Find the immediate neighbors of each specified node index.
Parameters:
----------
rf_node_ind: Int.
The index of target node.
Returns:
----------
rf_nset_list: List of ints (duplicated items removed).
"rf_node_ind"'s corresponding immediate neighbor nodes set.
"""
rf_nset_list, new_nodes_list, searched_nodes_list = [rf_node_ind], [rf_node_ind], []
for j in range(neighbor_layers):
for ind_temp in new_nodes_list:
for i in range(len(self._triangle_nodes_list)):
if ind_temp in self._triangle_nodes_list[i]:
rf_nset_list += copy.deepcopy(self._triangle_nodes_list[i])
else: continue
searched_nodes_list += copy.deepcopy(new_nodes_list)
rf_nset_list = list(set(copy.deepcopy(rf_nset_list)))
new_nodes_list = [ind for ind in rf_nset_list if ind not in searched_nodes_list]
# Avoid assigning same nodes to different coupled node sets.
for ind in rf_nset_list:
if ind in self._coupled_list: rf_nset_list.remove(ind)
else: self._coupled_list.append(ind)
return rf_nset_list
def generateBoundaryCondition_fixAll(self):
"""
Generate fix boundary condition.
Returns:
----------
The list collection of all sub-definition lists, including:
boundary_initial: Header of boundary condition definition.
BC_list_temp: The detailed BC definition of boundary conditions.
"""
BC_list_temp = []
for i in range(6): # 6: 6 DOFs (disp. + rot.); 3: 3 DOFs (disp.).
BC_list_temp.append("{}, {}, {}".format(self._fix_nset_name, i+1, i+1))
return (self._boundary_initial + BC_list_temp)
def generateLoadSetting(self, force_list=[]):
"""
Generate load information.
Returns:
----------
load_list: List of strings.
Definition of concentrated forces.
force_list (optional): List of forces (floats).
Size: loads_num * 3.
Default: [].
"""
load_list = []
if force_list == []:
force_list = list(self._generateLoadValues((self.loads_num*3, 1), self._load_params_tuple, sampling_style=self._load_sampling_style))
force_list = np.array(force_list).astype(float).reshape(-1,3) # 2D Array of floats. Size: self._loads_num * 3.
if self._isCoupleOn:
for j, rf_name in enumerate(self._rf_name_list): # Length: self._loads_num
load_temp = ["*Cload, op=NEW"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(rf_name, i+1, force_list[j,i]))
load_list += copy.deepcopy(load_temp)
else:
for j, load_name in enumerate(self._loads_nset_name_list): # Length: length of self._loads_nset_name_list.
load_temp = ["*Cload"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(load_name, i+1, force_list[self._loads_posi_indices_list[j]-1,i]))
load_list += copy.deepcopy(load_temp)
return load_list
def _laplacianMatrixShrink(self, laplacian_matrix, surface_nodes_list, faces_def_matrix, outer_surface_regionNum):
"""
Assign zeros to the DOFs without force value applied.
Parameters:
----------
laplacian_matrix: 2D Array of floats.
The surface's Laplacian for force smoothing.
Size: nSurfI*3 x nSurfI*3.
surface_nodes_list: List of ints.
All indices of nodes on all surfaces.
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
laplacian_matrix: 2D Array of floats.
Laplacian with zeros assigned to the nodes not on the outer surfaces.
Size: nSurfI*3 x nSurfI*3.
"""
surface_nodes_list = [ind for ind in surface_nodes_list]
outer_surface_nodes_list = self._extractOuterSurfaceNodes(faces_def_matrix, outer_surface_regionNum)
other_surface_nodes_list = [ind for ind in surface_nodes_list if ind not in outer_surface_nodes_list]
other_surface_nodes_list.sort()
for ind in other_surface_nodes_list:
laplacian_matrix[surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3,:] = 0.0
laplacian_matrix[:,surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3] = 0.0
return laplacian_matrix
def _laplacianSmoothing(self, force_vector, laplacian_matrix, mass_matrix, iter_num=3, smoothing_rate=1e-4, initial_BC_state="", laplacian_force_field=[]):
"""
Implement laplacian smoothing based on pre-calculated Laplacian matrix.
Formulation: Forward Euler.
F_(n+1) = (I + lambda*massMatrix*Laplacian) * F_n
Parameters:
----------
force_vector: 1D Array of floats.
With concentrated force values applied at the specified nodes.
Size: (self._surface_nodes_num x 3) * 1.
laplacian_matrix: 2D Array of floats.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
mass_matrix: 2D Array of floats.
Diagonal matrix.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
iter_num (optional): Int.
The number of smoothing iterations.
Default: 3.
smoothing_rate (optional): float.
The coefficient that control the step size of smoothing.
Default: 1e-4.
initial_BC_state (optional): String.
Indicating whether to "fix" or "decay" the original concentrated force value.
Default: "". Indicating smoothing including the original forces.
laplacian_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: self._surface_nodes_num x 3.
Default: [].
Returns:
----------
force_vector_new: 1D Array of floats.
The laplacian-smoothed force vector.
Size: (self._surface_nodes_num x 3) * 1.
"""
if laplacian_force_field == []:
force_vector_new = copy.deepcopy(force_vector)
for i in range(iter_num):
force_vector_new += smoothing_rate * (laplacian_matrix @ force_vector_new) # Without mass matrix.
# force_vector_new += smoothing_rate * (mass_matrix @ laplacian_matrix @ force_vector_new) # With mass matrix (NOT recommended).
if initial_BC_state == "fix":
for j, value in enumerate(force_vector):
if value != 0:
force_vector_new[j] = value
else: force_vector_new = np.array(laplacian_force_field).astype(float).reshape(len(laplacian_force_field),1)
return force_vector_new
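# Minimal sketch of the forward-Euler update above (illustrative, toy 2-DOF
# system without the mass matrix):
#   L = np.array([[-1., 1.], [1., -1.]])
#   f = np.array([[1.], [0.]])
#   for _ in range(3):
#       f = f + 0.1 * (L @ f)   # same step as applied to force_vector_new above
#   # the initial point force diffuses toward its neighbour at each iteration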
def _computeMidPoint(self, ind_1, ind_2):
"""
Compute the mid-point of the edge.
Parameters:
----------
ind_1: Int.
The first index of the node pair. Indexed from 1.
ind_2: Int.
The second index of the node pair. Indexed from 1.
Returns:
----------
ind_mid: Int.
The index of the self._node. Index from 1.
"""
key_string_temp_1, key_string_temp_2 = "{}_{}".format(ind_1, ind_2), "{}_{}".format(ind_2, ind_1)
if key_string_temp_1 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_1]
elif key_string_temp_2 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_2]
else:
coord_temp_1 = np.array(self._node[ind_1].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_2 = np.array(self._node[ind_2].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_mid = (coord_temp_1 + coord_temp_2) / 2.0
coord_mid_list = [str(item) for item in list(coord_temp_mid[0])]
self._node_num = len(self._node)
new_node_def_list_temp = copy.deepcopy([str(self._node_num)])
new_node_def_list_temp += copy.deepcopy(coord_mid_list)
self._node.append(', '.join(new_node_def_list_temp))
self._new_node_list.append(', '.join(new_node_def_list_temp))
self._new_node_dict[key_string_temp_1] = self._node_num
self._new_node_dict[key_string_temp_2] = self._node_num
return self._node_num
def insertNode(self):
"""
Insert one node (at the mid-point) of each edge.
Create C3D10 element structure.
"""
for index, elem_def_string in enumerate(self._elem[1:]):
elem_node_list_temp = [int(ind) for ind in elem_def_string.split(',')[1:]]
# Obtain the mid-point index in order. Assume tetrahedral element (C3D4).
mid_pt_ind_5 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[1])
mid_pt_ind_6 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[2])
mid_pt_ind_7 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[2])
mid_pt_ind_8 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[3])
mid_pt_ind_9 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[3])
mid_pt_ind_10 = self._computeMidPoint(elem_node_list_temp[2], elem_node_list_temp[3])
elem_new_def_list_temp = [str(mid_pt_ind_5),
str(mid_pt_ind_6),
str(mid_pt_ind_7),
str(mid_pt_ind_8),
str(mid_pt_ind_9),
str(mid_pt_ind_10)]
# Redefine the new C3D10 element in order.
elem_def_list_temp = copy.deepcopy(elem_def_string.split(',')) + copy.deepcopy(elem_new_def_list_temp)
elem_def_string_temp = ', '.join(elem_def_list_temp)
self._elem[index+1] = copy.deepcopy(elem_def_string_temp)
def _triangleNodesCollection(self):
"""
Collect all the nodes on each triangle (surface).
Need to be implemented after "self.insertNode()".
"""
for i in range(self._surface_mat.shape[0]):
tri_temp = self._surface_mat[i,:]
# Assuming all triangles on the surface of geometry.
middle_pts_list_temp = [self._computeMidPoint(tri_temp[0], tri_temp[1]),
self._computeMidPoint(tri_temp[0], tri_temp[2]),
self._computeMidPoint(tri_temp[1], tri_temp[2])]
triangle_nodes_list_temp = list(copy.deepcopy(tri_temp)) + copy.deepcopy(middle_pts_list_temp)
self._triangle_nodes_list.append(copy.deepcopy(triangle_nodes_list_temp)) # List of lists of ints.
def nonlinearization(self):
"""
Nonlinearize the linear tetrahedral (CST) element to quadratic tetrahedral element.
"""
self._elem_num = len(self._elem) - 1
self._orig_node_num = len(self._node) - 1
self.insertNode()
self._triangleNodesCollection()
self._node_num = len(self._node) - 1
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
coupling_type="", coupling_neighbor_layer_num=1,
laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
"""
Save the nonlinear cases generation results into .log file.
Parameters:
----------
file_name_list: List of strings.
Names of generated files.
elapsed_time_list: List of floats.
Elapsed time of generation for each input file.
In exact order.
write_status: String.
Indicating the type of input file generation.
"Normal" / "Fast":
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
data_file_name: String.
The name of modeling data file.
Format: .mat
sample_num: Int.
Number of generated input files.
fix_indices_list: List of ints.
Indices of fixed points.
Indexed from 1.
loads_num: Int.
The number of concentrated forces.
load_sampling_type: String.
The distribution type for force sampling.
"uniform" / "gaussian":
"uniform": uniform distribution with specified (min, max) range.
"gaussian": gaussian distribution with specified (mean, dev) parameters.
load_param_tuple: tuple of floats.
Parameters of load sampling.
load_sampling_type specific.
material_type: String.
The type of material.
"linear" / "neo_hookean_solid" / "neo_hookean_fitting":
"linear": linear elastic material.
"neo_hookean_solid": neo-Hookean solid following the stain energy formulation.
"neo_hookean_fitting": neo-Hookean solid following the strass-strain curved fitted from user-input strss-strain data.
modulus: Float.
Elastic modulus of the material.
poisson_ratio: Float.
Poisson's ratio of the material.
isCoupleOn: Boolean indicator.
True: using coupling constraint for local force distribution.
False: not using coupling constraint.
isLaplacianSmoothingOn: Boolean indicator.
True: using Laplacian-Beltrami operator matrix to smooth the force distribution.
False: not using Laplacian smoothing.
coupling_type (optional): String.
The type of coupling constraint.
Default: "".
coupling_neighbor_layer_num (optional): Int.
        The number of neighbor layers over which the local force is distributed.
Default: 1.
laplacian_iter_num (optional): Int.
        The number of iterations for Laplacian smoothing.
Default: 5.
laplacian_smoothing_rate (optional): Float.
The rate of Laplacian smoothing.
Default: 1e-4.
write_path (optional): String.
The path of to-be-written file.
Default: "nonlinear_case_generation.log".
"""
if isCoupleOn: isCoupleOn_status = "On"
else: isCoupleOn_status = "Off"
if isLaplacianSmoothingOn: isLaplacianSmoothingOn_status = "On"
else: isLaplacianSmoothingOn_status = "Off"
content = ["Data_file_name: {}".format(data_file_name),
"Sample_num = {}".format(sample_num),
"Fixed_indices_list (indexed from 1): {}".format(fix_indices_list),
"Material type: {}".format(material_type),
"Elastic modulus = {} Pa".format(modulus),
"Poisson's ratio = {}".format(poisson_ratio),
"Loads_num = {}".format(loads_num)]
if load_sampling_type == "uniform":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
elif load_sampling_type == "gaussian":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling parameters (mean, dev): {} N".format(load_param_tuple)]
else:
load_sampling_type = "uniform"
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
content += ["Coupling constraint status: {}".format(isCoupleOn_status),
"Laplacian smoothing status: {}".format(isLaplacianSmoothingOn_status)]
if isCoupleOn:
content += ["Coupling type: {}".format(coupling_type),
"Coupling neighbor layer numbers: {}".format(coupling_neighbor_layer_num)]
if isLaplacianSmoothingOn:
content += ["Laplacian smoothing iteration numbers = {}".format(laplacian_iter_num),
"Laplacian smoothing rate = {}".format(laplacian_smoothing_rate)]
content += ["----------------------------------------------------------",
"Input file\t\tExport status\tGeneration status\tElapsed time/s"]
elapsed_time_total = 0
for i, file_name in enumerate(file_name_list):
data_string_temp = "{}\t\t{}\t\tCompleted\t".format(file_name, write_status) + "\t%.8f" % (elapsed_time_list[i])
content.append(data_string_temp)
elapsed_time_total += elapsed_time_list[i]
content += ["----------------------------------------------------------",
"Total elapsed time: {} s".format(elapsed_time_total)]
content = '\n'.join(content)
with open(write_path, 'w') as f: f.write(content)
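# A minimal usage sketch of saveLog (hypothetical file names, timings and material
# values; the real call with the actual generation results is made at the end of
# main() below):
#
#   saveLog(["20001.inp", "20002.inp"], [12.3, 11.8], "Normal", "data_kidney.mat",
#           sample_num=2, fix_indices_list=[2, 453, 745], loads_num=3,
#           load_sampling_type="uniform", load_param_tuple=(-1.0, 1.0),
#           material_type="neo_hookean_solid", modulus=1.0e5, poisson_ratio=0.48,
#           isCoupleOn=False, isLaplacianSmoothingOn=True,
#           write_path="nonlinear_case_generation.log")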
def main():
abaqus_default_directory = "C:/temp" # Default working directory of Abaqus.
inp_folder = "inp_files"
sample_nums = 2000 # Default: 2000.
data_file_path = "data_kidney.mat"
node_variable_name, elem_variable_name = "NodeI", "EleI"
results_folder_path_stress, results_folder_path_coor = "stress", "coor"
material_type = "neo_hookean_solid" # "linear" / "neo_hookean_fitting" / "neo_hookean_solid".
fix_indices_list = [2, 453, 745] # Specify the node to fix. At least 3. Indexed from 1.
write_status = "Normal" # String. "Normal" / "Fast". "Normal": generate all definitions; "Fast": generate nodes and elements definition only.
# ================================== Force interpolation related variables ================================== #
force_field_mat_name = "force_field_data.mat"
force_interpolation_folder = "inp_interpolation"
isPrescribedForceOn = False # Boolean indicator. True: use prescribed force field; False: no specified force field. Default: False.
force_type = "random" # String. The type of prescribed force field. "interpolated": interpolated force fields; "random": weighted-summed force fields.
    eigen_num_force, force_scalar = 100, 2.0 # Int, Float. The number of force-field eigenmodes, and the scalar controlling the force magnitude (hence the deformation magnitude of the tumor) in the nonlinear solver. Unit of force_scalar: N.
# =========================================================================================================== #
if isPrescribedForceOn:
"""
The pipeline of generating interpolated force fields:
1. Run "nonlinearCasesCreation.py" with 'isPrescribedForceOn = False' firstly.
2. Run "forceInterpolation.py" in the same directory.
        3. Set 'isPrescribedForceOn = True', set 'force_type = "interpolated"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_interpolated.inp" in the folder 'force_interpolation_folder'.
        4. Set 'isPrescribedForceOn = True', set 'force_type = "random"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_random.inp" in the folder 'force_interpolation_folder'.
"""
force_fields = (scipy.io.loadmat(force_field_mat_name)["force_field_interpolated"] if force_type == "interpolated" else
scipy.io.loadmat(force_field_mat_name)["force_field_random"]) # Size: nSurfI*3 x sampleNum. Concatenated as xyzxyz...
sample_nums = force_fields.shape[1]
# Generate input file for Abaqus.
file_name_list, elapsed_time_list, force_field_matrix = [], [], None
for i in range(sample_nums):
start_time = time.time()
if isPrescribedForceOn:
if not os.path.isdir(force_interpolation_folder): os.mkdir(force_interpolation_folder)
file_name_temp = ("{}_interpolated.inp".format(str(i+20001)) if force_type == "interpolated" else
"{}_random.inp".format(str(i+20001)))
write_path = os.path.join(force_interpolation_folder, file_name_temp)
force_field_prescribed_list = list(force_fields[:,i])
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name,
user_prescribed_force_field=force_field_prescribed_list)
else:
if not os.path.isdir(inp_folder): os.mkdir(inp_folder)
file_name_temp = "{}.inp".format(str(i+20001))
write_path = os.path.join(inp_folder, file_name_temp)
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name)
inputFile_temp.writeFile(write_status)
end_time = time.time()
elapsed_time = end_time - start_time
file_name_list.append(file_name_temp)
elapsed_time_list.append(elapsed_time)
if i == 0: force_field_matrix = inputFile_temp._laplacian_force_field.reshape(-1,1)
else: force_field_matrix = np.hstack((force_field_matrix, inputFile_temp._laplacian_force_field.reshape(-1,1)))
# ============================ For force visualization only (sample_nums = 1) ============================ #
# print(inputFile_temp._laplacian_initial_loads_posi)
# force_field = {"force_field": inputFile_temp._laplacian_force_field}
# scipy.io.savemat("force_field.mat", force_field)
# ======================================================================================================== #
print("Input_file: ", file_name_temp, "| Status:", write_status, "| Generation: Completed | Time: %.4f s" % (elapsed_time))
saveLog(file_name_list, elapsed_time_list, write_status, data_file_path, sample_nums,
fix_indices_list, inputFile_temp.loads_num, inputFile_temp._load_sampling_style, inputFile_temp._load_params_tuple,
material_type, inputFile_temp._modulus, inputFile_temp._poisson_ratio,
inputFile_temp._isCoupleOn, inputFile_temp._isLaplacianSmoothingOn,
coupling_type=inputFile_temp._coupling_type, coupling_neighbor_layer_num=inputFile_temp._coupling_neighbor_layers,
laplacian_iter_num=inputFile_temp._laplacian_iter_num, laplacian_smoothing_rate=inputFile_temp._smoothing_rate,
write_path="nonlinear_case_generation.log")
if not isPrescribedForceOn: weight_matrix = (2.0 * np.random.rand(eigen_num_force, 3*sample_nums) - 1.0) # Distinct random weights corresponding to each smoothed concentrated force field.
else: weight_matrix = scipy.io.loadmat(force_field_mat_name)["weight_matrix"] # Distinct random force field for each smoothed concentrated force field.
mdict = {"fix_indices_list": fix_indices_list,
"orig_data_file_name": data_file_path,
"orig_config_var_name": node_variable_name,
"inp_folder": inp_folder if not isPrescribedForceOn else force_interpolation_folder, # The folder containing input files.
"current_directory": os.getcwd(),
"results_folder_path_stress": results_folder_path_stress,
"results_folder_path_coor": results_folder_path_coor,
"original_node_number": inputFile_temp._orig_node_num,
"couple_region_num": inputFile_temp._couple_region_num,
"force_field_matrix": force_field_matrix, # The force field matrix of all generated samples. Size: nSurfI*3 x sampleNum_total.
"weight_matrix": weight_matrix, "force_scalar_coeff": force_scalar, # The randomly generated matrix for force fields' reconstruction. Size: eigen_num x (3*sample_num).
"eigen_number_force": eigen_num_force, # Int. The eigenmode number of force field reconstruction. (Used only in force field interpolation)
"alpha_indexing_vector": np.zeros(shape=(sample_nums, 1)) if not isPrescribedForceOn else scipy.io.loadmat(force_field_mat_name)["alpha_indexing_vector"]
}
scipy.io.savemat("training_parameters_transfer.mat", mdict)
# np.save(os.path.join(abaqus_default_directory, "training_parameters_transfer.npy"), mdict, fix_imports=True)
# np.savez(os.path.join(abaqus_default_directory, "training_parameters_transfer.npz"),
# fix_indices_list=fix_indices_list,
# orig_data_file_name=data_file_path,
# orig_config_var_name=node_variable_name,
# inp_folder=inp_folder,
# current_directory=os.getcwd(),
# results_folder_path_stress=results_folder_path_stress,
# results_folder_path_coor=results_folder_path_coor)
if __name__ == "__main__":
main()
|
the-stack_0_5204 | # Copyright (c) WiPhy Development Team
# This library is released under the MIT License, see LICENSE.txt
import os
import unittest
import numpy as np
import wiphy.util.general as me
import wiphy.code.modulator as mod
import wiphy.code.im as im
import wiphy.code.duc as duc
class Test(unittest.TestCase):
def test_getGrayIndixes(self):
self.assertEqual(me.getGrayIndixes(2), [0, 1, 3, 2])
self.assertEqual(me.getGrayIndixes(4), [0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8])
def test_frodiff(self):
fro = me.frodiff(np.array([1, 1j, 0, 0]), np.array([1, 0, 1j, 0]))
self.assertAlmostEqual(fro, 2.0, msg="The Frobenius norm calculation is wrong")
fro = me.frodiff(me.randn_c(int(1e6)), me.randn_c(int(1e6)))
self.assertAlmostEqual(fro / 2e6, 1.0, places=2)
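    # The second check relies on E|x_i - y_i|^2 = 2 for independent unit-power complex
    # Gaussian entries, so the squared Frobenius distance of two length-1e6 vectors
    # concentrates around 2e6 -- hence the division by 2e6 above.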
def test_matmulb(self):
H = np.array([[1, 1.j], [-1.j, -1]])
codes = im.generateIMCodes("opt", 2, 1, 2, "PSK", 1, 1)
ret = me.matmulb(H, codes)
np.testing.assert_almost_equal(ret, np.matmul(H, codes))
def test_getEuclideanDistances(self):
codes = mod.generatePSKSymbols(4).reshape(4, 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [2., 2., 4., 4., 2., 2.])
#
codes = im.generateIMCodes("opt", 2, 1, 2, "PSK", 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [2.])
#
codes = im.generateIMCodes("opt", 4, 2, 4, "PSK", 1, 1)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [1., 1., 2., 2., 1., 1.])
#
codes = duc.generateDUCCodes(2, 2)
ret = me.asnumpy(me.getEuclideanDistances(np.array(codes)))
np.testing.assert_almost_equal(ret, [16.])
def test_getMinimumEuclideanDistance(self):
codes = mod.generatePSKSymbols(4).reshape(4, 1, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 2.0)
codes = mod.generateStarQAMSymbols(16).reshape(16, 1, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 0.2343145750507619)
codes = im.generateIMCodes("opt", 4, 2, 4, "PSK", 4, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 1.0)
codes = im.generateIMCodes("opt", 8, 4, 64, "PSK", 2, 1)
med = me.getMinimumEuclideanDistance(np.array(codes))
self.assertAlmostEqual(med, 0.5)
def test_getDFTMatrix(self):
        # Use the builtin complex: the np.complex alias was deprecated and later removed from NumPy.
        W = me.getDFTMatrix(4)
        np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(4, dtype=complex), decimal=3)
        W = me.getDFTMatrix(8)
        np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(8, dtype=complex), decimal=3)
        W = me.getDFTMatrix(16)
        np.testing.assert_almost_equal(W.dot(W.conj().T), np.eye(16, dtype=complex), decimal=3)
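    # getDFTMatrix(N) is expected to return the unitary DFT matrix, i.e.
    # W[j, k] = exp(-2*pi*1j*j*k/N) / sqrt(N) (sign convention aside), which is why
    # W @ W^H must equal the identity in the checks above.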
def test_inv_dB(self):
self.assertAlmostEqual(me.inv_dB(0.0), 1.0, msg="The implementation of inv_dB may be wrong.")
def test_randn(self):
ret = me.randn(int(1e6))
meanPower = np.mean(np.power(np.abs(ret), 2))
self.assertAlmostEqual(meanPower, 1.0, places=2, msg="The mean power of randn differs from 1.0")
def test_randn_c(self):
ret = me.randn_c(int(1e6))
meanPower = np.mean(np.power(np.abs(ret), 2))
self.assertAlmostEqual(meanPower, 1.0, places=2, msg="The mean power of randn_c differs from 1.0")
def test_countErrorBits(self):
self.assertEqual(me.countErrorBits(1, 2), 2)
self.assertEqual(me.countErrorBits(1, 5), 1)
def test_getXORtoErrorBitsArray(self):
a = me.getXORtoErrorBitsArray(4)
np.testing.assert_almost_equal(a, np.array([0, 1, 1, 2, 1]))
a = me.getXORtoErrorBitsArray(16)
np.testing.assert_almost_equal(a, np.array([0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1]))
def test_getErrorBitsTable(self):
t = me.getErrorBitsTable(4)
np.testing.assert_almost_equal(t, np.array([[0, 1, 1, 2], [1, 0, 2, 1], [1, 2, 0, 1], [2, 1, 1, 0]]))
t = me.getErrorBitsTable(8)
np.testing.assert_almost_equal(t, np.array(
[[0, 1, 1, 2, 1, 2, 2, 3], [1, 0, 2, 1, 2, 1, 3, 2], [1, 2, 0, 1, 2, 3, 1, 2], [2, 1, 1, 0, 3, 2, 2, 1],
[1, 2, 2, 3, 0, 1, 1, 2], [2, 1, 3, 2, 1, 0, 2, 1], [2, 3, 1, 2, 1, 2, 0, 1], [3, 2, 2, 1, 2, 1, 1, 0]]))
def test_getRandomHermitianMatrix(self):
np.set_printoptions(linewidth=np.inf)
H = me.getRandomHermitianMatrix(4)
np.testing.assert_almost_equal(H, H.conj().T)
H = me.getRandomHermitianMatrix(8)
np.testing.assert_almost_equal(H, H.conj().T)
H = me.getRandomHermitianMatrix(16)
np.testing.assert_almost_equal(H, H.conj().T)
def test_convertIntToBinArray(self):
np.testing.assert_almost_equal(me.convertIntToBinArray(0, 1), [0])
np.testing.assert_almost_equal(me.convertIntToBinArray(2, 2), [1, 0])
np.testing.assert_almost_equal(me.convertIntToBinArray(3, 2), [1, 1])
np.testing.assert_almost_equal(me.convertIntToBinArray(4, 3), [1, 0, 0])
def test_CayleyTransform(self):
        # Use the builtin complex: the np.complex alias was deprecated and later removed from NumPy.
        U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(4)))
        np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(4, dtype=complex))
        U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(8)))
        np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(8, dtype=complex))
        U = me.CayleyTransform(me.asnumpy(me.getRandomHermitianMatrix(16)))
        np.testing.assert_almost_equal(me.asnumpy(U.dot(U.conj().T)), np.eye(16, dtype=complex))
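    # CayleyTransform maps a Hermitian matrix H to a unitary matrix, e.g. via
    # U = (I - 1j*H) @ inv(I + 1j*H) (the exact sign/scaling is library-specific),
    # so U @ U^H must be the identity, which is what the assertions above verify.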
def test_kurtosis(self):
self.assertAlmostEqual(me.kurtosis(me.randn(10000000)), 0.0, places=2)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5205 | import numpy as np
import matplotlib.pyplot as plt
from modelling.utilities import ProgressBar
class Solver:
def __init__(self, function, initial=np.array([0,0])):
self._function = function
self._initial = initial
self._solution = np.array([])
self._solutions = np.zeros((len(function),1))
self._time = np.array([])
self._label = "none"
self._fig, self._ax = plt.subplots()
self._functions = function
def __euler(self, start, end, h):
x0 = self._initial[0]
result_a = np.arange(x0, end, h)
time_a = np.arange(x0, end, h)
result_a[0] = self._initial[1]
for (i, previous_result) in enumerate(result_a[:-1]):
result_a[i+1] = previous_result + h*self._function(time_a[i], previous_result)
result_b = np.arange(x0, start-h, -h)
time_b = np.arange(x0, start-h, -h)
result_b[0] = self._initial[1]
for (i, previous_result) in enumerate(result_b[:-1]):
result_b[i+1] = previous_result + -h*self._function(time_b[i], previous_result)
result = np.concatenate((result_b[::-1], result_a[1:]))
time = np.concatenate((time_b[::-1], time_a[1:]))
self._solution = result
self._time = time
self._label = "Euler"
return np.stack([time, result])
def __heun(self, start, end, h):
x0 = self._initial[0]
result_a = np.arange(x0, end, h)
time_a = np.arange(x0, end, h)
result_a[0] = self._initial[1]
for (i, previous_result) in enumerate(result_a[:-1]):
y_i1 = previous_result + h*self._function(time_a[i], previous_result)
result_a[i+1] = previous_result + h/2*(self._function(time_a[i], previous_result) + self._function(time_a[i+1], y_i1))
result_b = np.arange(x0, start-h, -h)
time_b = np.arange(x0, start-h, -h)
result_b[0] = self._initial[1]
for (i, previous_result) in enumerate(result_b[:-1]):
y_i1 = previous_result - h*self._function(time_b[i], previous_result)
result_b[i+1] = previous_result - h/2*(self._function(time_b[i], previous_result) + self._function(time_b[i+1], y_i1))
result = np.concatenate((result_b[::-1], result_a[1:]))
time = np.concatenate((time_b[::-1], time_a[1:]))
self._solution = result
self._time = time
self._label = "Heun"
return np.stack([time, result])
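    # Heun's method (improved Euler): predict y* = y_i + h*f(t_i, y_i), then correct with
    # the trapezoidal average y_{i+1} = y_i + h/2 * (f(t_i, y_i) + f(t_{i+1}, y*)), which
    # is what the forward loop above implements (the backward sweep uses the same scheme
    # with step -h).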
def __rk4(self, start, end, h):
result = np.arange(start, end, h)
time = np.arange(start, end, h)
result[0] = self._initial
for (i,_) in enumerate(time[1:],1):
k1 = h*self._function(time[i-1], result[i-1])
k2 = h*self._function(time[i-1] + h/2, result[i-1] + k1/2)
k3 = h*self._function(time[i-1] + h/2, result[i-1] + k2/2)
k4 = h*self._function(time[i-1] + h, result[i-1] + k3)
result[i] = result[i-1] + 1/6*(k1+2*k2+2*k3+k4)
self._solution = result
self._time = time
self._label = "Runge-Kutta 4"
return np.stack([time, result], axis=1)
def __rk_4_vec(self, start, end, h):
result = np.zeros([len(self._functions), int((end-start)/h)])
time = np.arange(start, end, h)
result[:,0] = self._initial[1:].T
k_values = np.zeros((len(self._functions), 4))
for (i,t) in enumerate(time[:-1],0):
for (k, func) in enumerate(self._functions):
k_values[k,0] = h*func(t, *result[:,i])
for (k, func) in enumerate(self._functions):
# print(result[:,i] + k_values[k,0]/2, k_values[:,0])
k_values[k,1] = h*func(t + h/2, *(result[:,i] + k_values[:,0]/2))
for (k, func) in enumerate(self._functions):
k_values[k,2] = h*func(t + h/2, *(result[:,i] + k_values[:,1]/2))
for (k, func) in enumerate(self._functions):
k_values[k,3] = h*func(t + h, *(result[:,i] + k_values[:,2]))
result[:,i+1] = result[:,i] + 1/6*(k_values[:,0]+2*k_values[:,1]+2*k_values[:,2]+k_values[:,3])
self._solutions = result
self._time = time
self._label = "Runge-Kutta 4"
return np.vstack([time, result])
def __rk45_vec(self, start, end, error=0.0001, h=0.5, limit=100):
progress = ProgressBar()
t = self._initial[0]
result = np.zeros((len(self._functions),1))
result[:, 0] = self._initial[1:].T
# print(result)
time = np.array([t])
while(t < end):
h, y5 = self.__rk45_step_vec(limit, h, t, result[:,-1], error)
t += h
# print(result.shape, y5.shape)
result = np.concatenate([result, y5], axis=1)
# print(result)
time = np.append(time, [t])
# print(t)
progress.step(h/(end-start)*100)
progress.show()
h = -h
t = self._initial[0]
while(start < t):
h, y5 = self.__rk45_step_vec(limit, h, t, result[:,0], error)
t += h
result = np.append([y5], result)
time = np.append([t], time)
progress.step(-h/(end-start)*100)
progress.show()
self._solutions = result
self._time = time
self._label = "Runge-Kutta 45"
return np.vstack([time, result])
def __rk45(self, start, end, error=0.0001, h=0.5, limit=100):
t = self._initial[0]
result = np.array([self._initial[1]])
time = np.array([t])
while(t < end):
h, y5 = self.__rk45_step(limit, h, t, result[-1], error)
t += h
result = np.append(result, [y5])
time = np.append(time, [t])
h = -h
t = self._initial[0]
while(start < t):
h, y5 = self.__rk45_step(limit, h, t, result[0], error)
t += h
result = np.append([y5], result)
time = np.append([t], time)
self._solution = result
self._time = time
self._label = "Runge-Kutta 45"
return np.stack([time, result])
def __rk45_step_vec(self, limit, h, ti, yi, max_error, error_factor=2):
i = 0
k_values = np.zeros((len(self._functions), 6))
while (i < limit):
i += 1
for (k, func) in enumerate(self._functions):
k_values[k,0] = h*func(ti, *yi)
for (k, func) in enumerate(self._functions):
                k_values[k,1] = h*func(ti + h/4, *(yi + k_values[:,0]/4))  # y + k1/4 per the Fehlberg tableau, matching the scalar __rk45_step below
for (k, func) in enumerate(self._functions):
k_values[k,2] = h*func(ti + h*3/8, *(yi + k_values[:,0]*3/32 + k_values[:,1]*9/32))
for (k, func) in enumerate(self._functions):
k_values[k,3] = h*func(ti + h*12/13, *(yi + k_values[:,0]*1932/2197 - k_values[:,1]*7200/2197 + k_values[:,2]*7296/2197))
for (k, func) in enumerate(self._functions):
k_values[k,4] = h*func(ti + h, *(yi + k_values[:,0]*439/216 - k_values[:,1]*8 + k_values[:,2]*3680/513 - k_values[:,3]*845/4104))
for (k, func) in enumerate(self._functions):
                k_values[k,5] = h*func(ti + h/2, *(yi - k_values[:,0]*8/27 + k_values[:,1]*2 - k_values[:,2]*3544/2565 + k_values[:,3]*1859/4104 - k_values[:,4]*11/40))
y5 = yi + 16/135*k_values[:,0] + 6656/12825*k_values[:,2] + 28561/56430*k_values[:,3] - 9/50*k_values[:,4] + 2/55*k_values[:,5]
y4 = yi + 25/216*k_values[:,0] + 1408/2565*k_values[:,2] + 2197/4104*k_values[:,3] - 1/5*k_values[:,4]
error = np.amax(np.abs(y5-y4))
# print(error, max_error)
if (error > max_error):
h /= error_factor
elif (error < max_error/error_factor):
h *= error_factor
break
else:
break
return h, np.reshape(y5, (len(self._functions),1))
def __rk45_step(self, limit, h, ti, yi, error):
i = 0
while (i < limit):
i += 1
k1 = h*self._function(ti, yi)
k2 = h*self._function(ti+h/4, yi+k1/4)
k3 = h*self._function(ti+3*h/8, yi+3*k1/32+9*k2/32)
k4 = h*self._function(ti+12*h/13, yi+1932/2197*k1-7200/2197*k2+7296/2197*k3)
k5 = h*self._function(ti+h, yi+k1*439/216-k2*8+k3*3680/513-k4*845/4104)
k6 = h*self._function(ti+h/2, yi-8/27*k1+2*k2-3544/2565*k3+1859/4104*k4-11/40*k5)
y5 = yi + 16/135*k1+6656/12825*k3+28561/56430*k4-9/50*k5+2/55*k6
y4 = yi + 25/216*k1+1408/2565*k3+2197/4104*k4-1/5*k5
if (abs(y5-y4) > error):
h /= 2
else:
break
return h, y5
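    # Both __rk45_step and __rk45_step_vec implement the embedded Runge-Kutta-Fehlberg
    # 4(5) pair: the difference |y5 - y4| between the 5th- and 4th-order solutions
    # estimates the local error, and the step size h is halved (or, in the vectorised
    # variant, divided by error_factor, and grown again by error_factor when the error
    # is comfortably small) until the estimate drops below the requested tolerance,
    # with at most `limit` retries per step.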
def run(self, method, start, end, h):
if (method == "euler"):
return self.__euler(start, end, h)
elif (method == "heun"):
return self.__heun(start, end, h)
elif (method == "rk4"):
return self.__rk_4_vec(start, end, h)
elif (method == "rk45"):
return self.__rk45_vec(start, end, h=h)
else:
return None
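    # Minimal usage sketch (hypothetical right-hand sides; the argument layout follows
    # __init__ and run above, with initial = [t0, y1_0, y2_0, ...]):
    #
    #   prey = lambda t, x, y: 1.0 * x - 0.1 * x * y
    #   predator = lambda t, x, y: 0.075 * x * y - 1.5 * y
    #   solver = Solver([prey, predator], initial=np.array([0.0, 10.0, 5.0]))
    #   solver.run("rk4", start=0.0, end=50.0, h=0.01)
    #   solver.plot(label=["prey", "predator"])
    #   solver.show()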
def plot(self, label=[]):
if not label:
label = self._label
for (i, sol) in enumerate(self._solutions):
self._ax.plot(self._time, sol, label=label[i])
def show(self):
self._ax.legend()
plt.title("Populations vs Time")
plt.legend()
plt.xlabel("Time")
plt.ylabel("Population")
plt.show() |
the-stack_0_5206 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_empty_listarray():
a = ak.Array(
ak.layout.ListArray64(
ak.layout.Index64(np.array([], dtype=np.int64)),
ak.layout.Index64(np.array([], dtype=np.int64)),
ak.layout.NumpyArray(np.array([])),
)
)
assert ak.to_list(a * 3) == []
starts = ak.layout.Index64(np.array([], dtype=np.int64))
stops = ak.layout.Index64(np.array([3, 3, 5], dtype=np.int64))
content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))
array = ak.Array(ak.layout.ListArray64(starts, stops, content))
array + array
|
the-stack_0_5208 | """
Sponge Knowledge Base
Demo Plus
"""
from java.lang import System
from os import listdir
from os.path import isfile, join, isdir
class DrawAndUploadDoodle(Action):
def onConfigure(self):
self.withLabel("Draw and upload a doodle").withDescription("Shows a canvas to draw a doodle and uploads it to the server")
self.withArg(
BinaryType("image").withLabel("Doodle").withMimeType("image/png")
.withFeatures({"characteristic":"drawing", "width":300, "height":250, "background":"FFFFFF", "color":"000000", "strokeWidth":2})
)
self.withResult(StringType().withLabel("Status"))
self.withFeatures({"icon":"brush"})
def onCall(self, image):
if not sponge.getVariable("demo.readOnly", False):
filename = str(System.currentTimeMillis()) + ".png"
SpongeUtils.writeByteArrayToFile(image, sponge.getProperty("doodlesDir") + "/" + filename)
return "Uploaded as " + filename
else:
return "Uploading disabled in the read only mode"
class ListDoodles(Action):
def onConfigure(self):
self.withLabel("List doodles").withDescription("Returns a list of doodle filenames").withFeatures({"visible":False})
self.withNoArgs().withResult(ListType(StringType()).withLabel("Doodles"))
def onCall(self):
dir = sponge.getProperty("doodlesDir")
doodles = [f for f in listdir(dir) if isfile(join(dir, f)) and f.endswith(".png")] if isdir(dir) else []
return sorted(doodles, reverse=True)
class ViewDoodle(Action):
def onConfigure(self):
self.withLabel("View a doodle").withDescription("Views a doodle")
self.withArg(StringType("image").withLabel("Doodle name").withProvided(ProvidedMeta().withValue().withValueSet().withOverwrite()))
self.withResult(BinaryType().withAnnotated().withMimeType("image/png").withLabel("Doodle image"))
self.withFeature("icon", "drawing")
def onCall(self, name):
return AnnotatedValue(SpongeUtils.readFileToByteArray(sponge.getProperty("doodlesDir") + "/" + name)).withFeatures({"filename":"doodle_" + name})
def onProvideArgs(self, context):
if "image" in context.provide:
doodles = sponge.call("ListDoodles")
context.provided["image"] = ProvidedValue().withValue(doodles[0] if doodles else None).withValueSet(doodles)
def onStartup():
sponge.logger.info(str(sponge.call("ListDoodles")))
|
the-stack_0_5209 | """
Support for SSH access.
For more details about this platform, please refer to the documentation at
https://github.com/custom-components/switch.ssh
"""
import base64
import paramiko
import logging
import voluptuous as vol
from datetime import timedelta
import json
import asyncio
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
CONF_VALUE_TEMPLATE, CONF_PORT,
STATE_UNKNOWN, CONF_UNIT_OF_MEASUREMENT)
__version__ = '0.2.2'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'switch'
DEFAULT_NAME = 'SSH'
DEFAULT_SSH_PORT = 22
DEFAULT_INTERVAL = 30
CONF_KEY = 'key'
CONF_INTERVAL = 'interval'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_STATUS = 'command_status'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_KEY): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
vol.Required(CONF_COMMAND_ON): cv.string,
vol.Required(CONF_COMMAND_OFF): cv.string,
vol.Required(CONF_COMMAND_STATUS): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
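# Example configuration.yaml entry (illustrative values only; the keys mirror
# PLATFORM_SCHEMA above):
#
# switch:
#   - platform: ssh
#     name: Remote service
#     host: 192.168.1.10
#     port: 22
#     username: homeassistant
#     password: !secret ssh_password
#     key: "AAAAB3NzaC1yc2EAAA..."   # base64-encoded RSA host key used in _connect()
#     command_on: "systemctl start myservice"
#     command_off: "systemctl stop myservice"
#     command_status: "systemctl is-active myservice"
#     value_template: "{{ 'on' if value == 'active' else 'off' }}"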
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
dev = []
dev.append(SSHSwitch(hass, config))
async_add_devices(dev, True)
class SSHSwitch(Entity):
def __init__(self, hass, config):
"""Initialize the scanner."""
self._name = config.get(CONF_NAME)
self._host = config.get(CONF_HOST)
self._username = config.get(CONF_USERNAME)
self._password = config.get(CONF_PASSWORD)
self._key = config.get(CONF_KEY)
self._interval = config.get(CONF_INTERVAL)
self._port = config.get(CONF_PORT)
self._command_on = config.get(CONF_COMMAND_ON)
self._command_off = config.get(CONF_COMMAND_OFF)
self._command_status = config.get(CONF_COMMAND_STATUS)
self._value_template = config.get(CONF_VALUE_TEMPLATE)
self._ssh = None
self._state = None
self._connected = False
self._attributes = {}
if self._value_template is not None:
self._value_template.hass = hass
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return 'mdi:folder-key-network'
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_on(self):
"""Return true if switch is on."""
return self._state == "on"
@property
def state_attributes(self):
"""Return the device state attributes."""
return self._attributes
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, **kwargs):
from paramiko import ssh_exception
try:
if not self._connected:
self._connect()
"""Exit if still not connected by now."""
if not self._connected:
return None
stdin, stdout, stderr = self._ssh.exec_command(self._command_status)
value = ""
for line in stdout:
value = line.strip('\n')
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, STATE_UNKNOWN)
else:
self._state = value
_LOGGER.debug(self._state)
except Exception as err:
_LOGGER.error("Update error: %s", str(err))
self._disconnect()
async def async_turn_on(self, **kwargs):
"""Instruct the switch to turn on."""
self._state = "on"
self._execute(self._command_on)
async def async_turn_off(self, **kwargs):
"""Instruct the switch to turn off."""
self._state = "off"
self._execute(self._command_off)
def _execute(self, command):
"""Execute remote command."""
from paramiko import ssh_exception
cmd = command.strip('\n')
try:
if not self._connected:
self._connect()
"""Exit if still not connected by now."""
if not self._connected:
_LOGGER.error("Unable to establish connection.")
return None
"""
Option 1:
"""
stdin, stdout, stderr = self._ssh.exec_command(cmd)
"""
Option 2:
chan = self._ssh.invoke_shell()
stdin = chan.makefile('wb')
stdout = chan.makefile('r')
stdin.write(cmd + '\n')
chan.close()
"""
for line in stdout:
_LOGGER.debug("Raw Line Response: %s", str(line))
"""Ignore any response"""
return None
except Exception as err:
_LOGGER.error("Unexpected SSH error: %s", str(err))
self._disconnect()
def _connect(self):
"""Connect to the SSH server."""
from paramiko import RSAKey, SSHClient, ssh_exception
from base64 import b64decode
try:
key = paramiko.RSAKey(data=base64.b64decode(self._key))
client = paramiko.SSHClient()
client.get_host_keys().add(self._host, 'ssh-rsa', key)
client.connect(self._host, username=self._username, password=self._password)
self._ssh = client
self._connected = True
except ssh_exception.BadHostKeyException as err:
_LOGGER.error("Host Key Mismatch: %s", str(err))
self._disconnect()
except:
_LOGGER.error("Connection refused. SSH enabled?")
self._disconnect()
def _disconnect(self):
"""Disconnect the current SSH connection."""
try:
self._ssh.close()
except Exception:
pass
finally:
self._ssh = None
self._connected = False
|
the-stack_0_5210 | import datetime
from moto.organizations import utils
def test_make_random_org_id():
org_id = utils.make_random_org_id()
org_id.should.match(utils.ORG_ID_REGEX)
def test_make_random_root_id():
root_id = utils.make_random_root_id()
root_id.should.match(utils.ROOT_ID_REGEX)
def test_make_random_ou_id():
root_id = utils.make_random_root_id()
ou_id = utils.make_random_ou_id(root_id)
ou_id.should.match(utils.OU_ID_REGEX)
def test_make_random_account_id():
account_id = utils.make_random_account_id()
account_id.should.match(utils.ACCOUNT_ID_REGEX)
def test_make_random_create_account_status_id():
create_account_status_id = utils.make_random_create_account_status_id()
create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
def test_make_random_policy_id():
policy_id = utils.make_random_policy_id()
policy_id.should.match(utils.POLICY_ID_REGEX)
def validate_organization(response):
org = response["Organization"]
sorted(org.keys()).should.equal(
[
"Arn",
"AvailablePolicyTypes",
"FeatureSet",
"Id",
"MasterAccountArn",
"MasterAccountEmail",
"MasterAccountId",
]
)
org["Id"].should.match(utils.ORG_ID_REGEX)
org["MasterAccountId"].should.equal(utils.MASTER_ACCOUNT_ID)
org["MasterAccountArn"].should.equal(
utils.MASTER_ACCOUNT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"])
)
org["Arn"].should.equal(
utils.ORGANIZATION_ARN_FORMAT.format(org["MasterAccountId"], org["Id"])
)
org["MasterAccountEmail"].should.equal(utils.MASTER_ACCOUNT_EMAIL)
org["FeatureSet"].should.be.within(["ALL", "CONSOLIDATED_BILLING"])
org["AvailablePolicyTypes"].should.equal(
[{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}]
)
def validate_roots(org, response):
response.should.have.key("Roots").should.be.a(list)
response["Roots"].shouldnt.equal([])
root = response["Roots"][0]
root.should.have.key("Id").should.match(utils.ROOT_ID_REGEX)
root.should.have.key("Arn").should.equal(
utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root["Id"])
)
root.should.have.key("Name").should.be.a(str)
root.should.have.key("PolicyTypes").should.equal([])
def validate_organizational_unit(org, response):
response.should.have.key("OrganizationalUnit").should.be.a(dict)
ou = response["OrganizationalUnit"]
ou.should.have.key("Id").should.match(utils.OU_ID_REGEX)
ou.should.have.key("Arn").should.equal(
utils.OU_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], ou["Id"])
)
ou.should.have.key("Name").should.be.a(str)
def validate_account(org, account):
sorted(account.keys()).should.equal(
["Arn", "Email", "Id", "JoinedMethod", "JoinedTimestamp", "Name", "Status"]
)
account["Id"].should.match(utils.ACCOUNT_ID_REGEX)
account["Arn"].should.equal(
utils.ACCOUNT_ARN_FORMAT.format(
org["MasterAccountId"], org["Id"], account["Id"]
)
)
account["Email"].should.match(utils.EMAIL_REGEX)
account["JoinedMethod"].should.be.within(["INVITED", "CREATED"])
account["Status"].should.be.within(["ACTIVE", "SUSPENDED"])
account["Name"].should.be.a(str)
account["JoinedTimestamp"].should.be.a(datetime.datetime)
def validate_create_account_status(create_status):
sorted(create_status.keys()).should.equal(
[
"AccountId",
"AccountName",
"CompletedTimestamp",
"Id",
"RequestedTimestamp",
"State",
]
)
create_status["Id"].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
create_status["AccountId"].should.match(utils.ACCOUNT_ID_REGEX)
create_status["AccountName"].should.be.a(str)
create_status["State"].should.equal("SUCCEEDED")
create_status["RequestedTimestamp"].should.be.a(datetime.datetime)
create_status["CompletedTimestamp"].should.be.a(datetime.datetime)
def validate_policy_summary(org, summary):
summary.should.be.a(dict)
summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX)
summary.should.have.key("Arn").should.equal(
utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"])
)
summary.should.have.key("Name").should.be.a(str)
summary.should.have.key("Description").should.be.a(str)
summary.should.have.key("Type").should.equal("SERVICE_CONTROL_POLICY")
summary.should.have.key("AwsManaged").should.be.a(bool)
def validate_service_control_policy(org, response):
response.should.have.key("PolicySummary").should.be.a(dict)
response.should.have.key("Content").should.be.a(str)
validate_policy_summary(org, response["PolicySummary"])
def validate_account_created(accounts_list, account_id):
account_created = False
for account in accounts_list:
if account_id == account["Id"]:
account_created = True
assert account_created
def validate_account_closed(accounts_list, account_id):
for account in accounts_list:
if account_id == account["Id"]:
assert False
|
the-stack_0_5211 | # flake8: noqa
from . import dataclasses
from .class_validators import root_validator, validator
from .decorator import validate_arguments
from .env_settings import BaseSettings
from .error_wrappers import ValidationError
from .errors import *
from .fields import Field, Required, Schema
from .main import *
from .networks import *
from .parse import Protocol
from .tools import *
from .types import *
from .version import VERSION
# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2
# please use "from pydantic.errors import ..." instead
__all__ = [
# dataclasses
'dataclasses',
# class_validators
'root_validator',
'validator',
# decorator
'validate_arguments',
# env_settings
'BaseSettings',
# error_wrappers
'ValidationError',
# fields
'Field',
'Required',
'Schema',
# main
'BaseConfig',
'BaseModel',
'Extra',
'compiled',
'create_model',
'validate_model',
# network
'AnyUrl',
'AnyHttpUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'RedisDsn',
'validate_email',
# parse
'Protocol',
# tools
'parse_file_as',
'parse_obj_as',
'parse_raw_as',
# types
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'ByteSize',
# version
'VERSION',
]
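# Minimal usage sketch of the re-exported API (illustrative model only):
#
#   from pydantic import BaseModel, Field, ValidationError
#
#   class User(BaseModel):
#       id: int
#       name: str = Field("Jane Doe", max_length=50)
#
#   try:
#       User(id="not-an-int")
#   except ValidationError as exc:
#       print(exc)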
|